filename (stringlengths 4–198) | content (stringlengths 25–939k) | environment (list) | variablearg (list) | constarg (list) | variableargjson (stringclasses, 1 value) | constargjson (stringlengths 2–3.9k) | lang (stringclasses, 3 values) | constargcount (float64, 0–129, ⌀) | variableargcount (float64, 0–0, ⌀) | sentence (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|---|---|
pre_commit/util.py | from __future__ import unicode_literals
import contextlib
import errno
import functools
import os.path
import shutil
import stat
import subprocess
import tempfile
import pkg_resources
import six
from pre_commit import five
from pre_commit import parse_shebang
@contextlib.contextmanager
def cwd(path):
original_cwd = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(original_cwd)
def mkdirp(path):
try:
os.makedirs(path)
except OSError:
if not os.path.exists(path):
raise
def memoize_by_cwd(func):
"""Memoize a function call based on os.getcwd()."""
@functools.wraps(func)
def wrapper(*args):
cwd = os.getcwd()
key = (cwd,) + args
try:
return wrapper._cache[key]
except KeyError:
ret = wrapper._cache[key] = func(*args)
return ret
wrapper._cache = {}
return wrapper
@contextlib.contextmanager
def clean_path_on_failure(path):
"""Cleans up the directory on an exceptional failure."""
try:
yield
except BaseException:
if os.path.exists(path):
rmtree(path)
raise
@contextlib.contextmanager
def noop_context():
yield
def no_git_env():
# Too many bugs dealing with environment variables and GIT:
# https://github.com/pre-commit/pre-commit/issues/300
# In git 2.6.3 (maybe others), git exports GIT_WORK_TREE while running
# pre-commit hooks
# In git 1.9.1 (maybe others), git exports GIT_DIR and GIT_INDEX_FILE
# while running pre-commit hooks in submodules.
# GIT_DIR: Causes git clone to clone wrong thing
# GIT_INDEX_FILE: Causes 'error invalid object ...' during commit
return {
k: v for k, v in os.environ.items()
if not k.startswith('GIT_') or k in {'GIT_SSH'}
}
@contextlib.contextmanager
def tmpdir():
"""Contextmanager to create a temporary directory. It will be cleaned up
afterwards.
"""
tempdir = tempfile.mkdtemp()
try:
yield tempdir
finally:
rmtree(tempdir)
def resource_filename(*segments):
return pkg_resources.resource_filename(
'pre_commit', os.path.join('resources', *segments),
)
def make_executable(filename):
original_mode = os.stat(filename).st_mode
os.chmod(
filename, original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH,
)
class CalledProcessError(RuntimeError):
def __init__(self, returncode, cmd, expected_returncode, output=None):
super(CalledProcessError, self).__init__(
returncode, cmd, expected_returncode, output,
)
self.returncode = returncode
self.cmd = cmd
self.expected_returncode = expected_returncode
self.output = output
def to_bytes(self):
output = []
for maybe_text in self.output:
if maybe_text:
output.append(
b'\n ' +
five.to_bytes(maybe_text).replace(b'\n', b'\n '),
)
else:
output.append(b'(none)')
return b''.join((
five.to_bytes(
'Command: {!r}\n'
'Return code: {}\n'
'Expected return code: {}\n'.format(
self.cmd, self.returncode, self.expected_returncode,
),
),
b'Output: ', output[0], b'\n',
b'Errors: ', output[1], b'\n',
))
def to_text(self):
return self.to_bytes().decode('UTF-8')
if six.PY2: # pragma: no cover (py2)
__str__ = to_bytes
__unicode__ = to_text
else: # pragma: no cover (py3)
__bytes__ = to_bytes
__str__ = to_text
def cmd_output(*cmd, **kwargs):
retcode = kwargs.pop('retcode', 0)
encoding = kwargs.pop('encoding', 'UTF-8')
__popen = kwargs.pop('__popen', subprocess.Popen)
popen_kwargs = {
'stdin': subprocess.PIPE,
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
}
# py2/py3 on windows are more strict about the types here
cmd = tuple(five.n(arg) for arg in cmd)
kwargs['env'] = {
five.n(key): five.n(value)
for key, value in kwargs.pop('env', {}).items()
} or None
try:
cmd = parse_shebang.normalize_cmd(cmd)
except parse_shebang.ExecutableNotFoundError as e:
returncode, stdout, stderr = e.to_output()
else:
popen_kwargs.update(kwargs)
proc = __popen(cmd, **popen_kwargs)
stdout, stderr = proc.communicate()
returncode = proc.returncode
if encoding is not None and stdout is not None:
stdout = stdout.decode(encoding)
if encoding is not None and stderr is not None:
stderr = stderr.decode(encoding)
if retcode is not None and retcode != returncode:
raise CalledProcessError(
returncode, cmd, retcode, output=(stdout, stderr),
)
return returncode, stdout, stderr
def rmtree(path):
"""On windows, rmtree fails for readonly dirs."""
def handle_remove_readonly(func, path, exc): # pragma: no cover (windows)
excvalue = exc[1]
if (
func in (os.rmdir, os.remove, os.unlink) and
excvalue.errno == errno.EACCES
):
os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
func(path)
else:
raise
shutil.rmtree(path, ignore_errors=False, onerror=handle_remove_readonly)
def copy_tree_to_path(src_dir, dest_dir):
"""Copies all of the things inside src_dir to an already existing dest_dir.
This looks eerily similar to shutil.copytree, but copytree has no option
for not creating dest_dir.
"""
names = os.listdir(src_dir)
for name in names:
srcname = os.path.join(src_dir, name)
destname = os.path.join(dest_dir, name)
if os.path.isdir(srcname):
shutil.copytree(srcname, destname)
else:
shutil.copy(srcname, destname)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
3rdparty/webkit/Tools/gtk/gtkdoc.py | # Copyright (C) 2011 Igalia S.L.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import errno
import logging
import os
import os.path
import subprocess
import sys
class GTKDoc(object):
"""Class that controls a gtkdoc run.
Each instance of this class represents one gtkdoc configuration
and set of documentation. The gtkdoc package is a series of tools
run consecutively which converts inline C/C++ documentation into
docbook files and then into HTML. This class is suitable for
generating documentation or simply verifying correctness.
Keyword arguments:
output_dir -- The path where gtkdoc output should be placed. Generation
may overwrite files in this directory. Required.
module_name -- The name of the documentation module. For libraries this
is typically the library name. Required if no library path
is given.
source_dirs -- A list of paths to directories of source code to be scanned.
Required if headers is not specified.
ignored_files -- A list of filenames to ignore in the source directory. It is
only necessary to provide the basenames of these files.
Typically it is important to provide an updated list of
ignored files to prevent warnings about undocumented symbols.
headers -- A list of paths to headers to be scanned. Required if source_dirs
is not specified.
namespace -- The library namespace.
decorator -- If a decorator is used to unhide certain symbols in header
files this parameter is required for successful scanning.
(default '')
deprecation_guard -- gtkdoc tries to ensure that symbols marked as deprecated
are encased in this C preprocessor define. This is required
to avoid gtkdoc warnings. (default '')
cflags -- This parameter specifies any preprocessor flags necessary for
building the scanner binary during gtkdoc-scanobj. Typically
this includes all absolute include paths necessary to resolve
all header dependencies. (default '')
ldflags -- This parameter specifies any linker flags necessary for
building the scanner binary during gtkdoc-scanobj. Typically
this includes "-lyourlibraryname". (default '')
library_path -- This parameter specifies the path to the directory where your
library resides, used for building the scanner binary during
gtkdoc-scanobj. (default '')
doc_dir -- The path to other documentation files necessary to build
the documentation. The files in this directory, as well as
the files in the 'html' subdirectory will be copied
recursively into the output directory. (default '')
main_sgml_file -- The path or name (if a doc_dir is given) of the SGML file
that is considered the main page of your documentation.
(default: <module_name>-docs.sgml)
version -- The version number of the module. If this is provided,
a version.xml file containing the version will be created
in the output directory during documentation generation.
interactive -- Whether errors or warnings should prompt the user
to continue. When this value is false, generation
will continue despite warnings. (default False)
virtual_root -- A temporary installation directory which is used as the root
where the actual installation prefix lives; this is mostly
useful for packagers, and should be set to what is given to
make install as DESTDIR.
"""
def __init__(self, args):
# Parameters specific to scanning.
self.module_name = ''
self.source_dirs = []
self.headers = []
self.ignored_files = []
self.namespace = ''
self.decorator = ''
self.deprecation_guard = ''
# Parameters specific to gtkdoc-scanobj.
self.cflags = ''
self.ldflags = ''
self.library_path = ''
# Parameters specific to generation.
self.output_dir = ''
self.doc_dir = ''
self.main_sgml_file = ''
# Parameters specific to gtkdoc-fixxref.
self.cross_reference_deps = []
self.interactive = False
self.logger = logging.getLogger('gtkdoc')
for key, value in iter(args.items()):
setattr(self, key, value)
if not getattr(self, 'output_dir'):
raise Exception('output_dir not specified.')
if not getattr(self, 'module_name'):
raise Exception('module_name not specified.')
if not getattr(self, 'source_dirs') and not getattr(self, 'headers'):
raise Exception('Neither source_dirs nor headers specified.')
# Make all paths absolute in case we were passed relative paths, since
# we change the current working directory when executing subcommands.
self.output_dir = os.path.abspath(self.output_dir)
self.source_dirs = [os.path.abspath(x) for x in self.source_dirs]
self.headers = [os.path.abspath(x) for x in self.headers]
if self.library_path:
self.library_path = os.path.abspath(self.library_path)
if not self.main_sgml_file:
self.main_sgml_file = self.module_name + "-docs.sgml"
def generate(self, html=True):
self.saw_warnings = False
self._copy_doc_files_to_output_dir(html)
self._write_version_xml()
self._run_gtkdoc_scan()
self._run_gtkdoc_scangobj()
self._run_gtkdoc_mkdb()
if not html:
return
self._run_gtkdoc_mkhtml()
self._run_gtkdoc_fixxref()
def _delete_file_if_exists(self, path):
if not os.access(path, os.F_OK | os.R_OK):
return
self.logger.debug('deleting %s', path)
os.unlink(path)
def _create_directory_if_nonexistent(self, path):
try:
os.makedirs(path)
except OSError as error:
if error.errno != errno.EEXIST:
raise
def _raise_exception_if_file_inaccessible(self, path):
if not os.path.exists(path) or not os.access(path, os.R_OK):
raise Exception("Could not access file at: %s" % path)
def _output_has_warnings(self, outputs):
for output in outputs:
if output and output.find('warning') != -1:
return True
return False
def _ask_yes_or_no_question(self, question):
if not self.interactive:
return True
question += ' [y/N] '
answer = None
while answer != 'y' and answer != 'n' and answer != '':
answer = raw_input(question).lower()
return answer == 'y'
def _run_command(self, args, env=None, cwd=None, print_output=True, ignore_warnings=False):
if print_output:
self.logger.debug("Running %s", args[0])
self.logger.debug("Full command args: %s", str(args))
process = subprocess.Popen(args, env=env, cwd=cwd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = [b.decode("utf-8") for b in process.communicate()]
if print_output:
if stdout:
try:
sys.stdout.write(stdout.encode("utf-8"))
except UnicodeDecodeError:
sys.stdout.write(stdout)
if stderr:
try:
sys.stderr.write(stderr.encode("utf-8"))
except UnicodeDecodeError:
sys.stderr.write(stderr)
if process.returncode != 0:
raise Exception('%s produced a non-zero return code %i'
% (args[0], process.returncode))
if not ignore_warnings and ('warning' in stderr or 'warning' in stdout):
self.saw_warnings = True
if not self._ask_yes_or_no_question('%s produced warnings, '
'try to continue?' % args[0]):
raise Exception('%s step failed' % args[0])
return stdout.strip()
def _copy_doc_files_to_output_dir(self, html=True):
if not self.doc_dir:
self.logger.info('Not copying any files from doc directory,'
' because no doc directory given.')
return
def copy_file_replacing_existing(src, dest):
if os.path.isdir(src):
self.logger.debug('skipped directory %s', src)
return
if not os.access(src, os.F_OK | os.R_OK):
self.logger.debug('skipped unreadable %s', src)
return
self._delete_file_if_exists(dest)
self.logger.debug('created %s', dest)
try:
os.link(src, dest)
except OSError:
os.symlink(src, dest)
def copy_all_files_in_directory(src, dest):
for path in os.listdir(src):
copy_file_replacing_existing(os.path.join(src, path),
os.path.join(dest, path))
self.logger.debug('Copying template files to output directory...')
self._create_directory_if_nonexistent(self.output_dir)
copy_all_files_in_directory(self.doc_dir, self.output_dir)
if not html:
return
self.logger.debug('Copying HTML files to output directory...')
html_src_dir = os.path.join(self.doc_dir, 'html')
html_dest_dir = os.path.join(self.output_dir, 'html')
self._create_directory_if_nonexistent(html_dest_dir)
if os.path.exists(html_src_dir):
copy_all_files_in_directory(html_src_dir, html_dest_dir)
def _write_version_xml(self):
if not self.version:
self.logger.info('No version specified, so not writing version.xml')
return
version_xml_path = os.path.join(self.output_dir, 'version.xml')
src_version_xml_path = os.path.join(self.doc_dir, 'version.xml')
# Don't overwrite version.xml if it was in the doc directory.
if os.path.exists(version_xml_path) and \
os.path.exists(src_version_xml_path):
return
output_file = open(version_xml_path, 'w')
output_file.write(self.version)
output_file.close()
def _ignored_files_basenames(self):
return ' '.join([os.path.basename(x) for x in self.ignored_files])
def _run_gtkdoc_scan(self):
args = ['gtkdoc-scan',
'--module=%s' % self.module_name,
'--rebuild-types']
if not self.headers:
# Each source directory should have its own "--source-dir=" prefix.
args.extend(['--source-dir=%s' % path for path in self.source_dirs])
if self.decorator:
args.append('--ignore-decorators=%s' % self.decorator)
if self.deprecation_guard:
args.append('--deprecated-guards=%s' % self.deprecation_guard)
if self.output_dir:
args.append('--output-dir=%s' % self.output_dir)
# We only need to pass the list of ignored files if we are not using an explicit list of headers.
if not self.headers:
# gtkdoc-scan wants the basenames of ignored headers, so strip the
# dirname. Different from "--source-dir", the headers should be
# specified as one long string.
ignored_files_basenames = self._ignored_files_basenames()
if ignored_files_basenames:
args.append('--ignore-headers=%s' % ignored_files_basenames)
if self.headers:
args.extend(self.headers)
self._run_command(args)
def _run_gtkdoc_scangobj(self):
env = os.environ
ldflags = self.ldflags
if self.library_path:
additional_ldflags = ''
for arg in env.get('LDFLAGS', '').split(' '):
if arg.startswith('-L'):
additional_ldflags = '%s %s' % (additional_ldflags, arg)
ldflags = ' "-L%s" %s ' % (self.library_path, additional_ldflags) + ldflags
current_ld_library_path = env.get('LD_LIBRARY_PATH')
if current_ld_library_path:
env['LD_LIBRARY_PATH'] = '%s:%s' % (self.library_path, current_ld_library_path)
else:
env['LD_LIBRARY_PATH'] = self.library_path
if ldflags:
env['LDFLAGS'] = '%s %s' % (ldflags, env.get('LDFLAGS', ''))
if self.cflags:
env['CFLAGS'] = '%s %s' % (self.cflags, env.get('CFLAGS', ''))
if 'CFLAGS' in env:
self.logger.debug('CFLAGS=%s', env['CFLAGS'])
if 'LDFLAGS' in env:
self.logger.debug('LDFLAGS %s', env['LDFLAGS'])
self._run_command(['gtkdoc-scangobj', '--module=%s' % self.module_name],
env=env, cwd=self.output_dir)
def _run_gtkdoc_mkdb(self):
sgml_file = os.path.join(self.output_dir, self.main_sgml_file)
self._raise_exception_if_file_inaccessible(sgml_file)
args = ['gtkdoc-mkdb',
'--module=%s' % self.module_name,
'--main-sgml-file=%s' % sgml_file,
'--source-suffixes=h,c,cpp,cc',
'--output-format=xml',
'--sgml-mode']
if self.namespace:
args.append('--name-space=%s' % self.namespace)
ignored_files_basenames = self._ignored_files_basenames()
if ignored_files_basenames:
args.append('--ignore-files=%s' % ignored_files_basenames)
# Each directory should have its own "--source-dir=" prefix.
args.extend(['--source-dir=%s' % path for path in self.source_dirs])
self._run_command(args, cwd=self.output_dir)
def _run_gtkdoc_mkhtml(self):
html_dest_dir = os.path.join(self.output_dir, 'html')
if not os.path.isdir(html_dest_dir):
raise Exception("%s is not a directory, could not generate HTML"
% html_dest_dir)
elif not os.access(html_dest_dir, os.X_OK | os.R_OK | os.W_OK):
raise Exception("Could not access %s to generate HTML"
% html_dest_dir)
# gtkdoc-mkhtml expects the SGML path to be absolute.
sgml_file = os.path.join(os.path.abspath(self.output_dir),
self.main_sgml_file)
self._raise_exception_if_file_inaccessible(sgml_file)
self._run_command(['gtkdoc-mkhtml', self.module_name, sgml_file],
cwd=html_dest_dir)
def _run_gtkdoc_fixxref(self):
args = ['gtkdoc-fixxref',
'--module=%s' % self.module_name,
'--module-dir=html',
'--html-dir=html']
args.extend(['--extra-dir=%s' % extra_dir for extra_dir in self.cross_reference_deps])
self._run_command(args, cwd=self.output_dir, ignore_warnings=True)
def rebase_installed_docs(self):
if not os.path.isdir(self.output_dir):
raise Exception("Tried to rebase documentation before generating it.")
html_dir = os.path.join(self.virtual_root + self.prefix, 'share', 'gtk-doc', 'html', self.module_name)
if not os.path.isdir(html_dir):
return
args = ['gtkdoc-rebase',
'--relative',
'--html-dir=%s' % html_dir]
args.extend(['--other-dir=%s' % extra_dir for extra_dir in self.cross_reference_deps])
if self.virtual_root:
args.extend(['--dest-dir=%s' % self.virtual_root])
self._run_command(args, cwd=self.output_dir)
def api_missing_documentation(self):
unused_doc_file = os.path.join(self.output_dir, self.module_name + "-unused.txt")
if not os.path.exists(unused_doc_file) or not os.access(unused_doc_file, os.R_OK):
return []
return open(unused_doc_file).read().splitlines()
class PkgConfigGTKDoc(GTKDoc):
"""Class reads a library's pkgconfig file to guess gtkdoc parameters.
Some gtkdoc parameters can be guessed by reading a library's pkgconfig
file, including the cflags, ldflags and version parameters. If you
provide these parameters as well, they will be appended to the ones
guessed via the pkgconfig file.
Keyword arguments:
pkg_config_path -- Path to the pkgconfig file for the library. Required.
"""
def __init__(self, pkg_config_path, args):
super(PkgConfigGTKDoc, self).__init__(args)
pkg_config = os.environ.get('PKG_CONFIG', 'pkg-config')
if not os.path.exists(pkg_config_path):
raise Exception('Could not find pkg-config file at: %s'
% pkg_config_path)
self.cflags += " " + self._run_command([pkg_config,
pkg_config_path,
'--cflags'], print_output=False)
self.ldflags += " " + self._run_command([pkg_config,
pkg_config_path,
'--libs'], print_output=False)
self.version = self._run_command([pkg_config,
pkg_config_path,
'--modversion'], print_output=False)
self.prefix = self._run_command([pkg_config,
pkg_config_path,
'--variable=prefix'], print_output=False)
| [] | [] | ["PKG_CONFIG"] | [] | ["PKG_CONFIG"] | python | 1 | 0 | |
stripe/CreateCustomer/fn.go | package createcustomer
import (
"encoding/json"
"fmt"
"log"
"net/http"
"os"
stripe "github.com/stripe/stripe-go"
"github.com/stripe/stripe-go/customer"
)
// Params - CreateCustomer query parameters
type Params struct {
email string
description string
token string
}
var errorFormat = "{\"error\": {\"message\": \"%s\"}}"
// CreateCustomer - Request: example.com/[email protected]&desc=testing&token=testing
func CreateCustomer(w http.ResponseWriter, r *http.Request) {
stripe.Key = os.Getenv("STRIPE_KEY")
args := Params{
email: r.URL.Query().Get("email"),
description: r.URL.Query().Get("desc"),
token: r.URL.Query().Get("token"),
}
params := &stripe.CustomerParams{
Email: stripe.String(args.email),
Description: stripe.String(args.description),
}
params.SetSource(args.token)
cus, err := customer.New(params)
if err != nil {
errorResponse(w, err)
return
}
customerJSON, err := json.Marshal(cus)
if err != nil {
errorResponse(w, err)
return
}
if _, err := w.Write(customerJSON); err != nil {
errorResponse(w, err)
return
}
}
func errorResponse(w http.ResponseWriter, err error) {
errorJSON, err := json.Marshal(fmt.Sprintf(errorFormat, err.Error()))
if err != nil {
log.Fatalf("Couldn't convert error to json: %s", err.Error())
}
if _, err = w.Write(errorJSON); err != nil {
log.Fatalf("Couldn't write the error response: %s", err.Error())
}
}
| ["\"STRIPE_KEY\""] | [] | ["STRIPE_KEY"] | [] | ["STRIPE_KEY"] | go | 1 | 0 | |
api/API.go | package api
import (
"bytes"
"fmt"
"net/http"
"os"
"time"
)
const URL = "https://sokolmeteo.com"
var loginPayload string
func init() {
loginPayload = os.Getenv("login_payload")
if loginPayload == "" {
panic("no login payload")
}
}
func Login() (*http.Response, error) {
data := []byte(loginPayload)
req, err := http.NewRequest("POST", URL + "/platform/api/user/login", bytes.NewBuffer(data))
if err != nil {
return nil, err
}
// set content type
req.Header.Set("Content-Type", "application/json")
client := &http.Client{Timeout: time.Second * 30}
return client.Do(req)
}
func GetDailyData(deviceID string, cookies []*http.Cookie) (*http.Response, error) {
endDate := time.Now().AddDate(0, 0, 1)
startDate := time.Now().AddDate(0, 0, -1)
endDateFormatted := fmt.Sprintf("%d.%d.%d",
endDate.Day(), endDate.Month(), endDate.Year())
startDateFormatted := fmt.Sprintf("%d.%d.%d",
startDate.Day(), startDate.Month(), startDate.Year())
postfix := "/api/analytics/record?" +
"deviceId=" + deviceID + "&" +
"startDate=" + startDateFormatted + "&" +
"endDate=" + endDateFormatted + "&" +
"parameters=EVS,UVI,L,LI,RSSI,RN,TRR,TR2,t,WD,HM,WV,WM,UV,Upow,PR1,PR,KS,V,TP,TR,AN9,WV2,td"
req, err := http.NewRequest("POST", URL + postfix, nil)
if err != nil {
return nil, err
}
// attach access cookies
for _, c := range cookies {
req.AddCookie(c)
}
client := &http.Client{Timeout: time.Second * 60}
return client.Do(req)
}
| ["\"login_payload\""] | [] | ["login_payload"] | [] | ["login_payload"] | go | 1 | 0 | |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bookkeep.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
pygame_menu/examples/scroll_menu.py | # coding=utf-8
"""
pygame-menu
https://github.com/ppizarror/pygame-menu
EXAMPLE - SCROLL MENU
Shows scrolling in menu.
License:
-------------------------------------------------------------------------------
The MIT License (MIT)
Copyright 2017-2020 Pablo Pizarro R. @ppizarror
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-------------------------------------------------------------------------------
"""
import os
import pygame
import pygame_menu
from functools import partial
FPS = 30.0
H_SIZE = 600 # Height of window size
W_SIZE = 800 # Width of window size
def on_button_click(value=None, text=None):
"""
Button event on menus.
:param value: Button value
:param text: Button text
:return: None
"""
if not text:
print('Hello from {}'.format(value))
else:
print('Hello from {} with {}'.format(text, value))
def paint_background(surface):
"""
Paints a given surface with background color.
:param surface: Pygame surface
:type surface: :py:class:`pygame.Surface`
:return: None
"""
surface.fill((128, 230, 198))
def make_long_menu():
"""
Create a long scrolling menu.
:return: Menu
:rtype: pygame_menu.Menu
"""
# Main menu, pauses execution of the application
_menu = pygame_menu.Menu(
height=400,
onclose=pygame_menu.events.EXIT,
theme=pygame_menu.themes.THEME_BLUE,
title='Main Menu',
width=600, # px
)
_menu_sub = pygame_menu.Menu(
columns=4,
height=400,
onclose=pygame_menu.events.EXIT,
rows=3,
theme=pygame_menu.themes.THEME_GREEN,
title='Menu with columns',
width=600,
)
_menu_text = pygame_menu.Menu(
height=400,
onclose=pygame_menu.events.EXIT,
theme=pygame_menu.themes.THEME_DARK,
title='Text with scroll',
width=600,
)
_menu.add_button('Rows and Columns', _menu_sub)
_menu.add_button('Text scrolled', _menu_text)
_menu.add_vertical_margin(20) # Adds margin
label1 = 'Button n°{}'
label2 = 'Text n°{}: '
for i in range(1, 20):
if i % 2 == 0:
_menu.add_button(label1.format(i),
on_button_click,
'Button n°{}'.format(i))
else:
_menu.add_text_input(label2.format(i),
onchange=on_button_click,
text='Text n°{}'.format(i))
_menu.add_button('Exit', pygame_menu.events.EXIT)
label = 'Button n°{}'
for i in range(1, 11):
# Test large button
if i == 5:
txt = 'This is a very long button!'
else:
txt = label.format(100 * i)
_menu_sub.add_button(txt, on_button_click, 100 * i)
_menu_sub.add_button('Back', pygame_menu.events.BACK)
_menu_sub.center_content()
_menu_text.add_label('Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod '
'tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, '
'quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. '
'Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu '
'fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in '
'culpa qui officia deserunt mollit anim id est laborum.',
max_char=33,
align=pygame_menu.locals.ALIGN_LEFT,
margin=(0, -1))
return _menu
def main(test=False):
"""
Main function.
:param test: Indicate function is being tested
:type test: bool
:return: None
"""
os.environ['SDL_VIDEO_CENTERED'] = '1'
pygame.init()
clock = pygame.time.Clock()
# Create window
screen = pygame.display.set_mode((W_SIZE, H_SIZE))
pygame.display.set_caption('Example - Scrolling Menu')
# Create menu
menu = make_long_menu()
# -------------------------------------------------------------------------
# Main loop
# -------------------------------------------------------------------------
while True:
# Tick
clock.tick(FPS)
# Paint background
paint_background(screen)
# Execute main from principal menu if it is enabled
menu.mainloop(surface=screen,
bgfun=partial(paint_background, screen),
disable_loop=test,
fps_limit=FPS)
# Update surface
pygame.display.flip()
# At first loop returns
if test:
break
if __name__ == '__main__':
main()
| [] | [] | ["SDL_VIDEO_CENTERED"] | [] | ["SDL_VIDEO_CENTERED"] | python | 1 | 0 | |
plugins/cb_buttons.py | import os
if bool(os.environ.get("WEBHOOK", False)):
from sample_config import Config
else:
from config import Config
from plugins.yt_dlp_button import yt_dlp_call_back
from plugins.dl_button import ddl_call_back
from translation import Translation
from pyrogram import Client
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
@Client.on_callback_query()
async def button(bot, update):
if "|" in update.data:
await yt_dlp_call_back(bot, update)
elif "=" in update.data:
await ddl_call_back(bot, update)
elif update.data == "home":
await update.message.edit_text(
text=Translation.START_TEXT.format(update.from_user.mention),
reply_markup=Translation.START_BUTTONS,
disable_web_page_preview=True
)
elif update.data == "help":
await update.message.edit_text(
text=Translation.HELP_TEXT,
reply_markup=Translation.HELP_BUTTONS,
disable_web_page_preview=True
)
elif update.data == "about":
await update.message.edit_text(
text=Translation.ABOUT_TEXT,
reply_markup=Translation.ABOUT_BUTTONS,
disable_web_page_preview=True
)
else:
await update.message.delete()
| [] | [] | ["WEBHOOK"] | [] | ["WEBHOOK"] | python | 1 | 0 | |
pyopenjtalk/__init__.py | import os
from os.path import exists
import pkg_resources
import six
from tqdm.auto import tqdm
if six.PY2:
from urllib import urlretrieve
else:
from urllib.request import urlretrieve
import tarfile
try:
from .version import __version__ # NOQA
except ImportError:
raise ImportError("BUG: version.py doesn't exist. Please file a bug report.")
import locale
from .htsengine import HTSEngine
from .openjtalk import CreateUserDict, OpenJTalk
path_encoding = locale.getpreferredencoding()
# Dictionary directory
# defaults to the package directory where the dictionary will be automatically downloaded
OPEN_JTALK_DICT_DIR = os.environ.get(
"OPEN_JTALK_DICT_DIR",
pkg_resources.resource_filename(__name__, "open_jtalk_dic_utf_8-1.11"),
)
_dict_download_url = "https://github.com/r9y9/open_jtalk/releases/download/v1.11.1"
_DICT_URL = f"{_dict_download_url}/open_jtalk_dic_utf_8-1.11.tar.gz"
# Default mei_normal.voice for HMM-based TTS
DEFAULT_HTS_VOICE = pkg_resources.resource_filename(
__name__, "htsvoice/mei_normal.htsvoice"
)
# Global instance of OpenJTalk
_global_jtalk = None
# Global instance of HTSEngine
# mei_normal.voice is used as default
_global_htsengine = None
# https://github.com/tqdm/tqdm#hooks-and-callbacks
class _TqdmUpTo(tqdm): # type: ignore
def update_to(self, b=1, bsize=1, tsize=None):
if tsize is not None:
self.total = tsize
return self.update(b * bsize - self.n)
def _extract_dic():
global OPEN_JTALK_DICT_DIR
filename = pkg_resources.resource_filename(__name__, "dic.tar.gz")
print('Downloading: "{}"'.format(_DICT_URL))
with _TqdmUpTo(
unit="B",
unit_scale=True,
unit_divisor=1024,
miniters=1,
desc="dic.tar.gz",
) as t: # all optional kwargs
urlretrieve(_DICT_URL, filename, reporthook=t.update_to)
t.total = t.n
print("Extracting tar file {}".format(filename))
with tarfile.open(filename, mode="r|gz") as f:
f.extractall(path=pkg_resources.resource_filename(__name__, ""))
OPEN_JTALK_DICT_DIR = pkg_resources.resource_filename(
__name__, "open_jtalk_dic_utf_8-1.11"
)
os.remove(filename)
def _lazy_init():
if not exists(OPEN_JTALK_DICT_DIR):
_extract_dic()
def g2p(*args, **kwargs):
"""Grapheme-to-phoeneme (G2P) conversion
This is just a convenient wrapper around `run_frontend`.
Args:
text (str): Unicode Japanese text.
kana (bool): If True, returns the pronunciation in katakana, otherwise in phone.
Default is False.
join (bool): If True, concatenate phones or katakana's into a single string.
Default is True.
Returns:
str or list: G2P result in 1) str if join is True 2) list if join is False.
"""
global _global_jtalk
if _global_jtalk is None:
_lazy_init()
_global_jtalk = OpenJTalk(dn_mecab=OPEN_JTALK_DICT_DIR.encode(path_encoding))
return _global_jtalk.g2p(*args, **kwargs)
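# Illustrative usage sketch for g2p(); outputs shown are examples only.
# The first call triggers the lazy dictionary download handled by _lazy_init().
#
#   import pyopenjtalk
#   pyopenjtalk.g2p("こんにちは")             # e.g. "k o N n i ch i w a"
#   pyopenjtalk.g2p("こんにちは", kana=True)   # e.g. "コンニチワ"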
def extract_fullcontext(text):
"""Extract full-context labels from text
Args:
text (str): Input text
Returns:
list: List of full-context labels
"""
# note: drop first return
_, labels = run_frontend(text)
return labels
def synthesize(labels, speed=1.0, half_tone=0.0):
"""Run OpenJTalk's speech synthesis backend
Args:
labels (list): Full-context labels
speed (float): speech speed rate. Default is 1.0.
half_tone (float): additional half-tone. Default is 0.
Returns:
np.ndarray: speech waveform (dtype: np.float64)
int: sampling frequency (default: 48000)
"""
if isinstance(labels, tuple) and len(labels) == 2:
labels = labels[1]
global _global_htsengine
if _global_htsengine is None:
_global_htsengine = HTSEngine(DEFAULT_HTS_VOICE.encode(path_encoding))
sr = _global_htsengine.get_sampling_frequency()
_global_htsengine.set_speed(speed)
_global_htsengine.add_half_tone(half_tone)
return _global_htsengine.synthesize(labels), sr
def tts(text, speed=1.0, half_tone=0.0):
"""Text-to-speech
Args:
text (str): Input text
speed (float): speech speed rate. Default is 1.0.
half_tone (float): additional half-tone. Default is 0.
Returns:
np.ndarray: speech waveform (dtype: np.float64)
int: sampling frequency (default: 48000)
"""
return synthesize(extract_fullcontext(text), speed, half_tone)
def run_frontend(text, verbose=0):
"""Run OpenJTalk's text processing frontend
Args:
text (str): Unicode Japanese text.
verbose (int): Verbosity. Default is 0.
Returns:
tuple: Pair of 1) NJD_print and 2) JPCommon_make_label.
The latter is the full-context labels in HTS-style format.
"""
global _global_jtalk
if _global_jtalk is None:
_lazy_init()
_global_jtalk = OpenJTalk(dn_mecab=OPEN_JTALK_DICT_DIR.encode(path_encoding))
return _global_jtalk.run_frontend(text, verbose)
def create_user_dict(path, out_path):
"""Create user dictionary
Args:
path (str): path to user csv
out_path (str): path to output dictionary
"""
global _global_jtalk
if _global_jtalk is None:
_lazy_init()
if not exists(path):
raise ValueError("no such file or directory: %s" % path)
CreateUserDict(OPEN_JTALK_DICT_DIR.encode(path_encoding), path.encode(path_encoding), out_path.encode(path_encoding))
def set_user_dict(path):
"""Apply user dictionary
Args:
path (str): path to user dictionary
"""
global _global_jtalk
if _global_jtalk is None:
_lazy_init()
if not exists(path):
raise ValueError("no such file or directory: %s" % path)
_global_jtalk = OpenJTalk(
dn_mecab=OPEN_JTALK_DICT_DIR.encode(path_encoding), user_mecab=path.encode(path_encoding)
)
| [] | [] | ["OPEN_JTALK_DICT_DIR"] | [] | ["OPEN_JTALK_DICT_DIR"] | python | 1 | 0 | |
IBM Proactive Technology Online/ProtonOnWebServerAdmin/src/com/ibm/hrl/proton/admin/webapp/testing/GettingEnvVariables.java | /*******************************************************************************
* Copyright 2014 IBM
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.ibm.hrl.proton.admin.webapp.testing;
public class GettingEnvVariables {
public static void main(String[] args) {
try {
//System.out.println("CATALINA_HOME: " + System.getenv("CATALINA_HOME"));
System.out.println("file path separator " + System.getProperty("file.separator"));
System.out.println("server address: " + java.net.InetAddress.getLocalHost().getHostAddress());
System.out.println("server name: " + java.net.InetAddress.getLocalHost().getHostName());
} catch (Exception e) {
System.out.println(e.getMessage());
}
}
}
| ["\"CATALINA_HOME\""] | [] | ["CATALINA_HOME"] | [] | ["CATALINA_HOME"] | java | 1 | 0 | |
vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2018-09-01/containerregistry/models.go | package containerregistry
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"encoding/json"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/date"
"github.com/Azure/go-autorest/autorest/to"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// The package's fully qualified name.
const fqdn = "github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2018-09-01/containerregistry"
// Action enumerates the values for action.
type Action string
const (
// Allow ...
Allow Action = "Allow"
)
// PossibleActionValues returns an array of possible values for the Action const type.
func PossibleActionValues() []Action {
return []Action{Allow}
}
// Architecture enumerates the values for architecture.
type Architecture string
const (
// Amd64 ...
Amd64 Architecture = "amd64"
// Arm ...
Arm Architecture = "arm"
// X86 ...
X86 Architecture = "x86"
)
// PossibleArchitectureValues returns an array of possible values for the Architecture const type.
func PossibleArchitectureValues() []Architecture {
return []Architecture{Amd64, Arm, X86}
}
// BaseImageDependencyType enumerates the values for base image dependency type.
type BaseImageDependencyType string
const (
// BuildTime ...
BuildTime BaseImageDependencyType = "BuildTime"
// RunTime ...
RunTime BaseImageDependencyType = "RunTime"
)
// PossibleBaseImageDependencyTypeValues returns an array of possible values for the BaseImageDependencyType const type.
func PossibleBaseImageDependencyTypeValues() []BaseImageDependencyType {
return []BaseImageDependencyType{BuildTime, RunTime}
}
// BaseImageTriggerType enumerates the values for base image trigger type.
type BaseImageTriggerType string
const (
// All ...
All BaseImageTriggerType = "All"
// Runtime ...
Runtime BaseImageTriggerType = "Runtime"
)
// PossibleBaseImageTriggerTypeValues returns an array of possible values for the BaseImageTriggerType const type.
func PossibleBaseImageTriggerTypeValues() []BaseImageTriggerType {
return []BaseImageTriggerType{All, Runtime}
}
// DefaultAction enumerates the values for default action.
type DefaultAction string
const (
// DefaultActionAllow ...
DefaultActionAllow DefaultAction = "Allow"
// DefaultActionDeny ...
DefaultActionDeny DefaultAction = "Deny"
)
// PossibleDefaultActionValues returns an array of possible values for the DefaultAction const type.
func PossibleDefaultActionValues() []DefaultAction {
return []DefaultAction{DefaultActionAllow, DefaultActionDeny}
}
// ImportMode enumerates the values for import mode.
type ImportMode string
const (
// Force ...
Force ImportMode = "Force"
// NoForce ...
NoForce ImportMode = "NoForce"
)
// PossibleImportModeValues returns an array of possible values for the ImportMode const type.
func PossibleImportModeValues() []ImportMode {
return []ImportMode{Force, NoForce}
}
// OS enumerates the values for os.
type OS string
const (
// Linux ...
Linux OS = "Linux"
// Windows ...
Windows OS = "Windows"
)
// PossibleOSValues returns an array of possible values for the OS const type.
func PossibleOSValues() []OS {
return []OS{Linux, Windows}
}
// PasswordName enumerates the values for password name.
type PasswordName string
const (
// Password ...
Password PasswordName = "password"
// Password2 ...
Password2 PasswordName = "password2"
)
// PossiblePasswordNameValues returns an array of possible values for the PasswordName const type.
func PossiblePasswordNameValues() []PasswordName {
return []PasswordName{Password, Password2}
}
// PolicyStatus enumerates the values for policy status.
type PolicyStatus string
const (
// Disabled ...
Disabled PolicyStatus = "disabled"
// Enabled ...
Enabled PolicyStatus = "enabled"
)
// PossiblePolicyStatusValues returns an array of possible values for the PolicyStatus const type.
func PossiblePolicyStatusValues() []PolicyStatus {
return []PolicyStatus{Disabled, Enabled}
}
// ProvisioningState enumerates the values for provisioning state.
type ProvisioningState string
const (
// Canceled ...
Canceled ProvisioningState = "Canceled"
// Creating ...
Creating ProvisioningState = "Creating"
// Deleting ...
Deleting ProvisioningState = "Deleting"
// Failed ...
Failed ProvisioningState = "Failed"
// Succeeded ...
Succeeded ProvisioningState = "Succeeded"
// Updating ...
Updating ProvisioningState = "Updating"
)
// PossibleProvisioningStateValues returns an array of possible values for the ProvisioningState const type.
func PossibleProvisioningStateValues() []ProvisioningState {
return []ProvisioningState{Canceled, Creating, Deleting, Failed, Succeeded, Updating}
}
// RegistryUsageUnit enumerates the values for registry usage unit.
type RegistryUsageUnit string
const (
// Bytes ...
Bytes RegistryUsageUnit = "Bytes"
// Count ...
Count RegistryUsageUnit = "Count"
)
// PossibleRegistryUsageUnitValues returns an array of possible values for the RegistryUsageUnit const type.
func PossibleRegistryUsageUnitValues() []RegistryUsageUnit {
return []RegistryUsageUnit{Bytes, Count}
}
// RunStatus enumerates the values for run status.
type RunStatus string
const (
// RunStatusCanceled ...
RunStatusCanceled RunStatus = "Canceled"
// RunStatusError ...
RunStatusError RunStatus = "Error"
// RunStatusFailed ...
RunStatusFailed RunStatus = "Failed"
// RunStatusQueued ...
RunStatusQueued RunStatus = "Queued"
// RunStatusRunning ...
RunStatusRunning RunStatus = "Running"
// RunStatusStarted ...
RunStatusStarted RunStatus = "Started"
// RunStatusSucceeded ...
RunStatusSucceeded RunStatus = "Succeeded"
// RunStatusTimeout ...
RunStatusTimeout RunStatus = "Timeout"
)
// PossibleRunStatusValues returns an array of possible values for the RunStatus const type.
func PossibleRunStatusValues() []RunStatus {
return []RunStatus{RunStatusCanceled, RunStatusError, RunStatusFailed, RunStatusQueued, RunStatusRunning, RunStatusStarted, RunStatusSucceeded, RunStatusTimeout}
}
// RunType enumerates the values for run type.
type RunType string
const (
// AutoBuild ...
AutoBuild RunType = "AutoBuild"
// AutoRun ...
AutoRun RunType = "AutoRun"
// QuickBuild ...
QuickBuild RunType = "QuickBuild"
// QuickRun ...
QuickRun RunType = "QuickRun"
)
// PossibleRunTypeValues returns an array of possible values for the RunType const type.
func PossibleRunTypeValues() []RunType {
return []RunType{AutoBuild, AutoRun, QuickBuild, QuickRun}
}
// SecretObjectType enumerates the values for secret object type.
type SecretObjectType string
const (
// Opaque ...
Opaque SecretObjectType = "Opaque"
)
// PossibleSecretObjectTypeValues returns an array of possible values for the SecretObjectType const type.
func PossibleSecretObjectTypeValues() []SecretObjectType {
return []SecretObjectType{Opaque}
}
// SkuName enumerates the values for sku name.
type SkuName string
const (
// Basic ...
Basic SkuName = "Basic"
// Classic ...
Classic SkuName = "Classic"
// Premium ...
Premium SkuName = "Premium"
// Standard ...
Standard SkuName = "Standard"
)
// PossibleSkuNameValues returns an array of possible values for the SkuName const type.
func PossibleSkuNameValues() []SkuName {
return []SkuName{Basic, Classic, Premium, Standard}
}
// SkuTier enumerates the values for sku tier.
type SkuTier string
const (
// SkuTierBasic ...
SkuTierBasic SkuTier = "Basic"
// SkuTierClassic ...
SkuTierClassic SkuTier = "Classic"
// SkuTierPremium ...
SkuTierPremium SkuTier = "Premium"
// SkuTierStandard ...
SkuTierStandard SkuTier = "Standard"
)
// PossibleSkuTierValues returns an array of possible values for the SkuTier const type.
func PossibleSkuTierValues() []SkuTier {
return []SkuTier{SkuTierBasic, SkuTierClassic, SkuTierPremium, SkuTierStandard}
}
// SourceControlType enumerates the values for source control type.
type SourceControlType string
const (
// Github ...
Github SourceControlType = "Github"
// VisualStudioTeamService ...
VisualStudioTeamService SourceControlType = "VisualStudioTeamService"
)
// PossibleSourceControlTypeValues returns an array of possible values for the SourceControlType const type.
func PossibleSourceControlTypeValues() []SourceControlType {
return []SourceControlType{Github, VisualStudioTeamService}
}
// SourceRegistryLoginMode enumerates the values for source registry login mode.
type SourceRegistryLoginMode string
const (
// Default ...
Default SourceRegistryLoginMode = "Default"
// None ...
None SourceRegistryLoginMode = "None"
)
// PossibleSourceRegistryLoginModeValues returns an array of possible values for the SourceRegistryLoginMode const type.
func PossibleSourceRegistryLoginModeValues() []SourceRegistryLoginMode {
return []SourceRegistryLoginMode{Default, None}
}
// SourceTriggerEvent enumerates the values for source trigger event.
type SourceTriggerEvent string
const (
// Commit ...
Commit SourceTriggerEvent = "commit"
// Pullrequest ...
Pullrequest SourceTriggerEvent = "pullrequest"
)
// PossibleSourceTriggerEventValues returns an array of possible values for the SourceTriggerEvent const type.
func PossibleSourceTriggerEventValues() []SourceTriggerEvent {
return []SourceTriggerEvent{Commit, Pullrequest}
}
// TaskStatus enumerates the values for task status.
type TaskStatus string
const (
// TaskStatusDisabled ...
TaskStatusDisabled TaskStatus = "Disabled"
// TaskStatusEnabled ...
TaskStatusEnabled TaskStatus = "Enabled"
)
// PossibleTaskStatusValues returns an array of possible values for the TaskStatus const type.
func PossibleTaskStatusValues() []TaskStatus {
return []TaskStatus{TaskStatusDisabled, TaskStatusEnabled}
}
// TokenType enumerates the values for token type.
type TokenType string
const (
// OAuth ...
OAuth TokenType = "OAuth"
// PAT ...
PAT TokenType = "PAT"
)
// PossibleTokenTypeValues returns an array of possible values for the TokenType const type.
func PossibleTokenTypeValues() []TokenType {
return []TokenType{OAuth, PAT}
}
// TriggerStatus enumerates the values for trigger status.
type TriggerStatus string
const (
// TriggerStatusDisabled ...
TriggerStatusDisabled TriggerStatus = "Disabled"
// TriggerStatusEnabled ...
TriggerStatusEnabled TriggerStatus = "Enabled"
)
// PossibleTriggerStatusValues returns an array of possible values for the TriggerStatus const type.
func PossibleTriggerStatusValues() []TriggerStatus {
return []TriggerStatus{TriggerStatusDisabled, TriggerStatusEnabled}
}
// TrustPolicyType enumerates the values for trust policy type.
type TrustPolicyType string
const (
// Notary ...
Notary TrustPolicyType = "Notary"
)
// PossibleTrustPolicyTypeValues returns an array of possible values for the TrustPolicyType const type.
func PossibleTrustPolicyTypeValues() []TrustPolicyType {
return []TrustPolicyType{Notary}
}
// Type enumerates the values for type.
type Type string
const (
// TypeDockerBuildRequest ...
TypeDockerBuildRequest Type = "DockerBuildRequest"
// TypeEncodedTaskRunRequest ...
TypeEncodedTaskRunRequest Type = "EncodedTaskRunRequest"
// TypeFileTaskRunRequest ...
TypeFileTaskRunRequest Type = "FileTaskRunRequest"
// TypeRunRequest ...
TypeRunRequest Type = "RunRequest"
// TypeTaskRunRequest ...
TypeTaskRunRequest Type = "TaskRunRequest"
)
// PossibleTypeValues returns an array of possible values for the Type const type.
func PossibleTypeValues() []Type {
return []Type{TypeDockerBuildRequest, TypeEncodedTaskRunRequest, TypeFileTaskRunRequest, TypeRunRequest, TypeTaskRunRequest}
}
// TypeBasicTaskStepProperties enumerates the values for type basic task step properties.
type TypeBasicTaskStepProperties string
const (
// TypeDocker ...
TypeDocker TypeBasicTaskStepProperties = "Docker"
// TypeEncodedTask ...
TypeEncodedTask TypeBasicTaskStepProperties = "EncodedTask"
// TypeFileTask ...
TypeFileTask TypeBasicTaskStepProperties = "FileTask"
// TypeTaskStepProperties ...
TypeTaskStepProperties TypeBasicTaskStepProperties = "TaskStepProperties"
)
// PossibleTypeBasicTaskStepPropertiesValues returns an array of possible values for the TypeBasicTaskStepProperties const type.
func PossibleTypeBasicTaskStepPropertiesValues() []TypeBasicTaskStepProperties {
return []TypeBasicTaskStepProperties{TypeDocker, TypeEncodedTask, TypeFileTask, TypeTaskStepProperties}
}
// TypeBasicTaskStepUpdateParameters enumerates the values for type basic task step update parameters.
type TypeBasicTaskStepUpdateParameters string
const (
// TypeBasicTaskStepUpdateParametersTypeDocker ...
TypeBasicTaskStepUpdateParametersTypeDocker TypeBasicTaskStepUpdateParameters = "Docker"
// TypeBasicTaskStepUpdateParametersTypeEncodedTask ...
TypeBasicTaskStepUpdateParametersTypeEncodedTask TypeBasicTaskStepUpdateParameters = "EncodedTask"
// TypeBasicTaskStepUpdateParametersTypeFileTask ...
TypeBasicTaskStepUpdateParametersTypeFileTask TypeBasicTaskStepUpdateParameters = "FileTask"
// TypeBasicTaskStepUpdateParametersTypeTaskStepUpdateParameters ...
TypeBasicTaskStepUpdateParametersTypeTaskStepUpdateParameters TypeBasicTaskStepUpdateParameters = "TaskStepUpdateParameters"
)
// PossibleTypeBasicTaskStepUpdateParametersValues returns an array of possible values for the TypeBasicTaskStepUpdateParameters const type.
func PossibleTypeBasicTaskStepUpdateParametersValues() []TypeBasicTaskStepUpdateParameters {
return []TypeBasicTaskStepUpdateParameters{TypeBasicTaskStepUpdateParametersTypeDocker, TypeBasicTaskStepUpdateParametersTypeEncodedTask, TypeBasicTaskStepUpdateParametersTypeFileTask, TypeBasicTaskStepUpdateParametersTypeTaskStepUpdateParameters}
}
// Variant enumerates the values for variant.
type Variant string
const (
// V6 ...
V6 Variant = "v6"
// V7 ...
V7 Variant = "v7"
// V8 ...
V8 Variant = "v8"
)
// PossibleVariantValues returns an array of possible values for the Variant const type.
func PossibleVariantValues() []Variant {
return []Variant{V6, V7, V8}
}
// WebhookAction enumerates the values for webhook action.
type WebhookAction string
const (
// ChartDelete ...
ChartDelete WebhookAction = "chart_delete"
// ChartPush ...
ChartPush WebhookAction = "chart_push"
// Delete ...
Delete WebhookAction = "delete"
// Push ...
Push WebhookAction = "push"
// Quarantine ...
Quarantine WebhookAction = "quarantine"
)
// PossibleWebhookActionValues returns an array of possible values for the WebhookAction const type.
func PossibleWebhookActionValues() []WebhookAction {
return []WebhookAction{ChartDelete, ChartPush, Delete, Push, Quarantine}
}
// WebhookStatus enumerates the values for webhook status.
type WebhookStatus string
const (
// WebhookStatusDisabled ...
WebhookStatusDisabled WebhookStatus = "disabled"
// WebhookStatusEnabled ...
WebhookStatusEnabled WebhookStatus = "enabled"
)
// PossibleWebhookStatusValues returns an array of possible values for the WebhookStatus const type.
func PossibleWebhookStatusValues() []WebhookStatus {
return []WebhookStatus{WebhookStatusDisabled, WebhookStatusEnabled}
}
// Actor the agent that initiated the event. For most situations, this could be from the authorization
// context of the request.
type Actor struct {
// Name - The subject or username associated with the request context that generated the event.
Name *string `json:"name,omitempty"`
}
// AgentProperties the properties that determine the run agent configuration.
type AgentProperties struct {
// CPU - The CPU configuration in terms of number of cores required for the run.
CPU *int32 `json:"cpu,omitempty"`
}
// Argument the properties of a run argument.
type Argument struct {
// Name - The name of the argument.
Name *string `json:"name,omitempty"`
// Value - The value of the argument.
Value *string `json:"value,omitempty"`
// IsSecret - Flag to indicate whether the argument represents a secret and want to be removed from build logs.
IsSecret *bool `json:"isSecret,omitempty"`
}
// AuthInfo the authorization properties for accessing the source code repository.
type AuthInfo struct {
// TokenType - The type of Auth token. Possible values include: 'PAT', 'OAuth'
TokenType TokenType `json:"tokenType,omitempty"`
// Token - The access token used to access the source control provider.
Token *string `json:"token,omitempty"`
// RefreshToken - The refresh token used to refresh the access token.
RefreshToken *string `json:"refreshToken,omitempty"`
// Scope - The scope of the access token.
Scope *string `json:"scope,omitempty"`
// ExpiresIn - Time in seconds that the token remains valid
ExpiresIn *int32 `json:"expiresIn,omitempty"`
}
// AuthInfoUpdateParameters the authorization properties for accessing the source code repository.
type AuthInfoUpdateParameters struct {
// TokenType - The type of Auth token. Possible values include: 'PAT', 'OAuth'
TokenType TokenType `json:"tokenType,omitempty"`
// Token - The access token used to access the source control provider.
Token *string `json:"token,omitempty"`
// RefreshToken - The refresh token used to refresh the access token.
RefreshToken *string `json:"refreshToken,omitempty"`
// Scope - The scope of the access token.
Scope *string `json:"scope,omitempty"`
// ExpiresIn - Time in seconds that the token remains valid
ExpiresIn *int32 `json:"expiresIn,omitempty"`
}
// BaseImageDependency properties that describe a base image dependency.
type BaseImageDependency struct {
// Type - The type of the base image dependency. Possible values include: 'BuildTime', 'RunTime'
Type BaseImageDependencyType `json:"type,omitempty"`
// Registry - The registry login server.
Registry *string `json:"registry,omitempty"`
// Repository - The repository name.
Repository *string `json:"repository,omitempty"`
// Tag - The tag name.
Tag *string `json:"tag,omitempty"`
// Digest - The sha256-based digest of the image manifest.
Digest *string `json:"digest,omitempty"`
}
// BaseImageTrigger the trigger based on base image dependency.
type BaseImageTrigger struct {
// BaseImageTriggerType - The type of the auto trigger for base image dependency updates. Possible values include: 'All', 'Runtime'
BaseImageTriggerType BaseImageTriggerType `json:"baseImageTriggerType,omitempty"`
// Status - The current status of trigger. Possible values include: 'TriggerStatusDisabled', 'TriggerStatusEnabled'
Status TriggerStatus `json:"status,omitempty"`
// Name - The name of the trigger.
Name *string `json:"name,omitempty"`
}
// BaseImageTriggerUpdateParameters the properties for updating base image dependency trigger.
type BaseImageTriggerUpdateParameters struct {
// BaseImageTriggerType - The type of the auto trigger for base image dependency updates. Possible values include: 'All', 'Runtime'
BaseImageTriggerType BaseImageTriggerType `json:"baseImageTriggerType,omitempty"`
// Status - The current status of trigger. Possible values include: 'TriggerStatusDisabled', 'TriggerStatusEnabled'
Status TriggerStatus `json:"status,omitempty"`
// Name - The name of the trigger.
Name *string `json:"name,omitempty"`
}
// CallbackConfig the configuration of service URI and custom headers for the webhook.
type CallbackConfig struct {
autorest.Response `json:"-"`
// ServiceURI - The service URI for the webhook to post notifications.
ServiceURI *string `json:"serviceUri,omitempty"`
// CustomHeaders - Custom headers that will be added to the webhook notifications.
CustomHeaders map[string]*string `json:"customHeaders"`
}
// MarshalJSON is the custom marshaler for CallbackConfig.
func (cc CallbackConfig) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if cc.ServiceURI != nil {
objectMap["serviceUri"] = cc.ServiceURI
}
if cc.CustomHeaders != nil {
objectMap["customHeaders"] = cc.CustomHeaders
}
return json.Marshal(objectMap)
}
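// exampleCallbackConfigJSON is a minimal, hypothetical sketch (not part of the generated
// SDK surface) showing how the custom marshaler above omits nil fields: only the service
// URI and the supplied custom headers end up in the JSON payload. All values are
// placeholders.
func exampleCallbackConfigJSON() ([]byte, error) {
	uri := "https://example.invalid/webhook"
	token := "placeholder-header-value"
	cc := CallbackConfig{
		ServiceURI:    &uri,
		CustomHeaders: map[string]*string{"Authorization": &token},
	}
	// json.Marshal dispatches to CallbackConfig.MarshalJSON defined above.
	return json.Marshal(cc)
}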
// Credentials the parameters that describe a set of credentials that will be used when a run is invoked.
type Credentials struct {
// SourceRegistry - Describes the credential parameters for accessing the source registry.
SourceRegistry *SourceRegistryCredentials `json:"sourceRegistry,omitempty"`
// CustomRegistries - Describes the credential parameters for accessing other custom registries. The key
// for the dictionary item will be the registry login server (myregistry.azurecr.io) and
// the value of the item will be the registry credentials for accessing the registry.
CustomRegistries map[string]*CustomRegistryCredentials `json:"customRegistries"`
}
// MarshalJSON is the custom marshaler for Credentials.
func (c Credentials) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if c.SourceRegistry != nil {
objectMap["sourceRegistry"] = c.SourceRegistry
}
if c.CustomRegistries != nil {
objectMap["customRegistries"] = c.CustomRegistries
}
return json.Marshal(objectMap)
}
// CustomRegistryCredentials describes the credentials that will be used to access a custom registry during
// a run.
type CustomRegistryCredentials struct {
// UserName - The username for logging into the custom registry.
UserName *SecretObject `json:"userName,omitempty"`
// Password - The password for logging into the custom registry. The password is a secret
// object that allows multiple ways of providing the value for it.
Password *SecretObject `json:"password,omitempty"`
}
// DockerBuildRequest the parameters for a docker quick build.
type DockerBuildRequest struct {
// ImageNames - The fully qualified image names including the repository and tag.
ImageNames *[]string `json:"imageNames,omitempty"`
// IsPushEnabled - The value of this property indicates whether the image built should be pushed to the registry or not.
IsPushEnabled *bool `json:"isPushEnabled,omitempty"`
// NoCache - The value of this property indicates whether the image cache is enabled or not.
NoCache *bool `json:"noCache,omitempty"`
// DockerFilePath - The Docker file path relative to the source location.
DockerFilePath *string `json:"dockerFilePath,omitempty"`
// Target - The name of the target build stage for the docker build.
Target *string `json:"target,omitempty"`
// Arguments - The collection of override arguments to be used when executing the run.
Arguments *[]Argument `json:"arguments,omitempty"`
// Timeout - Run timeout in seconds.
Timeout *int32 `json:"timeout,omitempty"`
// Platform - The platform properties against which the run has to happen.
Platform *PlatformProperties `json:"platform,omitempty"`
// AgentConfiguration - The machine configuration of the run agent.
AgentConfiguration *AgentProperties `json:"agentConfiguration,omitempty"`
// SourceLocation - The URL (absolute or relative) of the source context. It can be a URL to a tar or git repository.
// If it is a relative URL, the relative path should be obtained by calling the listBuildSourceUploadUrl API.
SourceLocation *string `json:"sourceLocation,omitempty"`
// Credentials - The properties that describe a set of credentials that will be used when this run is invoked.
Credentials *Credentials `json:"credentials,omitempty"`
// IsArchiveEnabled - The value that indicates whether archiving is enabled for the run or not.
IsArchiveEnabled *bool `json:"isArchiveEnabled,omitempty"`
// Type - Possible values include: 'TypeRunRequest', 'TypeDockerBuildRequest', 'TypeFileTaskRunRequest', 'TypeTaskRunRequest', 'TypeEncodedTaskRunRequest'
Type Type `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for DockerBuildRequest.
func (dbr DockerBuildRequest) MarshalJSON() ([]byte, error) {
dbr.Type = TypeDockerBuildRequest
objectMap := make(map[string]interface{})
if dbr.ImageNames != nil {
objectMap["imageNames"] = dbr.ImageNames
}
if dbr.IsPushEnabled != nil {
objectMap["isPushEnabled"] = dbr.IsPushEnabled
}
if dbr.NoCache != nil {
objectMap["noCache"] = dbr.NoCache
}
if dbr.DockerFilePath != nil {
objectMap["dockerFilePath"] = dbr.DockerFilePath
}
if dbr.Target != nil {
objectMap["target"] = dbr.Target
}
if dbr.Arguments != nil {
objectMap["arguments"] = dbr.Arguments
}
if dbr.Timeout != nil {
objectMap["timeout"] = dbr.Timeout
}
if dbr.Platform != nil {
objectMap["platform"] = dbr.Platform
}
if dbr.AgentConfiguration != nil {
objectMap["agentConfiguration"] = dbr.AgentConfiguration
}
if dbr.SourceLocation != nil {
objectMap["sourceLocation"] = dbr.SourceLocation
}
if dbr.Credentials != nil {
objectMap["credentials"] = dbr.Credentials
}
if dbr.IsArchiveEnabled != nil {
objectMap["isArchiveEnabled"] = dbr.IsArchiveEnabled
}
if dbr.Type != "" {
objectMap["type"] = dbr.Type
}
return json.Marshal(objectMap)
}
// AsDockerBuildRequest is the BasicRunRequest implementation for DockerBuildRequest.
func (dbr DockerBuildRequest) AsDockerBuildRequest() (*DockerBuildRequest, bool) {
return &dbr, true
}
// AsFileTaskRunRequest is the BasicRunRequest implementation for DockerBuildRequest.
func (dbr DockerBuildRequest) AsFileTaskRunRequest() (*FileTaskRunRequest, bool) {
return nil, false
}
// AsTaskRunRequest is the BasicRunRequest implementation for DockerBuildRequest.
func (dbr DockerBuildRequest) AsTaskRunRequest() (*TaskRunRequest, bool) {
return nil, false
}
// AsEncodedTaskRunRequest is the BasicRunRequest implementation for DockerBuildRequest.
func (dbr DockerBuildRequest) AsEncodedTaskRunRequest() (*EncodedTaskRunRequest, bool) {
return nil, false
}
// AsRunRequest is the BasicRunRequest implementation for DockerBuildRequest.
func (dbr DockerBuildRequest) AsRunRequest() (*RunRequest, bool) {
return nil, false
}
// AsBasicRunRequest is the BasicRunRequest implementation for DockerBuildRequest.
func (dbr DockerBuildRequest) AsBasicRunRequest() (BasicRunRequest, bool) {
return &dbr, true
}
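// exampleRunRequestDispatch is a hypothetical sketch (not generated code) showing how the
// As* helpers above recover the concrete type from a BasicRunRequest interface value; it
// assumes BasicRunRequest declares AsDockerBuildRequest, as the generated interface does.
func exampleRunRequestDispatch(rr BasicRunRequest) *string {
	if dbr, ok := rr.AsDockerBuildRequest(); ok {
		// The request is a docker build; expose its Dockerfile path.
		return dbr.DockerFilePath
	}
	return nil
}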
// DockerBuildStep the Docker build step.
type DockerBuildStep struct {
// ImageNames - The fully qualified image names including the repository and tag.
ImageNames *[]string `json:"imageNames,omitempty"`
// IsPushEnabled - The value of this property indicates whether the image built should be pushed to the registry or not.
IsPushEnabled *bool `json:"isPushEnabled,omitempty"`
// NoCache - The value of this property indicates whether the image cache is enabled or not.
NoCache *bool `json:"noCache,omitempty"`
// DockerFilePath - The Docker file path relative to the source context.
DockerFilePath *string `json:"dockerFilePath,omitempty"`
// Target - The name of the target build stage for the docker build.
Target *string `json:"target,omitempty"`
// Arguments - The collection of override arguments to be used when executing this build step.
Arguments *[]Argument `json:"arguments,omitempty"`
// BaseImageDependencies - List of base image dependencies for a step.
BaseImageDependencies *[]BaseImageDependency `json:"baseImageDependencies,omitempty"`
// ContextPath - The URL (absolute or relative) of the source context for the task step.
ContextPath *string `json:"contextPath,omitempty"`
// ContextAccessToken - The token (git PAT or SAS token of storage account blob) associated with the context for a step.
ContextAccessToken *string `json:"contextAccessToken,omitempty"`
// Type - Possible values include: 'TypeTaskStepProperties', 'TypeDocker', 'TypeFileTask', 'TypeEncodedTask'
Type TypeBasicTaskStepProperties `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for DockerBuildStep.
func (dbs DockerBuildStep) MarshalJSON() ([]byte, error) {
dbs.Type = TypeDocker
objectMap := make(map[string]interface{})
if dbs.ImageNames != nil {
objectMap["imageNames"] = dbs.ImageNames
}
if dbs.IsPushEnabled != nil {
objectMap["isPushEnabled"] = dbs.IsPushEnabled
}
if dbs.NoCache != nil {
objectMap["noCache"] = dbs.NoCache
}
if dbs.DockerFilePath != nil {
objectMap["dockerFilePath"] = dbs.DockerFilePath
}
if dbs.Target != nil {
objectMap["target"] = dbs.Target
}
if dbs.Arguments != nil {
objectMap["arguments"] = dbs.Arguments
}
if dbs.BaseImageDependencies != nil {
objectMap["baseImageDependencies"] = dbs.BaseImageDependencies
}
if dbs.ContextPath != nil {
objectMap["contextPath"] = dbs.ContextPath
}
if dbs.ContextAccessToken != nil {
objectMap["contextAccessToken"] = dbs.ContextAccessToken
}
if dbs.Type != "" {
objectMap["type"] = dbs.Type
}
return json.Marshal(objectMap)
}
// AsDockerBuildStep is the BasicTaskStepProperties implementation for DockerBuildStep.
func (dbs DockerBuildStep) AsDockerBuildStep() (*DockerBuildStep, bool) {
return &dbs, true
}
// AsFileTaskStep is the BasicTaskStepProperties implementation for DockerBuildStep.
func (dbs DockerBuildStep) AsFileTaskStep() (*FileTaskStep, bool) {
return nil, false
}
// AsEncodedTaskStep is the BasicTaskStepProperties implementation for DockerBuildStep.
func (dbs DockerBuildStep) AsEncodedTaskStep() (*EncodedTaskStep, bool) {
return nil, false
}
// AsTaskStepProperties is the BasicTaskStepProperties implementation for DockerBuildStep.
func (dbs DockerBuildStep) AsTaskStepProperties() (*TaskStepProperties, bool) {
return nil, false
}
// AsBasicTaskStepProperties is the BasicTaskStepProperties implementation for DockerBuildStep.
func (dbs DockerBuildStep) AsBasicTaskStepProperties() (BasicTaskStepProperties, bool) {
return &dbs, true
}
// DockerBuildStepUpdateParameters the properties for updating a docker build step.
type DockerBuildStepUpdateParameters struct {
// ImageNames - The fully qualified image names including the repository and tag.
ImageNames *[]string `json:"imageNames,omitempty"`
// IsPushEnabled - The value of this property indicates whether the image built should be pushed to the registry or not.
IsPushEnabled *bool `json:"isPushEnabled,omitempty"`
// NoCache - The value of this property indicates whether the image cache is enabled or not.
NoCache *bool `json:"noCache,omitempty"`
// DockerFilePath - The Docker file path relative to the source context.
DockerFilePath *string `json:"dockerFilePath,omitempty"`
// Arguments - The collection of override arguments to be used when executing this build step.
Arguments *[]Argument `json:"arguments,omitempty"`
// Target - The name of the target build stage for the docker build.
Target *string `json:"target,omitempty"`
// ContextPath - The URL (absolute or relative) of the source context for the task step.
ContextPath *string `json:"contextPath,omitempty"`
// ContextAccessToken - The token (git PAT or SAS token of storage account blob) associated with the context for a step.
ContextAccessToken *string `json:"contextAccessToken,omitempty"`
// Type - Possible values include: 'TypeBasicTaskStepUpdateParametersTypeTaskStepUpdateParameters', 'TypeBasicTaskStepUpdateParametersTypeDocker', 'TypeBasicTaskStepUpdateParametersTypeFileTask', 'TypeBasicTaskStepUpdateParametersTypeEncodedTask'
Type TypeBasicTaskStepUpdateParameters `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for DockerBuildStepUpdateParameters.
func (dbsup DockerBuildStepUpdateParameters) MarshalJSON() ([]byte, error) {
dbsup.Type = TypeBasicTaskStepUpdateParametersTypeDocker
objectMap := make(map[string]interface{})
if dbsup.ImageNames != nil {
objectMap["imageNames"] = dbsup.ImageNames
}
if dbsup.IsPushEnabled != nil {
objectMap["isPushEnabled"] = dbsup.IsPushEnabled
}
if dbsup.NoCache != nil {
objectMap["noCache"] = dbsup.NoCache
}
if dbsup.DockerFilePath != nil {
objectMap["dockerFilePath"] = dbsup.DockerFilePath
}
if dbsup.Arguments != nil {
objectMap["arguments"] = dbsup.Arguments
}
if dbsup.Target != nil {
objectMap["target"] = dbsup.Target
}
if dbsup.ContextPath != nil {
objectMap["contextPath"] = dbsup.ContextPath
}
if dbsup.ContextAccessToken != nil {
objectMap["contextAccessToken"] = dbsup.ContextAccessToken
}
if dbsup.Type != "" {
objectMap["type"] = dbsup.Type
}
return json.Marshal(objectMap)
}
// AsDockerBuildStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for DockerBuildStepUpdateParameters.
func (dbsup DockerBuildStepUpdateParameters) AsDockerBuildStepUpdateParameters() (*DockerBuildStepUpdateParameters, bool) {
return &dbsup, true
}
// AsFileTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for DockerBuildStepUpdateParameters.
func (dbsup DockerBuildStepUpdateParameters) AsFileTaskStepUpdateParameters() (*FileTaskStepUpdateParameters, bool) {
return nil, false
}
// AsEncodedTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for DockerBuildStepUpdateParameters.
func (dbsup DockerBuildStepUpdateParameters) AsEncodedTaskStepUpdateParameters() (*EncodedTaskStepUpdateParameters, bool) {
return nil, false
}
// AsTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for DockerBuildStepUpdateParameters.
func (dbsup DockerBuildStepUpdateParameters) AsTaskStepUpdateParameters() (*TaskStepUpdateParameters, bool) {
return nil, false
}
// AsBasicTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for DockerBuildStepUpdateParameters.
func (dbsup DockerBuildStepUpdateParameters) AsBasicTaskStepUpdateParameters() (BasicTaskStepUpdateParameters, bool) {
return &dbsup, true
}
// EncodedTaskRunRequest the parameters for a quick task run request.
type EncodedTaskRunRequest struct {
// EncodedTaskContent - Base64 encoded value of the template/definition file content.
EncodedTaskContent *string `json:"encodedTaskContent,omitempty"`
// EncodedValuesContent - Base64 encoded value of the parameters/values file content.
EncodedValuesContent *string `json:"encodedValuesContent,omitempty"`
// Values - The collection of overridable values that can be passed when running a task.
Values *[]SetValue `json:"values,omitempty"`
// Timeout - Run timeout in seconds.
Timeout *int32 `json:"timeout,omitempty"`
// Platform - The platform properties against which the run has to happen.
Platform *PlatformProperties `json:"platform,omitempty"`
// AgentConfiguration - The machine configuration of the run agent.
AgentConfiguration *AgentProperties `json:"agentConfiguration,omitempty"`
// SourceLocation - The URL (absolute or relative) of the source context. It can be a URL to a tar or git repository.
// If it is a relative URL, the relative path should be obtained by calling the listBuildSourceUploadUrl API.
SourceLocation *string `json:"sourceLocation,omitempty"`
// Credentials - The properties that describe a set of credentials that will be used when this run is invoked.
Credentials *Credentials `json:"credentials,omitempty"`
// IsArchiveEnabled - The value that indicates whether archiving is enabled for the run or not.
IsArchiveEnabled *bool `json:"isArchiveEnabled,omitempty"`
// Type - Possible values include: 'TypeRunRequest', 'TypeDockerBuildRequest', 'TypeFileTaskRunRequest', 'TypeTaskRunRequest', 'TypeEncodedTaskRunRequest'
Type Type `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for EncodedTaskRunRequest.
func (etrr EncodedTaskRunRequest) MarshalJSON() ([]byte, error) {
etrr.Type = TypeEncodedTaskRunRequest
objectMap := make(map[string]interface{})
if etrr.EncodedTaskContent != nil {
objectMap["encodedTaskContent"] = etrr.EncodedTaskContent
}
if etrr.EncodedValuesContent != nil {
objectMap["encodedValuesContent"] = etrr.EncodedValuesContent
}
if etrr.Values != nil {
objectMap["values"] = etrr.Values
}
if etrr.Timeout != nil {
objectMap["timeout"] = etrr.Timeout
}
if etrr.Platform != nil {
objectMap["platform"] = etrr.Platform
}
if etrr.AgentConfiguration != nil {
objectMap["agentConfiguration"] = etrr.AgentConfiguration
}
if etrr.SourceLocation != nil {
objectMap["sourceLocation"] = etrr.SourceLocation
}
if etrr.Credentials != nil {
objectMap["credentials"] = etrr.Credentials
}
if etrr.IsArchiveEnabled != nil {
objectMap["isArchiveEnabled"] = etrr.IsArchiveEnabled
}
if etrr.Type != "" {
objectMap["type"] = etrr.Type
}
return json.Marshal(objectMap)
}
// AsDockerBuildRequest is the BasicRunRequest implementation for EncodedTaskRunRequest.
func (etrr EncodedTaskRunRequest) AsDockerBuildRequest() (*DockerBuildRequest, bool) {
return nil, false
}
// AsFileTaskRunRequest is the BasicRunRequest implementation for EncodedTaskRunRequest.
func (etrr EncodedTaskRunRequest) AsFileTaskRunRequest() (*FileTaskRunRequest, bool) {
return nil, false
}
// AsTaskRunRequest is the BasicRunRequest implementation for EncodedTaskRunRequest.
func (etrr EncodedTaskRunRequest) AsTaskRunRequest() (*TaskRunRequest, bool) {
return nil, false
}
// AsEncodedTaskRunRequest is the BasicRunRequest implementation for EncodedTaskRunRequest.
func (etrr EncodedTaskRunRequest) AsEncodedTaskRunRequest() (*EncodedTaskRunRequest, bool) {
return &etrr, true
}
// AsRunRequest is the BasicRunRequest implementation for EncodedTaskRunRequest.
func (etrr EncodedTaskRunRequest) AsRunRequest() (*RunRequest, bool) {
return nil, false
}
// AsBasicRunRequest is the BasicRunRequest implementation for EncodedTaskRunRequest.
func (etrr EncodedTaskRunRequest) AsBasicRunRequest() (BasicRunRequest, bool) {
return &etrr, true
}
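// exampleEncodedTaskRunRequest is a hypothetical sketch with placeholder values showing
// how an EncodedTaskRunRequest might be populated; the caller is expected to Base64
// encode the task definition before assigning it, and the Linux/Amd64 constants are
// assumed to be the enum values listed in the field comments above.
func exampleEncodedTaskRunRequest(encodedTask string) EncodedTaskRunRequest {
	timeout := int32(3600)
	return EncodedTaskRunRequest{
		EncodedTaskContent: &encodedTask,
		Timeout:            &timeout,
		Platform:           &PlatformProperties{Os: Linux, Architecture: Amd64},
	}
}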
// EncodedTaskStep the properties of an encoded task step.
type EncodedTaskStep struct {
// EncodedTaskContent - Base64 encoded value of the template/definition file content.
EncodedTaskContent *string `json:"encodedTaskContent,omitempty"`
// EncodedValuesContent - Base64 encoded value of the parameters/values file content.
EncodedValuesContent *string `json:"encodedValuesContent,omitempty"`
// Values - The collection of overridable values that can be passed when running a task.
Values *[]SetValue `json:"values,omitempty"`
// BaseImageDependencies - List of base image dependencies for a step.
BaseImageDependencies *[]BaseImageDependency `json:"baseImageDependencies,omitempty"`
// ContextPath - The URL (absolute or relative) of the source context for the task step.
ContextPath *string `json:"contextPath,omitempty"`
// ContextAccessToken - The token (git PAT or SAS token of storage account blob) associated with the context for a step.
ContextAccessToken *string `json:"contextAccessToken,omitempty"`
// Type - Possible values include: 'TypeTaskStepProperties', 'TypeDocker', 'TypeFileTask', 'TypeEncodedTask'
Type TypeBasicTaskStepProperties `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for EncodedTaskStep.
func (ets EncodedTaskStep) MarshalJSON() ([]byte, error) {
ets.Type = TypeEncodedTask
objectMap := make(map[string]interface{})
if ets.EncodedTaskContent != nil {
objectMap["encodedTaskContent"] = ets.EncodedTaskContent
}
if ets.EncodedValuesContent != nil {
objectMap["encodedValuesContent"] = ets.EncodedValuesContent
}
if ets.Values != nil {
objectMap["values"] = ets.Values
}
if ets.BaseImageDependencies != nil {
objectMap["baseImageDependencies"] = ets.BaseImageDependencies
}
if ets.ContextPath != nil {
objectMap["contextPath"] = ets.ContextPath
}
if ets.ContextAccessToken != nil {
objectMap["contextAccessToken"] = ets.ContextAccessToken
}
if ets.Type != "" {
objectMap["type"] = ets.Type
}
return json.Marshal(objectMap)
}
// AsDockerBuildStep is the BasicTaskStepProperties implementation for EncodedTaskStep.
func (ets EncodedTaskStep) AsDockerBuildStep() (*DockerBuildStep, bool) {
return nil, false
}
// AsFileTaskStep is the BasicTaskStepProperties implementation for EncodedTaskStep.
func (ets EncodedTaskStep) AsFileTaskStep() (*FileTaskStep, bool) {
return nil, false
}
// AsEncodedTaskStep is the BasicTaskStepProperties implementation for EncodedTaskStep.
func (ets EncodedTaskStep) AsEncodedTaskStep() (*EncodedTaskStep, bool) {
return &ets, true
}
// AsTaskStepProperties is the BasicTaskStepProperties implementation for EncodedTaskStep.
func (ets EncodedTaskStep) AsTaskStepProperties() (*TaskStepProperties, bool) {
return nil, false
}
// AsBasicTaskStepProperties is the BasicTaskStepProperties implementation for EncodedTaskStep.
func (ets EncodedTaskStep) AsBasicTaskStepProperties() (BasicTaskStepProperties, bool) {
return &ets, true
}
// EncodedTaskStepUpdateParameters the properties for updating an encoded task step.
type EncodedTaskStepUpdateParameters struct {
// EncodedTaskContent - Base64 encoded value of the template/definition file content.
EncodedTaskContent *string `json:"encodedTaskContent,omitempty"`
// EncodedValuesContent - Base64 encoded value of the parameters/values file content.
EncodedValuesContent *string `json:"encodedValuesContent,omitempty"`
// Values - The collection of overridable values that can be passed when running a task.
Values *[]SetValue `json:"values,omitempty"`
// ContextPath - The URL (absolute or relative) of the source context for the task step.
ContextPath *string `json:"contextPath,omitempty"`
// ContextAccessToken - The token (git PAT or SAS token of storage account blob) associated with the context for a step.
ContextAccessToken *string `json:"contextAccessToken,omitempty"`
// Type - Possible values include: 'TypeBasicTaskStepUpdateParametersTypeTaskStepUpdateParameters', 'TypeBasicTaskStepUpdateParametersTypeDocker', 'TypeBasicTaskStepUpdateParametersTypeFileTask', 'TypeBasicTaskStepUpdateParametersTypeEncodedTask'
Type TypeBasicTaskStepUpdateParameters `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for EncodedTaskStepUpdateParameters.
func (etsup EncodedTaskStepUpdateParameters) MarshalJSON() ([]byte, error) {
etsup.Type = TypeBasicTaskStepUpdateParametersTypeEncodedTask
objectMap := make(map[string]interface{})
if etsup.EncodedTaskContent != nil {
objectMap["encodedTaskContent"] = etsup.EncodedTaskContent
}
if etsup.EncodedValuesContent != nil {
objectMap["encodedValuesContent"] = etsup.EncodedValuesContent
}
if etsup.Values != nil {
objectMap["values"] = etsup.Values
}
if etsup.ContextPath != nil {
objectMap["contextPath"] = etsup.ContextPath
}
if etsup.ContextAccessToken != nil {
objectMap["contextAccessToken"] = etsup.ContextAccessToken
}
if etsup.Type != "" {
objectMap["type"] = etsup.Type
}
return json.Marshal(objectMap)
}
// AsDockerBuildStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for EncodedTaskStepUpdateParameters.
func (etsup EncodedTaskStepUpdateParameters) AsDockerBuildStepUpdateParameters() (*DockerBuildStepUpdateParameters, bool) {
return nil, false
}
// AsFileTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for EncodedTaskStepUpdateParameters.
func (etsup EncodedTaskStepUpdateParameters) AsFileTaskStepUpdateParameters() (*FileTaskStepUpdateParameters, bool) {
return nil, false
}
// AsEncodedTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for EncodedTaskStepUpdateParameters.
func (etsup EncodedTaskStepUpdateParameters) AsEncodedTaskStepUpdateParameters() (*EncodedTaskStepUpdateParameters, bool) {
return &etsup, true
}
// AsTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for EncodedTaskStepUpdateParameters.
func (etsup EncodedTaskStepUpdateParameters) AsTaskStepUpdateParameters() (*TaskStepUpdateParameters, bool) {
return nil, false
}
// AsBasicTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for EncodedTaskStepUpdateParameters.
func (etsup EncodedTaskStepUpdateParameters) AsBasicTaskStepUpdateParameters() (BasicTaskStepUpdateParameters, bool) {
return &etsup, true
}
// Event the event for a webhook.
type Event struct {
// EventRequestMessage - The event request message sent to the service URI.
EventRequestMessage *EventRequestMessage `json:"eventRequestMessage,omitempty"`
// EventResponseMessage - The event response message received from the service URI.
EventResponseMessage *EventResponseMessage `json:"eventResponseMessage,omitempty"`
// ID - The event ID.
ID *string `json:"id,omitempty"`
}
// EventContent the content of the event request message.
type EventContent struct {
// ID - The event ID.
ID *string `json:"id,omitempty"`
// Timestamp - The time at which the event occurred.
Timestamp *date.Time `json:"timestamp,omitempty"`
// Action - The action that encompasses the provided event.
Action *string `json:"action,omitempty"`
// Target - The target of the event.
Target *Target `json:"target,omitempty"`
// Request - The request that generated the event.
Request *Request `json:"request,omitempty"`
// Actor - The agent that initiated the event. For most situations, this could be from the authorization context of the request.
Actor *Actor `json:"actor,omitempty"`
// Source - The registry node that generated the event. Put differently, while the actor initiates the event, the source generates it.
Source *Source `json:"source,omitempty"`
}
// EventInfo the basic information of an event.
type EventInfo struct {
autorest.Response `json:"-"`
// ID - The event ID.
ID *string `json:"id,omitempty"`
}
// EventListResult the result of a request to list events for a webhook.
type EventListResult struct {
autorest.Response `json:"-"`
// Value - The list of events. Since this list may be incomplete, the nextLink field should be used to request the next list of events.
Value *[]Event `json:"value,omitempty"`
// NextLink - The URI that can be used to request the next list of events.
NextLink *string `json:"nextLink,omitempty"`
}
// EventListResultIterator provides access to a complete listing of Event values.
type EventListResultIterator struct {
i int
page EventListResultPage
}
// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *EventListResultIterator) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/EventListResultIterator.NextWithContext")
defer func() {
sc := -1
if iter.Response().Response.Response != nil {
sc = iter.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
}
iter.i = 0
return nil
}
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (iter *EventListResultIterator) Next() error {
return iter.NextWithContext(context.Background())
}
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter EventListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
// Response returns the raw server response from the last page request.
func (iter EventListResultIterator) Response() EventListResult {
return iter.page.Response()
}
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter EventListResultIterator) Value() Event {
if !iter.page.NotDone() {
return Event{}
}
return iter.page.Values()[iter.i]
}
// NewEventListResultIterator creates a new instance of the EventListResultIterator type.
func NewEventListResultIterator(page EventListResultPage) EventListResultIterator {
return EventListResultIterator{page: page}
}
// IsEmpty returns true if the ListResult contains no values.
func (elr EventListResult) IsEmpty() bool {
return elr.Value == nil || len(*elr.Value) == 0
}
// eventListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (elr EventListResult) eventListResultPreparer(ctx context.Context) (*http.Request, error) {
if elr.NextLink == nil || len(to.String(elr.NextLink)) < 1 {
return nil, nil
}
return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(elr.NextLink)))
}
// EventListResultPage contains a page of Event values.
type EventListResultPage struct {
fn func(context.Context, EventListResult) (EventListResult, error)
elr EventListResult
}
// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *EventListResultPage) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/EventListResultPage.NextWithContext")
defer func() {
sc := -1
if page.Response().Response.Response != nil {
sc = page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
next, err := page.fn(ctx, page.elr)
if err != nil {
return err
}
page.elr = next
return nil
}
// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *EventListResultPage) Next() error {
return page.NextWithContext(context.Background())
}
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page EventListResultPage) NotDone() bool {
return !page.elr.IsEmpty()
}
// Response returns the raw server response from the last page request.
func (page EventListResultPage) Response() EventListResult {
return page.elr
}
// Values returns the slice of values for the current page or nil if there are no values.
func (page EventListResultPage) Values() []Event {
if page.elr.IsEmpty() {
return nil
}
return *page.elr.Value
}
// NewEventListResultPage creates a new instance of the EventListResultPage type.
func NewEventListResultPage(getNextPage func(context.Context, EventListResult) (EventListResult, error)) EventListResultPage {
return EventListResultPage{fn: getNextPage}
}
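// exampleCollectEvents is a hypothetical sketch (not generated code) of the usual
// iterator pattern for draining every page of events exposed by the types above.
func exampleCollectEvents(ctx context.Context, iter EventListResultIterator) ([]Event, error) {
	var events []Event
	for iter.NotDone() {
		events = append(events, iter.Value())
		if err := iter.NextWithContext(ctx); err != nil {
			return events, err
		}
	}
	return events, nil
}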
// EventRequestMessage the event request message sent to the service URI.
type EventRequestMessage struct {
// Content - The content of the event request message.
Content *EventContent `json:"content,omitempty"`
// Headers - The headers of the event request message.
Headers map[string]*string `json:"headers"`
// Method - The HTTP method used to send the event request message.
Method *string `json:"method,omitempty"`
// RequestURI - The URI used to send the event request message.
RequestURI *string `json:"requestUri,omitempty"`
// Version - The HTTP message version.
Version *string `json:"version,omitempty"`
}
// MarshalJSON is the custom marshaler for EventRequestMessage.
func (erm EventRequestMessage) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if erm.Content != nil {
objectMap["content"] = erm.Content
}
if erm.Headers != nil {
objectMap["headers"] = erm.Headers
}
if erm.Method != nil {
objectMap["method"] = erm.Method
}
if erm.RequestURI != nil {
objectMap["requestUri"] = erm.RequestURI
}
if erm.Version != nil {
objectMap["version"] = erm.Version
}
return json.Marshal(objectMap)
}
// EventResponseMessage the event response message received from the service URI.
type EventResponseMessage struct {
// Content - The content of the event response message.
Content *string `json:"content,omitempty"`
// Headers - The headers of the event response message.
Headers map[string]*string `json:"headers"`
// ReasonPhrase - The reason phrase of the event response message.
ReasonPhrase *string `json:"reasonPhrase,omitempty"`
// StatusCode - The status code of the event response message.
StatusCode *string `json:"statusCode,omitempty"`
// Version - The HTTP message version.
Version *string `json:"version,omitempty"`
}
// MarshalJSON is the custom marshaler for EventResponseMessage.
func (erm EventResponseMessage) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if erm.Content != nil {
objectMap["content"] = erm.Content
}
if erm.Headers != nil {
objectMap["headers"] = erm.Headers
}
if erm.ReasonPhrase != nil {
objectMap["reasonPhrase"] = erm.ReasonPhrase
}
if erm.StatusCode != nil {
objectMap["statusCode"] = erm.StatusCode
}
if erm.Version != nil {
objectMap["version"] = erm.Version
}
return json.Marshal(objectMap)
}
// FileTaskRunRequest the request parameters for scheduling a run against a task file.
type FileTaskRunRequest struct {
// TaskFilePath - The template/definition file path relative to the source.
TaskFilePath *string `json:"taskFilePath,omitempty"`
// ValuesFilePath - The values/parameters file path relative to the source.
ValuesFilePath *string `json:"valuesFilePath,omitempty"`
// Values - The collection of overridable values that can be passed when running a task.
Values *[]SetValue `json:"values,omitempty"`
// Timeout - Run timeout in seconds.
Timeout *int32 `json:"timeout,omitempty"`
// Platform - The platform properties against which the run has to happen.
Platform *PlatformProperties `json:"platform,omitempty"`
// AgentConfiguration - The machine configuration of the run agent.
AgentConfiguration *AgentProperties `json:"agentConfiguration,omitempty"`
// SourceLocation - The URL (absolute or relative) of the source context. It can be a URL to a tar or git repository.
// If it is a relative URL, the relative path should be obtained by calling the listBuildSourceUploadUrl API.
SourceLocation *string `json:"sourceLocation,omitempty"`
// Credentials - The properties that describe a set of credentials that will be used when this run is invoked.
Credentials *Credentials `json:"credentials,omitempty"`
// IsArchiveEnabled - The value that indicates whether archiving is enabled for the run or not.
IsArchiveEnabled *bool `json:"isArchiveEnabled,omitempty"`
// Type - Possible values include: 'TypeRunRequest', 'TypeDockerBuildRequest', 'TypeFileTaskRunRequest', 'TypeTaskRunRequest', 'TypeEncodedTaskRunRequest'
Type Type `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for FileTaskRunRequest.
func (ftrr FileTaskRunRequest) MarshalJSON() ([]byte, error) {
ftrr.Type = TypeFileTaskRunRequest
objectMap := make(map[string]interface{})
if ftrr.TaskFilePath != nil {
objectMap["taskFilePath"] = ftrr.TaskFilePath
}
if ftrr.ValuesFilePath != nil {
objectMap["valuesFilePath"] = ftrr.ValuesFilePath
}
if ftrr.Values != nil {
objectMap["values"] = ftrr.Values
}
if ftrr.Timeout != nil {
objectMap["timeout"] = ftrr.Timeout
}
if ftrr.Platform != nil {
objectMap["platform"] = ftrr.Platform
}
if ftrr.AgentConfiguration != nil {
objectMap["agentConfiguration"] = ftrr.AgentConfiguration
}
if ftrr.SourceLocation != nil {
objectMap["sourceLocation"] = ftrr.SourceLocation
}
if ftrr.Credentials != nil {
objectMap["credentials"] = ftrr.Credentials
}
if ftrr.IsArchiveEnabled != nil {
objectMap["isArchiveEnabled"] = ftrr.IsArchiveEnabled
}
if ftrr.Type != "" {
objectMap["type"] = ftrr.Type
}
return json.Marshal(objectMap)
}
// AsDockerBuildRequest is the BasicRunRequest implementation for FileTaskRunRequest.
func (ftrr FileTaskRunRequest) AsDockerBuildRequest() (*DockerBuildRequest, bool) {
return nil, false
}
// AsFileTaskRunRequest is the BasicRunRequest implementation for FileTaskRunRequest.
func (ftrr FileTaskRunRequest) AsFileTaskRunRequest() (*FileTaskRunRequest, bool) {
return &ftrr, true
}
// AsTaskRunRequest is the BasicRunRequest implementation for FileTaskRunRequest.
func (ftrr FileTaskRunRequest) AsTaskRunRequest() (*TaskRunRequest, bool) {
return nil, false
}
// AsEncodedTaskRunRequest is the BasicRunRequest implementation for FileTaskRunRequest.
func (ftrr FileTaskRunRequest) AsEncodedTaskRunRequest() (*EncodedTaskRunRequest, bool) {
return nil, false
}
// AsRunRequest is the BasicRunRequest implementation for FileTaskRunRequest.
func (ftrr FileTaskRunRequest) AsRunRequest() (*RunRequest, bool) {
return nil, false
}
// AsBasicRunRequest is the BasicRunRequest implementation for FileTaskRunRequest.
func (ftrr FileTaskRunRequest) AsBasicRunRequest() (BasicRunRequest, bool) {
return &ftrr, true
}
// FileTaskStep the properties of a task step.
type FileTaskStep struct {
// TaskFilePath - The task template/definition file path relative to the source context.
TaskFilePath *string `json:"taskFilePath,omitempty"`
// ValuesFilePath - The task values/parameters file path relative to the source context.
ValuesFilePath *string `json:"valuesFilePath,omitempty"`
// Values - The collection of overridable values that can be passed when running a task.
Values *[]SetValue `json:"values,omitempty"`
// BaseImageDependencies - List of base image dependencies for a step.
BaseImageDependencies *[]BaseImageDependency `json:"baseImageDependencies,omitempty"`
// ContextPath - The URL (absolute or relative) of the source context for the task step.
ContextPath *string `json:"contextPath,omitempty"`
// ContextAccessToken - The token (git PAT or SAS token of storage account blob) associated with the context for a step.
ContextAccessToken *string `json:"contextAccessToken,omitempty"`
// Type - Possible values include: 'TypeTaskStepProperties', 'TypeDocker', 'TypeFileTask', 'TypeEncodedTask'
Type TypeBasicTaskStepProperties `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for FileTaskStep.
func (fts FileTaskStep) MarshalJSON() ([]byte, error) {
fts.Type = TypeFileTask
objectMap := make(map[string]interface{})
if fts.TaskFilePath != nil {
objectMap["taskFilePath"] = fts.TaskFilePath
}
if fts.ValuesFilePath != nil {
objectMap["valuesFilePath"] = fts.ValuesFilePath
}
if fts.Values != nil {
objectMap["values"] = fts.Values
}
if fts.BaseImageDependencies != nil {
objectMap["baseImageDependencies"] = fts.BaseImageDependencies
}
if fts.ContextPath != nil {
objectMap["contextPath"] = fts.ContextPath
}
if fts.ContextAccessToken != nil {
objectMap["contextAccessToken"] = fts.ContextAccessToken
}
if fts.Type != "" {
objectMap["type"] = fts.Type
}
return json.Marshal(objectMap)
}
// AsDockerBuildStep is the BasicTaskStepProperties implementation for FileTaskStep.
func (fts FileTaskStep) AsDockerBuildStep() (*DockerBuildStep, bool) {
return nil, false
}
// AsFileTaskStep is the BasicTaskStepProperties implementation for FileTaskStep.
func (fts FileTaskStep) AsFileTaskStep() (*FileTaskStep, bool) {
return &fts, true
}
// AsEncodedTaskStep is the BasicTaskStepProperties implementation for FileTaskStep.
func (fts FileTaskStep) AsEncodedTaskStep() (*EncodedTaskStep, bool) {
return nil, false
}
// AsTaskStepProperties is the BasicTaskStepProperties implementation for FileTaskStep.
func (fts FileTaskStep) AsTaskStepProperties() (*TaskStepProperties, bool) {
return nil, false
}
// AsBasicTaskStepProperties is the BasicTaskStepProperties implementation for FileTaskStep.
func (fts FileTaskStep) AsBasicTaskStepProperties() (BasicTaskStepProperties, bool) {
return &fts, true
}
// FileTaskStepUpdateParameters the properties for updating a task step.
type FileTaskStepUpdateParameters struct {
// TaskFilePath - The task template/definition file path relative to the source context.
TaskFilePath *string `json:"taskFilePath,omitempty"`
// ValuesFilePath - The values/parameters file path relative to the source context.
ValuesFilePath *string `json:"valuesFilePath,omitempty"`
// Values - The collection of overridable values that can be passed when running a task.
Values *[]SetValue `json:"values,omitempty"`
// ContextPath - The URL (absolute or relative) of the source context for the task step.
ContextPath *string `json:"contextPath,omitempty"`
// ContextAccessToken - The token (git PAT or SAS token of storage account blob) associated with the context for a step.
ContextAccessToken *string `json:"contextAccessToken,omitempty"`
// Type - Possible values include: 'TypeBasicTaskStepUpdateParametersTypeTaskStepUpdateParameters', 'TypeBasicTaskStepUpdateParametersTypeDocker', 'TypeBasicTaskStepUpdateParametersTypeFileTask', 'TypeBasicTaskStepUpdateParametersTypeEncodedTask'
Type TypeBasicTaskStepUpdateParameters `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for FileTaskStepUpdateParameters.
func (ftsup FileTaskStepUpdateParameters) MarshalJSON() ([]byte, error) {
ftsup.Type = TypeBasicTaskStepUpdateParametersTypeFileTask
objectMap := make(map[string]interface{})
if ftsup.TaskFilePath != nil {
objectMap["taskFilePath"] = ftsup.TaskFilePath
}
if ftsup.ValuesFilePath != nil {
objectMap["valuesFilePath"] = ftsup.ValuesFilePath
}
if ftsup.Values != nil {
objectMap["values"] = ftsup.Values
}
if ftsup.ContextPath != nil {
objectMap["contextPath"] = ftsup.ContextPath
}
if ftsup.ContextAccessToken != nil {
objectMap["contextAccessToken"] = ftsup.ContextAccessToken
}
if ftsup.Type != "" {
objectMap["type"] = ftsup.Type
}
return json.Marshal(objectMap)
}
// AsDockerBuildStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for FileTaskStepUpdateParameters.
func (ftsup FileTaskStepUpdateParameters) AsDockerBuildStepUpdateParameters() (*DockerBuildStepUpdateParameters, bool) {
return nil, false
}
// AsFileTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for FileTaskStepUpdateParameters.
func (ftsup FileTaskStepUpdateParameters) AsFileTaskStepUpdateParameters() (*FileTaskStepUpdateParameters, bool) {
return &ftsup, true
}
// AsEncodedTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for FileTaskStepUpdateParameters.
func (ftsup FileTaskStepUpdateParameters) AsEncodedTaskStepUpdateParameters() (*EncodedTaskStepUpdateParameters, bool) {
return nil, false
}
// AsTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for FileTaskStepUpdateParameters.
func (ftsup FileTaskStepUpdateParameters) AsTaskStepUpdateParameters() (*TaskStepUpdateParameters, bool) {
return nil, false
}
// AsBasicTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for FileTaskStepUpdateParameters.
func (ftsup FileTaskStepUpdateParameters) AsBasicTaskStepUpdateParameters() (BasicTaskStepUpdateParameters, bool) {
return &ftsup, true
}
// ImageDescriptor properties for a registry image.
type ImageDescriptor struct {
// Registry - The registry login server.
Registry *string `json:"registry,omitempty"`
// Repository - The repository name.
Repository *string `json:"repository,omitempty"`
// Tag - The tag name.
Tag *string `json:"tag,omitempty"`
// Digest - The sha256-based digest of the image manifest.
Digest *string `json:"digest,omitempty"`
}
// ImageUpdateTrigger the image update trigger that caused a build.
type ImageUpdateTrigger struct {
// ID - The unique ID of the trigger.
ID *string `json:"id,omitempty"`
// Timestamp - The timestamp when the image update happened.
Timestamp *date.Time `json:"timestamp,omitempty"`
// Images - The list of image updates that caused the build.
Images *[]ImageDescriptor `json:"images,omitempty"`
}
// ImportImageParameters ...
type ImportImageParameters struct {
// Source - The source of the image.
Source *ImportSource `json:"source,omitempty"`
// TargetTags - List of strings of the form repo[:tag]. When tag is omitted the source will be used (or 'latest' if source tag is also omitted).
TargetTags *[]string `json:"targetTags,omitempty"`
// UntaggedTargetRepositories - List of strings of repository names to do a manifest only copy. No tag will be created.
UntaggedTargetRepositories *[]string `json:"untaggedTargetRepositories,omitempty"`
// Mode - When Force, any existing target tags will be overwritten. When NoForce, any existing target tags will fail the operation before any copying begins. Possible values include: 'NoForce', 'Force'
Mode ImportMode `json:"mode,omitempty"`
}
// ImportSource ...
type ImportSource struct {
// ResourceID - The resource identifier of the source Azure Container Registry.
ResourceID *string `json:"resourceId,omitempty"`
// RegistryURI - The address of the source registry (e.g. 'mcr.microsoft.com').
RegistryURI *string `json:"registryUri,omitempty"`
// Credentials - Credentials used when importing from a registry URI.
Credentials *ImportSourceCredentials `json:"credentials,omitempty"`
// SourceImage - Repository name of the source image.
// Specify an image by repository ('hello-world'). This will use the 'latest' tag.
// Specify an image by tag ('hello-world:latest').
// Specify an image by sha256-based manifest digest ('hello-world@sha256:abc123').
SourceImage *string `json:"sourceImage,omitempty"`
}
// ImportSourceCredentials ...
type ImportSourceCredentials struct {
// Username - The username to authenticate with the source registry.
Username *string `json:"username,omitempty"`
// Password - The password used to authenticate with the source registry.
Password *string `json:"password,omitempty"`
}
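// exampleImportImageParameters is a hypothetical sketch with placeholder values showing
// how an image import from a public registry might be described; the NoForce constant is
// assumed to be the ImportMode enum value listed in the Mode field comment above.
func exampleImportImageParameters() ImportImageParameters {
	registry := "mcr.microsoft.com"
	image := "hello-world:latest"
	return ImportImageParameters{
		Source: &ImportSource{
			RegistryURI: &registry,
			SourceImage: &image,
		},
		TargetTags: &[]string{"hello-world:imported"},
		Mode:       NoForce,
	}
}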
// IPRule IP rule with specific IP or IP range in CIDR format.
type IPRule struct {
// Action - The action of IP ACL rule. Possible values include: 'Allow'
Action Action `json:"action,omitempty"`
// IPAddressOrRange - Specifies the IP or IP range in CIDR format. Only IPV4 address is allowed.
IPAddressOrRange *string `json:"value,omitempty"`
}
// NetworkRuleSet the network rule set for a container registry.
type NetworkRuleSet struct {
// DefaultAction - The default action of allow or deny when no other rules match. Possible values include: 'DefaultActionAllow', 'DefaultActionDeny'
DefaultAction DefaultAction `json:"defaultAction,omitempty"`
// VirtualNetworkRules - The virtual network rules.
VirtualNetworkRules *[]VirtualNetworkRule `json:"virtualNetworkRules,omitempty"`
// IPRules - The IP ACL rules.
IPRules *[]IPRule `json:"ipRules,omitempty"`
}
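// exampleNetworkRuleSet is a hypothetical sketch (placeholder CIDR) of a rule set that
// denies by default and allows one IPv4 range; DefaultActionDeny and Allow are assumed
// to be the enum values named in the field comments above.
func exampleNetworkRuleSet() NetworkRuleSet {
	cidr := "203.0.113.0/24"
	return NetworkRuleSet{
		DefaultAction: DefaultActionDeny,
		IPRules:       &[]IPRule{{Action: Allow, IPAddressOrRange: &cidr}},
	}
}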
// OperationDefinition the definition of a container registry operation.
type OperationDefinition struct {
// Origin - The origin information of the container registry operation.
Origin *string `json:"origin,omitempty"`
// Name - Operation name: {provider}/{resource}/{operation}.
Name *string `json:"name,omitempty"`
// Display - The display information for the container registry operation.
Display *OperationDisplayDefinition `json:"display,omitempty"`
// OperationPropertiesDefinition - The properties information for the container registry operation.
*OperationPropertiesDefinition `json:"properties,omitempty"`
}
// MarshalJSON is the custom marshaler for OperationDefinition.
func (od OperationDefinition) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if od.Origin != nil {
objectMap["origin"] = od.Origin
}
if od.Name != nil {
objectMap["name"] = od.Name
}
if od.Display != nil {
objectMap["display"] = od.Display
}
if od.OperationPropertiesDefinition != nil {
objectMap["properties"] = od.OperationPropertiesDefinition
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for OperationDefinition struct.
func (od *OperationDefinition) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "origin":
if v != nil {
var origin string
err = json.Unmarshal(*v, &origin)
if err != nil {
return err
}
od.Origin = &origin
}
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
od.Name = &name
}
case "display":
if v != nil {
var display OperationDisplayDefinition
err = json.Unmarshal(*v, &display)
if err != nil {
return err
}
od.Display = &display
}
case "properties":
if v != nil {
var operationPropertiesDefinition OperationPropertiesDefinition
err = json.Unmarshal(*v, &operationPropertiesDefinition)
if err != nil {
return err
}
od.OperationPropertiesDefinition = &operationPropertiesDefinition
}
}
}
return nil
}
// OperationDisplayDefinition the display information for a container registry operation.
type OperationDisplayDefinition struct {
// Provider - The resource provider name: Microsoft.ContainerRegistry.
Provider *string `json:"provider,omitempty"`
// Resource - The resource on which the operation is performed.
Resource *string `json:"resource,omitempty"`
// Operation - The operation that users can perform.
Operation *string `json:"operation,omitempty"`
// Description - The description for the operation.
Description *string `json:"description,omitempty"`
}
// OperationListResult the result of a request to list container registry operations.
type OperationListResult struct {
autorest.Response `json:"-"`
// Value - The list of container registry operations. Since this list may be incomplete, the nextLink field should be used to request the next list of operations.
Value *[]OperationDefinition `json:"value,omitempty"`
// NextLink - The URI that can be used to request the next list of container registry operations.
NextLink *string `json:"nextLink,omitempty"`
}
// OperationListResultIterator provides access to a complete listing of OperationDefinition values.
type OperationListResultIterator struct {
i int
page OperationListResultPage
}
// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *OperationListResultIterator) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/OperationListResultIterator.NextWithContext")
defer func() {
sc := -1
if iter.Response().Response.Response != nil {
sc = iter.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
}
iter.i = 0
return nil
}
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (iter *OperationListResultIterator) Next() error {
return iter.NextWithContext(context.Background())
}
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter OperationListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
// Response returns the raw server response from the last page request.
func (iter OperationListResultIterator) Response() OperationListResult {
return iter.page.Response()
}
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter OperationListResultIterator) Value() OperationDefinition {
if !iter.page.NotDone() {
return OperationDefinition{}
}
return iter.page.Values()[iter.i]
}
// NewOperationListResultIterator creates a new instance of the OperationListResultIterator type.
func NewOperationListResultIterator(page OperationListResultPage) OperationListResultIterator {
return OperationListResultIterator{page: page}
}
// IsEmpty returns true if the ListResult contains no values.
func (olr OperationListResult) IsEmpty() bool {
return olr.Value == nil || len(*olr.Value) == 0
}
// operationListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (olr OperationListResult) operationListResultPreparer(ctx context.Context) (*http.Request, error) {
if olr.NextLink == nil || len(to.String(olr.NextLink)) < 1 {
return nil, nil
}
return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(olr.NextLink)))
}
// OperationListResultPage contains a page of OperationDefinition values.
type OperationListResultPage struct {
fn func(context.Context, OperationListResult) (OperationListResult, error)
olr OperationListResult
}
// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *OperationListResultPage) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/OperationListResultPage.NextWithContext")
defer func() {
sc := -1
if page.Response().Response.Response != nil {
sc = page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
next, err := page.fn(ctx, page.olr)
if err != nil {
return err
}
page.olr = next
return nil
}
// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *OperationListResultPage) Next() error {
return page.NextWithContext(context.Background())
}
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page OperationListResultPage) NotDone() bool {
return !page.olr.IsEmpty()
}
// Response returns the raw server response from the last page request.
func (page OperationListResultPage) Response() OperationListResult {
return page.olr
}
// Values returns the slice of values for the current page or nil if there are no values.
func (page OperationListResultPage) Values() []OperationDefinition {
if page.olr.IsEmpty() {
return nil
}
return *page.olr.Value
}
// NewOperationListResultPage creates a new instance of the OperationListResultPage type.
func NewOperationListResultPage(getNextPage func(context.Context, OperationListResult) (OperationListResult, error)) OperationListResultPage {
return OperationListResultPage{fn: getNextPage}
}
// OperationMetricSpecificationDefinition the definition of Azure Monitoring metric.
type OperationMetricSpecificationDefinition struct {
// Name - Metric name.
Name *string `json:"name,omitempty"`
// DisplayName - Metric display name.
DisplayName *string `json:"displayName,omitempty"`
// DisplayDescription - Metric description.
DisplayDescription *string `json:"displayDescription,omitempty"`
// Unit - Metric unit.
Unit *string `json:"unit,omitempty"`
// AggregationType - Metric aggregation type.
AggregationType *string `json:"aggregationType,omitempty"`
// InternalMetricName - Internal metric name.
InternalMetricName *string `json:"internalMetricName,omitempty"`
}
// OperationPropertiesDefinition the definition of Azure Monitoring properties.
type OperationPropertiesDefinition struct {
// ServiceSpecification - The definition of Azure Monitoring service.
ServiceSpecification *OperationServiceSpecificationDefinition `json:"serviceSpecification,omitempty"`
}
// OperationServiceSpecificationDefinition the definition of Azure Monitoring metrics list.
type OperationServiceSpecificationDefinition struct {
// MetricSpecifications - A list of Azure Monitoring metrics definition.
MetricSpecifications *[]OperationMetricSpecificationDefinition `json:"metricSpecifications,omitempty"`
}
// PlatformProperties the platform properties against which the run has to happen.
type PlatformProperties struct {
// Os - The operating system type required for the run. Possible values include: 'Windows', 'Linux'
Os OS `json:"os,omitempty"`
// Architecture - The OS architecture. Possible values include: 'Amd64', 'X86', 'Arm'
Architecture Architecture `json:"architecture,omitempty"`
// Variant - Variant of the CPU. Possible values include: 'V6', 'V7', 'V8'
Variant Variant `json:"variant,omitempty"`
}
// PlatformUpdateParameters the properties for updating the platform configuration.
type PlatformUpdateParameters struct {
// Os - The operating system type required for the run. Possible values include: 'Windows', 'Linux'
Os OS `json:"os,omitempty"`
// Architecture - The OS architecture. Possible values include: 'Amd64', 'X86', 'Arm'
Architecture Architecture `json:"architecture,omitempty"`
// Variant - Variant of the CPU. Possible values include: 'V6', 'V7', 'V8'
Variant Variant `json:"variant,omitempty"`
}
// ProxyResource the resource model definition for an ARM proxy resource. It will have everything other than
// the required location and tags.
type ProxyResource struct {
// ID - The resource ID.
ID *string `json:"id,omitempty"`
// Name - The name of the resource.
Name *string `json:"name,omitempty"`
// Type - The type of the resource.
Type *string `json:"type,omitempty"`
}
// QuarantinePolicy an object that represents quarantine policy for a container registry.
type QuarantinePolicy struct {
// Status - The value that indicates whether the policy is enabled or not. Possible values include: 'Enabled', 'Disabled'
Status PolicyStatus `json:"status,omitempty"`
}
// RegenerateCredentialParameters the parameters used to regenerate the login credential.
type RegenerateCredentialParameters struct {
// Name - Specifies the name of the password that should be regenerated -- password or password2. Possible values include: 'Password', 'Password2'
Name PasswordName `json:"name,omitempty"`
}
// RegistriesCreateFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type RegistriesCreateFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *RegistriesCreateFuture) Result(client RegistriesClient) (r Registry, err error) {
var done bool
done, err = future.Done(client)
if err != nil {
err = autorest.NewErrorWithError(err, "containerregistry.RegistriesCreateFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("containerregistry.RegistriesCreateFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if r.Response.Response, err = future.GetResult(sender); err == nil && r.Response.Response.StatusCode != http.StatusNoContent {
r, err = client.CreateResponder(r.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "containerregistry.RegistriesCreateFuture", "Result", r.Response.Response, "Failure responding to request")
}
}
return
}
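// Illustrative polling sketch (an assumption about typical usage, not part of
// the generated code): after the create call returns a RegistriesCreateFuture,
// a caller can poll Done(client) until it reports true and then call Result to
// obtain the Registry. A delay between polls avoids hammering the service; the
// go-autorest wait helpers can be used instead where available.
//
//    for {
//        done, err := future.Done(client)
//        if err != nil || done {
//            break
//        }
//        time.Sleep(10 * time.Second)
//    }
//    registry, err := future.Result(client)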
// RegistriesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type RegistriesDeleteFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *RegistriesDeleteFuture) Result(client RegistriesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.Done(client)
if err != nil {
err = autorest.NewErrorWithError(err, "containerregistry.RegistriesDeleteFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("containerregistry.RegistriesDeleteFuture")
return
}
ar.Response = future.Response()
return
}
// RegistriesImportImageFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type RegistriesImportImageFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *RegistriesImportImageFuture) Result(client RegistriesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.Done(client)
if err != nil {
err = autorest.NewErrorWithError(err, "containerregistry.RegistriesImportImageFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("containerregistry.RegistriesImportImageFuture")
return
}
ar.Response = future.Response()
return
}
// RegistriesScheduleRunFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type RegistriesScheduleRunFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *RegistriesScheduleRunFuture) Result(client RegistriesClient) (r Run, err error) {
var done bool
done, err = future.Done(client)
if err != nil {
err = autorest.NewErrorWithError(err, "containerregistry.RegistriesScheduleRunFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("containerregistry.RegistriesScheduleRunFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if r.Response.Response, err = future.GetResult(sender); err == nil && r.Response.Response.StatusCode != http.StatusNoContent {
r, err = client.ScheduleRunResponder(r.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "containerregistry.RegistriesScheduleRunFuture", "Result", r.Response.Response, "Failure responding to request")
}
}
return
}
// RegistriesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type RegistriesUpdateFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *RegistriesUpdateFuture) Result(client RegistriesClient) (r Registry, err error) {
var done bool
done, err = future.Done(client)
if err != nil {
err = autorest.NewErrorWithError(err, "containerregistry.RegistriesUpdateFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("containerregistry.RegistriesUpdateFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if r.Response.Response, err = future.GetResult(sender); err == nil && r.Response.Response.StatusCode != http.StatusNoContent {
r, err = client.UpdateResponder(r.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "containerregistry.RegistriesUpdateFuture", "Result", r.Response.Response, "Failure responding to request")
}
}
return
}
// RegistriesUpdatePoliciesFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type RegistriesUpdatePoliciesFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *RegistriesUpdatePoliciesFuture) Result(client RegistriesClient) (rp RegistryPolicies, err error) {
var done bool
done, err = future.Done(client)
if err != nil {
err = autorest.NewErrorWithError(err, "containerregistry.RegistriesUpdatePoliciesFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("containerregistry.RegistriesUpdatePoliciesFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if rp.Response.Response, err = future.GetResult(sender); err == nil && rp.Response.Response.StatusCode != http.StatusNoContent {
rp, err = client.UpdatePoliciesResponder(rp.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "containerregistry.RegistriesUpdatePoliciesFuture", "Result", rp.Response.Response, "Failure responding to request")
}
}
return
}
// Registry an object that represents a container registry.
type Registry struct {
autorest.Response `json:"-"`
// Sku - The SKU of the container registry.
Sku *Sku `json:"sku,omitempty"`
// Identity - The identity of the container registry.
Identity *RegistryIdentity `json:"identity,omitempty"`
// RegistryProperties - The properties of the container registry.
*RegistryProperties `json:"properties,omitempty"`
// ID - The resource ID.
ID *string `json:"id,omitempty"`
// Name - The name of the resource.
Name *string `json:"name,omitempty"`
// Type - The type of the resource.
Type *string `json:"type,omitempty"`
// Location - The location of the resource. This cannot be changed after the resource is created.
Location *string `json:"location,omitempty"`
// Tags - The tags of the resource.
Tags map[string]*string `json:"tags"`
}
// MarshalJSON is the custom marshaler for Registry.
func (r Registry) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if r.Sku != nil {
objectMap["sku"] = r.Sku
}
if r.Identity != nil {
objectMap["identity"] = r.Identity
}
if r.RegistryProperties != nil {
objectMap["properties"] = r.RegistryProperties
}
if r.ID != nil {
objectMap["id"] = r.ID
}
if r.Name != nil {
objectMap["name"] = r.Name
}
if r.Type != nil {
objectMap["type"] = r.Type
}
if r.Location != nil {
objectMap["location"] = r.Location
}
if r.Tags != nil {
objectMap["tags"] = r.Tags
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for Registry struct.
func (r *Registry) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "sku":
if v != nil {
var sku Sku
err = json.Unmarshal(*v, &sku)
if err != nil {
return err
}
r.Sku = &sku
}
case "identity":
if v != nil {
var identity RegistryIdentity
err = json.Unmarshal(*v, &identity)
if err != nil {
return err
}
r.Identity = &identity
}
case "properties":
if v != nil {
var registryProperties RegistryProperties
err = json.Unmarshal(*v, &registryProperties)
if err != nil {
return err
}
r.RegistryProperties = &registryProperties
}
case "id":
if v != nil {
var ID string
err = json.Unmarshal(*v, &ID)
if err != nil {
return err
}
r.ID = &ID
}
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
r.Name = &name
}
case "type":
if v != nil {
var typeVar string
err = json.Unmarshal(*v, &typeVar)
if err != nil {
return err
}
r.Type = &typeVar
}
case "location":
if v != nil {
var location string
err = json.Unmarshal(*v, &location)
if err != nil {
return err
}
r.Location = &location
}
case "tags":
if v != nil {
var tags map[string]*string
err = json.Unmarshal(*v, &tags)
if err != nil {
return err
}
r.Tags = tags
}
}
}
return nil
}
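// Illustrative sketch (assumption, not generated code): because RegistryProperties
// is embedded by pointer and re-nested by the custom (un)marshalers above, a
// Registry round-trips to the ARM wire shape with a "properties" envelope, e.g.
//
//    {
//        "sku":        {"name": "Standard", "tier": "Standard"},
//        "location":   "westus",
//        "properties": {"adminUserEnabled": true}
//    }
//
// rather than exposing loginServer, adminUserEnabled, etc. at the top level.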
// RegistryIdentity the identity of the container registry.
type RegistryIdentity struct {
// Type - The type of identity used for the registry.
Type *string `json:"type,omitempty"`
// PrincipalID - The principal ID of registry identity.
PrincipalID *string `json:"principalId,omitempty"`
// TenantID - The tenant ID associated with the registry.
TenantID *string `json:"tenantId,omitempty"`
}
// RegistryListCredentialsResult the response from the ListCredentials operation.
type RegistryListCredentialsResult struct {
autorest.Response `json:"-"`
// Username - The username for a container registry.
Username *string `json:"username,omitempty"`
// Passwords - The list of passwords for a container registry.
Passwords *[]RegistryPassword `json:"passwords,omitempty"`
}
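// Illustrative sketch (assumption about typical usage, not generated code): the
// username plus any one of the returned passwords authenticates against the
// registry's login server, for example with the docker CLI. The ListCredentials
// call below assumes the RegistriesClient method of that name.
//
//    creds, err := client.ListCredentials(ctx, resourceGroupName, registryName)
//    if err == nil && creds.Passwords != nil && len(*creds.Passwords) > 0 {
//        user := *creds.Username
//        pass := *(*creds.Passwords)[0].Value
//        _ = user
//        _ = pass // e.g. docker login <loginServer> -u user -p pass
//    }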
// RegistryListResult the result of a request to list container registries.
type RegistryListResult struct {
autorest.Response `json:"-"`
// Value - The list of container registries. Since this list may be incomplete, the nextLink field should be used to request the next list of container registries.
Value *[]Registry `json:"value,omitempty"`
// NextLink - The URI that can be used to request the next list of container registries.
NextLink *string `json:"nextLink,omitempty"`
}
// RegistryListResultIterator provides access to a complete listing of Registry values.
type RegistryListResultIterator struct {
i int
page RegistryListResultPage
}
// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *RegistryListResultIterator) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/RegistryListResultIterator.NextWithContext")
defer func() {
sc := -1
if iter.Response().Response.Response != nil {
sc = iter.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
}
iter.i = 0
return nil
}
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (iter *RegistryListResultIterator) Next() error {
return iter.NextWithContext(context.Background())
}
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter RegistryListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
// Response returns the raw server response from the last page request.
func (iter RegistryListResultIterator) Response() RegistryListResult {
return iter.page.Response()
}
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter RegistryListResultIterator) Value() Registry {
if !iter.page.NotDone() {
return Registry{}
}
return iter.page.Values()[iter.i]
}
// Creates a new instance of the RegistryListResultIterator type.
func NewRegistryListResultIterator(page RegistryListResultPage) RegistryListResultIterator {
return RegistryListResultIterator{page: page}
}
// IsEmpty returns true if the ListResult contains no values.
func (rlr RegistryListResult) IsEmpty() bool {
return rlr.Value == nil || len(*rlr.Value) == 0
}
// registryListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (rlr RegistryListResult) registryListResultPreparer(ctx context.Context) (*http.Request, error) {
if rlr.NextLink == nil || len(to.String(rlr.NextLink)) < 1 {
return nil, nil
}
return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(rlr.NextLink)))
}
// RegistryListResultPage contains a page of Registry values.
type RegistryListResultPage struct {
fn func(context.Context, RegistryListResult) (RegistryListResult, error)
rlr RegistryListResult
}
// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *RegistryListResultPage) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/RegistryListResultPage.NextWithContext")
defer func() {
sc := -1
if page.Response().Response.Response != nil {
sc = page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
next, err := page.fn(ctx, page.rlr)
if err != nil {
return err
}
page.rlr = next
return nil
}
// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *RegistryListResultPage) Next() error {
return page.NextWithContext(context.Background())
}
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page RegistryListResultPage) NotDone() bool {
return !page.rlr.IsEmpty()
}
// Response returns the raw server response from the last page request.
func (page RegistryListResultPage) Response() RegistryListResult {
return page.rlr
}
// Values returns the slice of values for the current page or nil if there are no values.
func (page RegistryListResultPage) Values() []Registry {
if page.rlr.IsEmpty() {
return nil
}
return *page.rlr.Value
}
// Creates a new instance of the RegistryListResultPage type.
func NewRegistryListResultPage(getNextPage func(context.Context, RegistryListResult) (RegistryListResult, error)) RegistryListResultPage {
return RegistryListResultPage{fn: getNextPage}
}
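// Illustrative sketch (assumption, not generated code): walking every Registry
// across pages with the page and iterator types defined above. The page is
// usually obtained from the client's list helpers, but an iterator can also be
// built directly from a page value.
//
//    iter := NewRegistryListResultIterator(page)
//    for iter.NotDone() {
//        r := iter.Value()
//        _ = r // use *r.Name, *r.Location, ...
//        if err := iter.NextWithContext(ctx); err != nil {
//            break
//        }
//    }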
// RegistryNameCheckRequest a request to check whether a container registry name is available.
type RegistryNameCheckRequest struct {
// Name - The name of the container registry.
Name *string `json:"name,omitempty"`
// Type - The resource type of the container registry. This field must be set to 'Microsoft.ContainerRegistry/registries'.
Type *string `json:"type,omitempty"`
}
// RegistryNameStatus the result of a request to check the availability of a container registry name.
type RegistryNameStatus struct {
autorest.Response `json:"-"`
// NameAvailable - The value that indicates whether the name is available.
NameAvailable *bool `json:"nameAvailable,omitempty"`
// Reason - If any, the reason that the name is not available.
Reason *string `json:"reason,omitempty"`
// Message - If any, the error message that provides more detail for the reason that the name is not available.
Message *string `json:"message,omitempty"`
}
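// Illustrative sketch (assumption about typical usage, not generated code):
// checking availability of a registry name before creation. The client method
// name is assumed from the autorest naming convention.
//
//    req := RegistryNameCheckRequest{
//        Name: to.StringPtr("myregistry"),
//        Type: to.StringPtr("Microsoft.ContainerRegistry/registries"),
//    }
//    status, err := client.CheckNameAvailability(ctx, req)
//    if err == nil && status.NameAvailable != nil && !*status.NameAvailable {
//        // *status.Reason / *status.Message explain why the name is unavailable.
//    }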
// RegistryPassword the login password for the container registry.
type RegistryPassword struct {
// Name - The password name. Possible values include: 'Password', 'Password2'
Name PasswordName `json:"name,omitempty"`
// Value - The password value.
Value *string `json:"value,omitempty"`
}
// RegistryPolicies an object that represents policies for a container registry.
type RegistryPolicies struct {
autorest.Response `json:"-"`
// QuarantinePolicy - An object that represents quarantine policy for a container registry.
QuarantinePolicy *QuarantinePolicy `json:"quarantinePolicy,omitempty"`
// TrustPolicy - An object that represents content trust policy for a container registry.
TrustPolicy *TrustPolicy `json:"trustPolicy,omitempty"`
}
// RegistryProperties the properties of a container registry.
type RegistryProperties struct {
// LoginServer - The URL that can be used to log into the container registry.
LoginServer *string `json:"loginServer,omitempty"`
// CreationDate - The creation date of the container registry in ISO8601 format.
CreationDate *date.Time `json:"creationDate,omitempty"`
// ProvisioningState - The provisioning state of the container registry at the time the operation was called. Possible values include: 'Creating', 'Updating', 'Deleting', 'Succeeded', 'Failed', 'Canceled'
ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
// Status - The status of the container registry at the time the operation was called.
Status *Status `json:"status,omitempty"`
// AdminUserEnabled - The value that indicates whether the admin user is enabled.
AdminUserEnabled *bool `json:"adminUserEnabled,omitempty"`
// StorageAccount - The properties of the storage account for the container registry. Only applicable to Classic SKU.
StorageAccount *StorageAccountProperties `json:"storageAccount,omitempty"`
// NetworkRuleSet - The network rule set for a container registry.
NetworkRuleSet *NetworkRuleSet `json:"networkRuleSet,omitempty"`
}
// RegistryPropertiesUpdateParameters the parameters for updating the properties of a container registry.
type RegistryPropertiesUpdateParameters struct {
// AdminUserEnabled - The value that indicates whether the admin user is enabled.
AdminUserEnabled *bool `json:"adminUserEnabled,omitempty"`
// StorageAccount - The parameters of a storage account for the container registry. Only applicable to Classic SKU. If specified, the storage account must be in the same physical location as the container registry.
StorageAccount *StorageAccountProperties `json:"storageAccount,omitempty"`
// NetworkRuleSet - The network rule set for a container registry.
NetworkRuleSet *NetworkRuleSet `json:"networkRuleSet,omitempty"`
}
// RegistryUpdateParameters the parameters for updating a container registry.
type RegistryUpdateParameters struct {
// Tags - The tags for the container registry.
Tags map[string]*string `json:"tags"`
// Sku - The SKU of the container registry.
Sku *Sku `json:"sku,omitempty"`
// Identity - The identity of the container registry.
Identity *RegistryIdentity `json:"identity,omitempty"`
// RegistryPropertiesUpdateParameters - The properties that the container registry will be updated with.
*RegistryPropertiesUpdateParameters `json:"properties,omitempty"`
}
// MarshalJSON is the custom marshaler for RegistryUpdateParameters.
func (rup RegistryUpdateParameters) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if rup.Tags != nil {
objectMap["tags"] = rup.Tags
}
if rup.Sku != nil {
objectMap["sku"] = rup.Sku
}
if rup.Identity != nil {
objectMap["identity"] = rup.Identity
}
if rup.RegistryPropertiesUpdateParameters != nil {
objectMap["properties"] = rup.RegistryPropertiesUpdateParameters
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for RegistryUpdateParameters struct.
func (rup *RegistryUpdateParameters) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "tags":
if v != nil {
var tags map[string]*string
err = json.Unmarshal(*v, &tags)
if err != nil {
return err
}
rup.Tags = tags
}
case "sku":
if v != nil {
var sku Sku
err = json.Unmarshal(*v, &sku)
if err != nil {
return err
}
rup.Sku = &sku
}
case "identity":
if v != nil {
var identity RegistryIdentity
err = json.Unmarshal(*v, &identity)
if err != nil {
return err
}
rup.Identity = &identity
}
case "properties":
if v != nil {
var registryPropertiesUpdateParameters RegistryPropertiesUpdateParameters
err = json.Unmarshal(*v, &registryPropertiesUpdateParameters)
if err != nil {
return err
}
rup.RegistryPropertiesUpdateParameters = &registryPropertiesUpdateParameters
}
}
}
return nil
}
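// Illustrative sketch (assumption, not generated code): the embedded
// *RegistryPropertiesUpdateParameters is populated directly and then nested
// under "properties" by the custom marshaler above, so a patch enabling the
// admin user looks like the following (to.BoolPtr is the go-autorest helper):
//
//    update := RegistryUpdateParameters{
//        RegistryPropertiesUpdateParameters: &RegistryPropertiesUpdateParameters{
//            AdminUserEnabled: to.BoolPtr(true),
//        },
//    }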
// RegistryUsage the quota usage for a container registry.
type RegistryUsage struct {
// Name - The name of the usage.
Name *string `json:"name,omitempty"`
// Limit - The limit of the usage.
Limit *int64 `json:"limit,omitempty"`
// CurrentValue - The current value of the usage.
CurrentValue *int64 `json:"currentValue,omitempty"`
// Unit - The unit of measurement. Possible values include: 'Count', 'Bytes'
Unit RegistryUsageUnit `json:"unit,omitempty"`
}
// RegistryUsageListResult the result of a request to get container registry quota usages.
type RegistryUsageListResult struct {
autorest.Response `json:"-"`
// Value - The list of container registry quota usages.
Value *[]RegistryUsage `json:"value,omitempty"`
}
// Replication an object that represents a replication for a container registry.
type Replication struct {
autorest.Response `json:"-"`
// ReplicationProperties - The properties of the replication.
*ReplicationProperties `json:"properties,omitempty"`
// ID - The resource ID.
ID *string `json:"id,omitempty"`
// Name - The name of the resource.
Name *string `json:"name,omitempty"`
// Type - The type of the resource.
Type *string `json:"type,omitempty"`
// Location - The location of the resource. This cannot be changed after the resource is created.
Location *string `json:"location,omitempty"`
// Tags - The tags of the resource.
Tags map[string]*string `json:"tags"`
}
// MarshalJSON is the custom marshaler for Replication.
func (r Replication) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if r.ReplicationProperties != nil {
objectMap["properties"] = r.ReplicationProperties
}
if r.ID != nil {
objectMap["id"] = r.ID
}
if r.Name != nil {
objectMap["name"] = r.Name
}
if r.Type != nil {
objectMap["type"] = r.Type
}
if r.Location != nil {
objectMap["location"] = r.Location
}
if r.Tags != nil {
objectMap["tags"] = r.Tags
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for Replication struct.
func (r *Replication) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "properties":
if v != nil {
var replicationProperties ReplicationProperties
err = json.Unmarshal(*v, &replicationProperties)
if err != nil {
return err
}
r.ReplicationProperties = &replicationProperties
}
case "id":
if v != nil {
var ID string
err = json.Unmarshal(*v, &ID)
if err != nil {
return err
}
r.ID = &ID
}
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
r.Name = &name
}
case "type":
if v != nil {
var typeVar string
err = json.Unmarshal(*v, &typeVar)
if err != nil {
return err
}
r.Type = &typeVar
}
case "location":
if v != nil {
var location string
err = json.Unmarshal(*v, &location)
if err != nil {
return err
}
r.Location = &location
}
case "tags":
if v != nil {
var tags map[string]*string
err = json.Unmarshal(*v, &tags)
if err != nil {
return err
}
r.Tags = tags
}
}
}
return nil
}
// ReplicationListResult the result of a request to list replications for a container registry.
type ReplicationListResult struct {
autorest.Response `json:"-"`
// Value - The list of replications. Since this list may be incomplete, the nextLink field should be used to request the next list of replications.
Value *[]Replication `json:"value,omitempty"`
// NextLink - The URI that can be used to request the next list of replications.
NextLink *string `json:"nextLink,omitempty"`
}
// ReplicationListResultIterator provides access to a complete listing of Replication values.
type ReplicationListResultIterator struct {
i int
page ReplicationListResultPage
}
// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *ReplicationListResultIterator) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ReplicationListResultIterator.NextWithContext")
defer func() {
sc := -1
if iter.Response().Response.Response != nil {
sc = iter.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
}
iter.i = 0
return nil
}
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (iter *ReplicationListResultIterator) Next() error {
return iter.NextWithContext(context.Background())
}
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter ReplicationListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
// Response returns the raw server response from the last page request.
func (iter ReplicationListResultIterator) Response() ReplicationListResult {
return iter.page.Response()
}
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter ReplicationListResultIterator) Value() Replication {
if !iter.page.NotDone() {
return Replication{}
}
return iter.page.Values()[iter.i]
}
// Creates a new instance of the ReplicationListResultIterator type.
func NewReplicationListResultIterator(page ReplicationListResultPage) ReplicationListResultIterator {
return ReplicationListResultIterator{page: page}
}
// IsEmpty returns true if the ListResult contains no values.
func (rlr ReplicationListResult) IsEmpty() bool {
return rlr.Value == nil || len(*rlr.Value) == 0
}
// replicationListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (rlr ReplicationListResult) replicationListResultPreparer(ctx context.Context) (*http.Request, error) {
if rlr.NextLink == nil || len(to.String(rlr.NextLink)) < 1 {
return nil, nil
}
return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(rlr.NextLink)))
}
// ReplicationListResultPage contains a page of Replication values.
type ReplicationListResultPage struct {
fn func(context.Context, ReplicationListResult) (ReplicationListResult, error)
rlr ReplicationListResult
}
// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *ReplicationListResultPage) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ReplicationListResultPage.NextWithContext")
defer func() {
sc := -1
if page.Response().Response.Response != nil {
sc = page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
next, err := page.fn(ctx, page.rlr)
if err != nil {
return err
}
page.rlr = next
return nil
}
// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *ReplicationListResultPage) Next() error {
return page.NextWithContext(context.Background())
}
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page ReplicationListResultPage) NotDone() bool {
return !page.rlr.IsEmpty()
}
// Response returns the raw server response from the last page request.
func (page ReplicationListResultPage) Response() ReplicationListResult {
return page.rlr
}
// Values returns the slice of values for the current page or nil if there are no values.
func (page ReplicationListResultPage) Values() []Replication {
if page.rlr.IsEmpty() {
return nil
}
return *page.rlr.Value
}
// Creates a new instance of the ReplicationListResultPage type.
func NewReplicationListResultPage(getNextPage func(context.Context, ReplicationListResult) (ReplicationListResult, error)) ReplicationListResultPage {
return ReplicationListResultPage{fn: getNextPage}
}
// ReplicationProperties the properties of a replication.
type ReplicationProperties struct {
// ProvisioningState - The provisioning state of the replication at the time the operation was called. Possible values include: 'Creating', 'Updating', 'Deleting', 'Succeeded', 'Failed', 'Canceled'
ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
// Status - The status of the replication at the time the operation was called.
Status *Status `json:"status,omitempty"`
}
// ReplicationsCreateFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type ReplicationsCreateFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *ReplicationsCreateFuture) Result(client ReplicationsClient) (r Replication, err error) {
var done bool
done, err = future.Done(client)
if err != nil {
err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsCreateFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("containerregistry.ReplicationsCreateFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if r.Response.Response, err = future.GetResult(sender); err == nil && r.Response.Response.StatusCode != http.StatusNoContent {
r, err = client.CreateResponder(r.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsCreateFuture", "Result", r.Response.Response, "Failure responding to request")
}
}
return
}
// ReplicationsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type ReplicationsDeleteFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *ReplicationsDeleteFuture) Result(client ReplicationsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.Done(client)
if err != nil {
err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsDeleteFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("containerregistry.ReplicationsDeleteFuture")
return
}
ar.Response = future.Response()
return
}
// ReplicationsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type ReplicationsUpdateFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *ReplicationsUpdateFuture) Result(client ReplicationsClient) (r Replication, err error) {
var done bool
done, err = future.Done(client)
if err != nil {
err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsUpdateFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("containerregistry.ReplicationsUpdateFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if r.Response.Response, err = future.GetResult(sender); err == nil && r.Response.Response.StatusCode != http.StatusNoContent {
r, err = client.UpdateResponder(r.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsUpdateFuture", "Result", r.Response.Response, "Failure responding to request")
}
}
return
}
// ReplicationUpdateParameters the parameters for updating a replication.
type ReplicationUpdateParameters struct {
// Tags - The tags for the replication.
Tags map[string]*string `json:"tags"`
}
// MarshalJSON is the custom marshaler for ReplicationUpdateParameters.
func (rup ReplicationUpdateParameters) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if rup.Tags != nil {
objectMap["tags"] = rup.Tags
}
return json.Marshal(objectMap)
}
// Request the request that generated the event.
type Request struct {
// ID - The ID of the request that initiated the event.
ID *string `json:"id,omitempty"`
// Addr - The IP or hostname and possibly port of the client connection that initiated the event. This is the RemoteAddr from the standard http request.
Addr *string `json:"addr,omitempty"`
// Host - The externally accessible hostname of the registry instance, as specified by the http host header on incoming requests.
Host *string `json:"host,omitempty"`
// Method - The request method that generated the event.
Method *string `json:"method,omitempty"`
// Useragent - The user agent header of the request.
Useragent *string `json:"useragent,omitempty"`
}
// Resource an Azure resource.
type Resource struct {
// ID - The resource ID.
ID *string `json:"id,omitempty"`
// Name - The name of the resource.
Name *string `json:"name,omitempty"`
// Type - The type of the resource.
Type *string `json:"type,omitempty"`
// Location - The location of the resource. This cannot be changed after the resource is created.
Location *string `json:"location,omitempty"`
// Tags - The tags of the resource.
Tags map[string]*string `json:"tags"`
}
// MarshalJSON is the custom marshaler for Resource.
func (r Resource) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if r.ID != nil {
objectMap["id"] = r.ID
}
if r.Name != nil {
objectMap["name"] = r.Name
}
if r.Type != nil {
objectMap["type"] = r.Type
}
if r.Location != nil {
objectMap["location"] = r.Location
}
if r.Tags != nil {
objectMap["tags"] = r.Tags
}
return json.Marshal(objectMap)
}
// Run the run resource and its properties.
type Run struct {
autorest.Response `json:"-"`
// RunProperties - The properties of a run.
*RunProperties `json:"properties,omitempty"`
// ID - The resource ID.
ID *string `json:"id,omitempty"`
// Name - The name of the resource.
Name *string `json:"name,omitempty"`
// Type - The type of the resource.
Type *string `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for Run.
func (r Run) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if r.RunProperties != nil {
objectMap["properties"] = r.RunProperties
}
if r.ID != nil {
objectMap["id"] = r.ID
}
if r.Name != nil {
objectMap["name"] = r.Name
}
if r.Type != nil {
objectMap["type"] = r.Type
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for Run struct.
func (r *Run) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "properties":
if v != nil {
var runProperties RunProperties
err = json.Unmarshal(*v, &runProperties)
if err != nil {
return err
}
r.RunProperties = &runProperties
}
case "id":
if v != nil {
var ID string
err = json.Unmarshal(*v, &ID)
if err != nil {
return err
}
r.ID = &ID
}
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
r.Name = &name
}
case "type":
if v != nil {
var typeVar string
err = json.Unmarshal(*v, &typeVar)
if err != nil {
return err
}
r.Type = &typeVar
}
}
}
return nil
}
// RunFilter properties that are enabled for Odata querying on runs.
type RunFilter struct {
// RunID - The unique identifier for the run.
RunID *string `json:"runId,omitempty"`
// RunType - The type of run. Possible values include: 'QuickBuild', 'QuickRun', 'AutoBuild', 'AutoRun'
RunType RunType `json:"runType,omitempty"`
// Status - The current status of the run. Possible values include: 'RunStatusQueued', 'RunStatusStarted', 'RunStatusRunning', 'RunStatusSucceeded', 'RunStatusFailed', 'RunStatusCanceled', 'RunStatusError', 'RunStatusTimeout'
Status RunStatus `json:"status,omitempty"`
// CreateTime - The create time for a run.
CreateTime *date.Time `json:"createTime,omitempty"`
// FinishTime - The time the run finished.
FinishTime *date.Time `json:"finishTime,omitempty"`
// OutputImageManifests - The list of comma-separated image manifests that were generated from the run. This is applicable if the run is of
// build type.
OutputImageManifests *string `json:"outputImageManifests,omitempty"`
// IsArchiveEnabled - The value that indicates whether archiving is enabled or not.
IsArchiveEnabled *bool `json:"isArchiveEnabled,omitempty"`
// TaskName - The name of the task that the run corresponds to.
TaskName *string `json:"taskName,omitempty"`
}
// RunGetLogResult the result of the get log link operation.
type RunGetLogResult struct {
autorest.Response `json:"-"`
// LogLink - The link to logs for a run on an Azure container registry.
LogLink *string `json:"logLink,omitempty"`
}
// RunListResult collection of runs.
type RunListResult struct {
autorest.Response `json:"-"`
// Value - The collection value.
Value *[]Run `json:"value,omitempty"`
// NextLink - The URI that can be used to request the next set of paged results.
NextLink *string `json:"nextLink,omitempty"`
}
// RunListResultIterator provides access to a complete listing of Run values.
type RunListResultIterator struct {
i int
page RunListResultPage
}
// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *RunListResultIterator) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/RunListResultIterator.NextWithContext")
defer func() {
sc := -1
if iter.Response().Response.Response != nil {
sc = iter.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
}
iter.i = 0
return nil
}
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (iter *RunListResultIterator) Next() error {
return iter.NextWithContext(context.Background())
}
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter RunListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
// Response returns the raw server response from the last page request.
func (iter RunListResultIterator) Response() RunListResult {
return iter.page.Response()
}
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter RunListResultIterator) Value() Run {
if !iter.page.NotDone() {
return Run{}
}
return iter.page.Values()[iter.i]
}
// Creates a new instance of the RunListResultIterator type.
func NewRunListResultIterator(page RunListResultPage) RunListResultIterator {
return RunListResultIterator{page: page}
}
// IsEmpty returns true if the ListResult contains no values.
func (rlr RunListResult) IsEmpty() bool {
return rlr.Value == nil || len(*rlr.Value) == 0
}
// runListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (rlr RunListResult) runListResultPreparer(ctx context.Context) (*http.Request, error) {
if rlr.NextLink == nil || len(to.String(rlr.NextLink)) < 1 {
return nil, nil
}
return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(rlr.NextLink)))
}
// RunListResultPage contains a page of Run values.
type RunListResultPage struct {
fn func(context.Context, RunListResult) (RunListResult, error)
rlr RunListResult
}
// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *RunListResultPage) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/RunListResultPage.NextWithContext")
defer func() {
sc := -1
if page.Response().Response.Response != nil {
sc = page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
next, err := page.fn(ctx, page.rlr)
if err != nil {
return err
}
page.rlr = next
return nil
}
// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *RunListResultPage) Next() error {
return page.NextWithContext(context.Background())
}
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page RunListResultPage) NotDone() bool {
return !page.rlr.IsEmpty()
}
// Response returns the raw server response from the last page request.
func (page RunListResultPage) Response() RunListResult {
return page.rlr
}
// Values returns the slice of values for the current page or nil if there are no values.
func (page RunListResultPage) Values() []Run {
if page.rlr.IsEmpty() {
return nil
}
return *page.rlr.Value
}
// Creates a new instance of the RunListResultPage type.
func NewRunListResultPage(getNextPage func(context.Context, RunListResult) (RunListResult, error)) RunListResultPage {
return RunListResultPage{fn: getNextPage}
}
// RunProperties the properties for a run.
type RunProperties struct {
// RunID - The unique identifier for the run.
RunID *string `json:"runId,omitempty"`
// Status - The current status of the run. Possible values include: 'RunStatusQueued', 'RunStatusStarted', 'RunStatusRunning', 'RunStatusSucceeded', 'RunStatusFailed', 'RunStatusCanceled', 'RunStatusError', 'RunStatusTimeout'
Status RunStatus `json:"status,omitempty"`
// LastUpdatedTime - The last updated time for the run.
LastUpdatedTime *date.Time `json:"lastUpdatedTime,omitempty"`
// RunType - The type of run. Possible values include: 'QuickBuild', 'QuickRun', 'AutoBuild', 'AutoRun'
RunType RunType `json:"runType,omitempty"`
// CreateTime - The time the run was scheduled.
CreateTime *date.Time `json:"createTime,omitempty"`
// StartTime - The time the run started.
StartTime *date.Time `json:"startTime,omitempty"`
// FinishTime - The time the run finished.
FinishTime *date.Time `json:"finishTime,omitempty"`
// OutputImages - The list of all images that were generated from the run. This is applicable if the run generates base image dependencies.
OutputImages *[]ImageDescriptor `json:"outputImages,omitempty"`
// Task - The task against which the run was scheduled.
Task *string `json:"task,omitempty"`
// ImageUpdateTrigger - The image update trigger that caused the run. This is applicable if the task has base image trigger configured.
ImageUpdateTrigger *ImageUpdateTrigger `json:"imageUpdateTrigger,omitempty"`
// SourceTrigger - The source trigger that caused the run.
SourceTrigger *SourceTriggerDescriptor `json:"sourceTrigger,omitempty"`
// Platform - The platform properties against which the run will happen.
Platform *PlatformProperties `json:"platform,omitempty"`
// AgentConfiguration - The machine configuration of the run agent.
AgentConfiguration *AgentProperties `json:"agentConfiguration,omitempty"`
// SourceRegistryAuth - The scope of the credentials that were used to login to the source registry during this run.
SourceRegistryAuth *string `json:"sourceRegistryAuth,omitempty"`
// CustomRegistries - The list of custom registries that were logged in during this run.
CustomRegistries *[]string `json:"customRegistries,omitempty"`
// ProvisioningState - The provisioning state of a run. Possible values include: 'Creating', 'Updating', 'Deleting', 'Succeeded', 'Failed', 'Canceled'
ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
// IsArchiveEnabled - The value that indicates whether archiving is enabled or not.
IsArchiveEnabled *bool `json:"isArchiveEnabled,omitempty"`
}
// BasicRunRequest the request parameters for scheduling a run.
type BasicRunRequest interface {
AsDockerBuildRequest() (*DockerBuildRequest, bool)
AsFileTaskRunRequest() (*FileTaskRunRequest, bool)
AsTaskRunRequest() (*TaskRunRequest, bool)
AsEncodedTaskRunRequest() (*EncodedTaskRunRequest, bool)
AsRunRequest() (*RunRequest, bool)
}
// RunRequest the request parameters for scheduling a run.
type RunRequest struct {
// IsArchiveEnabled - The value that indicates whether archiving is enabled for the run or not.
IsArchiveEnabled *bool `json:"isArchiveEnabled,omitempty"`
// Type - Possible values include: 'TypeRunRequest', 'TypeDockerBuildRequest', 'TypeFileTaskRunRequest', 'TypeTaskRunRequest', 'TypeEncodedTaskRunRequest'
Type Type `json:"type,omitempty"`
}
func unmarshalBasicRunRequest(body []byte) (BasicRunRequest, error) {
var m map[string]interface{}
err := json.Unmarshal(body, &m)
if err != nil {
return nil, err
}
switch m["type"] {
case string(TypeDockerBuildRequest):
var dbr DockerBuildRequest
err := json.Unmarshal(body, &dbr)
return dbr, err
case string(TypeFileTaskRunRequest):
var ftrr FileTaskRunRequest
err := json.Unmarshal(body, &ftrr)
return ftrr, err
case string(TypeTaskRunRequest):
var trr TaskRunRequest
err := json.Unmarshal(body, &trr)
return trr, err
case string(TypeEncodedTaskRunRequest):
var etrr EncodedTaskRunRequest
err := json.Unmarshal(body, &etrr)
return etrr, err
default:
var rr RunRequest
err := json.Unmarshal(body, &rr)
return rr, err
}
}
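// Illustrative sketch (assumption, not generated code): the "type" discriminator
// decides which concrete request the helper above returns; the wire values are
// assumed to match the Type constants (e.g. TypeDockerBuildRequest ->
// "DockerBuildRequest").
//
//    body := []byte(`{"type":"DockerBuildRequest","isArchiveEnabled":true}`)
//    rr, _ := unmarshalBasicRunRequest(body)
//    if dbr, ok := rr.AsDockerBuildRequest(); ok {
//        _ = dbr // concrete DockerBuildRequest value
//    }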
func unmarshalBasicRunRequestArray(body []byte) ([]BasicRunRequest, error) {
var rawMessages []*json.RawMessage
err := json.Unmarshal(body, &rawMessages)
if err != nil {
return nil, err
}
rrArray := make([]BasicRunRequest, len(rawMessages))
for index, rawMessage := range rawMessages {
rr, err := unmarshalBasicRunRequest(*rawMessage)
if err != nil {
return nil, err
}
rrArray[index] = rr
}
return rrArray, nil
}
// MarshalJSON is the custom marshaler for RunRequest.
func (rr RunRequest) MarshalJSON() ([]byte, error) {
rr.Type = TypeRunRequest
objectMap := make(map[string]interface{})
if rr.IsArchiveEnabled != nil {
objectMap["isArchiveEnabled"] = rr.IsArchiveEnabled
}
if rr.Type != "" {
objectMap["type"] = rr.Type
}
return json.Marshal(objectMap)
}
// AsDockerBuildRequest is the BasicRunRequest implementation for RunRequest.
func (rr RunRequest) AsDockerBuildRequest() (*DockerBuildRequest, bool) {
return nil, false
}
// AsFileTaskRunRequest is the BasicRunRequest implementation for RunRequest.
func (rr RunRequest) AsFileTaskRunRequest() (*FileTaskRunRequest, bool) {
return nil, false
}
// AsTaskRunRequest is the BasicRunRequest implementation for RunRequest.
func (rr RunRequest) AsTaskRunRequest() (*TaskRunRequest, bool) {
return nil, false
}
// AsEncodedTaskRunRequest is the BasicRunRequest implementation for RunRequest.
func (rr RunRequest) AsEncodedTaskRunRequest() (*EncodedTaskRunRequest, bool) {
return nil, false
}
// AsRunRequest is the BasicRunRequest implementation for RunRequest.
func (rr RunRequest) AsRunRequest() (*RunRequest, bool) {
return &rr, true
}
// AsBasicRunRequest is the BasicRunRequest implementation for RunRequest.
func (rr RunRequest) AsBasicRunRequest() (BasicRunRequest, bool) {
return &rr, true
}
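// Illustrative sketch (assumption, not generated code): callers holding a
// BasicRunRequest use the As* methods as a checked downcast instead of a type
// switch.
//
//    if ftrr, ok := req.AsFileTaskRunRequest(); ok {
//        _ = ftrr // handle the file task run request
//    } else if base, ok := req.AsRunRequest(); ok {
//        _ = base // plain RunRequest fields only
//    }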
// RunsCancelFuture an abstraction for monitoring and retrieving the results of a long-running operation.
type RunsCancelFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *RunsCancelFuture) Result(client RunsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.Done(client)
if err != nil {
err = autorest.NewErrorWithError(err, "containerregistry.RunsCancelFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("containerregistry.RunsCancelFuture")
return
}
ar.Response = future.Response()
return
}
// RunsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation.
type RunsUpdateFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *RunsUpdateFuture) Result(client RunsClient) (r Run, err error) {
var done bool
done, err = future.Done(client)
if err != nil {
err = autorest.NewErrorWithError(err, "containerregistry.RunsUpdateFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("containerregistry.RunsUpdateFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if r.Response.Response, err = future.GetResult(sender); err == nil && r.Response.Response.StatusCode != http.StatusNoContent {
r, err = client.UpdateResponder(r.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "containerregistry.RunsUpdateFuture", "Result", r.Response.Response, "Failure responding to request")
}
}
return
}
// RunUpdateParameters the set of run properties that can be updated.
type RunUpdateParameters struct {
// IsArchiveEnabled - The value that indicates whether archiving is enabled or not.
IsArchiveEnabled *bool `json:"isArchiveEnabled,omitempty"`
}
// SecretObject describes the properties of a secret object value.
type SecretObject struct {
// Value - The value of the secret. The format of this value will be determined
// based on the type of the secret object. If the type is Opaque, the value will be
// used as is without any modification.
Value *string `json:"value,omitempty"`
// Type - The type of the secret object which determines how the value of the secret object has to be
// interpreted. Possible values include: 'Opaque'
Type SecretObjectType `json:"type,omitempty"`
}
// SetValue the properties of an overridable value that can be passed to a task template.
type SetValue struct {
// Name - The name of the overridable value.
Name *string `json:"name,omitempty"`
// Value - The overridable value.
Value *string `json:"value,omitempty"`
// IsSecret - Flag to indicate whether the value represents a secret or not.
IsSecret *bool `json:"isSecret,omitempty"`
}
// Sku the SKU of a container registry.
type Sku struct {
// Name - The SKU name of the container registry. Required for registry creation. Possible values include: 'Classic', 'Basic', 'Standard', 'Premium'
Name SkuName `json:"name,omitempty"`
// Tier - The SKU tier based on the SKU name. Possible values include: 'SkuTierClassic', 'SkuTierBasic', 'SkuTierStandard', 'SkuTierPremium'
Tier SkuTier `json:"tier,omitempty"`
}
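// Illustrative sketch (assumption, not generated code): only Name is required
// when creating a registry; per the field comment, Tier is based on the SKU
// name.
//
//    sku := Sku{Name: Premium}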
// Source the registry node that generated the event. Put differently, while the actor initiates the event,
// the source generates it.
type Source struct {
// Addr - The IP or hostname and the port of the registry node that generated the event. Generally, this will be resolved by os.Hostname() along with the running port.
Addr *string `json:"addr,omitempty"`
// InstanceID - The running instance of an application. Changes after each restart.
InstanceID *string `json:"instanceID,omitempty"`
}
// SourceProperties the properties of the source code repository.
type SourceProperties struct {
// SourceControlType - The type of source control service. Possible values include: 'Github', 'VisualStudioTeamService'
SourceControlType SourceControlType `json:"sourceControlType,omitempty"`
// RepositoryURL - The full URL to the source code repository
RepositoryURL *string `json:"repositoryUrl,omitempty"`
// Branch - The branch name of the source code.
Branch *string `json:"branch,omitempty"`
// SourceControlAuthProperties - The authorization properties for accessing the source code repository and to set up
// webhooks for notifications.
SourceControlAuthProperties *AuthInfo `json:"sourceControlAuthProperties,omitempty"`
}
// SourceRegistryCredentials describes the credential parameters for accessing the source registry.
type SourceRegistryCredentials struct {
// LoginMode - The authentication mode which determines the source registry login scope. The credentials for the source registry
// will be generated using the given scope. These credentials will be used to login to
// the source registry during the run. Possible values include: 'None', 'Default'
LoginMode SourceRegistryLoginMode `json:"loginMode,omitempty"`
}
// SourceTrigger the properties of a source based trigger.
type SourceTrigger struct {
// SourceRepository - The properties that describe the source (code) for the task.
SourceRepository *SourceProperties `json:"sourceRepository,omitempty"`
// SourceTriggerEvents - The source event corresponding to the trigger.
SourceTriggerEvents *[]SourceTriggerEvent `json:"sourceTriggerEvents,omitempty"`
// Status - The current status of trigger. Possible values include: 'TriggerStatusDisabled', 'TriggerStatusEnabled'
Status TriggerStatus `json:"status,omitempty"`
// Name - The name of the trigger.
Name *string `json:"name,omitempty"`
}
// SourceTriggerDescriptor the source trigger that caused a run.
type SourceTriggerDescriptor struct {
// ID - The unique ID of the trigger.
ID *string `json:"id,omitempty"`
// EventType - The event type of the trigger.
EventType *string `json:"eventType,omitempty"`
// CommitID - The unique ID that identifies a commit.
CommitID *string `json:"commitId,omitempty"`
// PullRequestID - The unique ID that identifies a pull request.
PullRequestID *string `json:"pullRequestId,omitempty"`
// RepositoryURL - The repository URL.
RepositoryURL *string `json:"repositoryUrl,omitempty"`
// BranchName - The branch name in the repository.
BranchName *string `json:"branchName,omitempty"`
// ProviderType - The source control provider type.
ProviderType *string `json:"providerType,omitempty"`
}
// SourceTriggerUpdateParameters the properties for updating a source based trigger.
type SourceTriggerUpdateParameters struct {
// SourceRepository - The properties that describe the source (code) for the task.
SourceRepository *SourceUpdateParameters `json:"sourceRepository,omitempty"`
// SourceTriggerEvents - The source event corresponding to the trigger.
SourceTriggerEvents *[]SourceTriggerEvent `json:"sourceTriggerEvents,omitempty"`
// Status - The current status of trigger. Possible values include: 'TriggerStatusDisabled', 'TriggerStatusEnabled'
Status TriggerStatus `json:"status,omitempty"`
// Name - The name of the trigger.
Name *string `json:"name,omitempty"`
}
// SourceUpdateParameters the properties for updating the source code repository.
type SourceUpdateParameters struct {
// SourceControlType - The type of source control service. Possible values include: 'Github', 'VisualStudioTeamService'
SourceControlType SourceControlType `json:"sourceControlType,omitempty"`
// RepositoryURL - The full URL to the source code repository
RepositoryURL *string `json:"repositoryUrl,omitempty"`
// Branch - The branch name of the source code.
Branch *string `json:"branch,omitempty"`
// SourceControlAuthProperties - The authorization properties for accessing the source code repository and to set up
// webhooks for notifications.
SourceControlAuthProperties *AuthInfoUpdateParameters `json:"sourceControlAuthProperties,omitempty"`
}
// SourceUploadDefinition the properties of a response to source upload request.
type SourceUploadDefinition struct {
autorest.Response `json:"-"`
// UploadURL - The URL where the client can upload the source.
UploadURL *string `json:"uploadUrl,omitempty"`
// RelativePath - The relative path to the source. This is used to submit the subsequent queue build request.
RelativePath *string `json:"relativePath,omitempty"`
}
// Status the status of an Azure resource at the time the operation was called.
type Status struct {
// DisplayStatus - The short label for the status.
DisplayStatus *string `json:"displayStatus,omitempty"`
// Message - The detailed message for the status, including alerts and error messages.
Message *string `json:"message,omitempty"`
// Timestamp - The timestamp when the status was changed to the current value.
Timestamp *date.Time `json:"timestamp,omitempty"`
}
// StorageAccountProperties the properties of a storage account for a container registry. Only applicable
// to Classic SKU.
type StorageAccountProperties struct {
// ID - The resource ID of the storage account.
ID *string `json:"id,omitempty"`
}
// Target the target of the event.
type Target struct {
// MediaType - The MIME type of the referenced object.
MediaType *string `json:"mediaType,omitempty"`
// Size - The number of bytes of the content. Same as Length field.
Size *int64 `json:"size,omitempty"`
// Digest - The digest of the content, as defined by the Registry V2 HTTP API Specification.
Digest *string `json:"digest,omitempty"`
// Length - The number of bytes of the content. Same as Size field.
Length *int64 `json:"length,omitempty"`
// Repository - The repository name.
Repository *string `json:"repository,omitempty"`
// URL - The direct URL to the content.
URL *string `json:"url,omitempty"`
// Tag - The tag name.
Tag *string `json:"tag,omitempty"`
}
// Task the task that has the ARM resource and task properties.
// The task will have all information to schedule a run against it.
type Task struct {
autorest.Response `json:"-"`
// TaskProperties - The properties of a task.
*TaskProperties `json:"properties,omitempty"`
// ID - The resource ID.
ID *string `json:"id,omitempty"`
// Name - The name of the resource.
Name *string `json:"name,omitempty"`
// Type - The type of the resource.
Type *string `json:"type,omitempty"`
// Location - The location of the resource. This cannot be changed after the resource is created.
Location *string `json:"location,omitempty"`
// Tags - The tags of the resource.
Tags map[string]*string `json:"tags"`
}
// MarshalJSON is the custom marshaler for Task.
func (t Task) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if t.TaskProperties != nil {
objectMap["properties"] = t.TaskProperties
}
if t.ID != nil {
objectMap["id"] = t.ID
}
if t.Name != nil {
objectMap["name"] = t.Name
}
if t.Type != nil {
objectMap["type"] = t.Type
}
if t.Location != nil {
objectMap["location"] = t.Location
}
if t.Tags != nil {
objectMap["tags"] = t.Tags
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for Task struct.
func (t *Task) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "properties":
if v != nil {
var taskProperties TaskProperties
err = json.Unmarshal(*v, &taskProperties)
if err != nil {
return err
}
t.TaskProperties = &taskProperties
}
case "id":
if v != nil {
var ID string
err = json.Unmarshal(*v, &ID)
if err != nil {
return err
}
t.ID = &ID
}
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
t.Name = &name
}
case "type":
if v != nil {
var typeVar string
err = json.Unmarshal(*v, &typeVar)
if err != nil {
return err
}
t.Type = &typeVar
}
case "location":
if v != nil {
var location string
err = json.Unmarshal(*v, &location)
if err != nil {
return err
}
t.Location = &location
}
case "tags":
if v != nil {
var tags map[string]*string
err = json.Unmarshal(*v, &tags)
if err != nil {
return err
}
t.Tags = tags
}
}
}
return nil
}
// TaskListResult the collection of tasks.
type TaskListResult struct {
autorest.Response `json:"-"`
// Value - The collection value.
Value *[]Task `json:"value,omitempty"`
// NextLink - The URI that can be used to request the next set of paged results.
NextLink *string `json:"nextLink,omitempty"`
}
// TaskListResultIterator provides access to a complete listing of Task values.
type TaskListResultIterator struct {
i int
page TaskListResultPage
}
// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *TaskListResultIterator) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/TaskListResultIterator.NextWithContext")
defer func() {
sc := -1
if iter.Response().Response.Response != nil {
sc = iter.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
}
iter.i = 0
return nil
}
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (iter *TaskListResultIterator) Next() error {
return iter.NextWithContext(context.Background())
}
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter TaskListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
// Response returns the raw server response from the last page request.
func (iter TaskListResultIterator) Response() TaskListResult {
return iter.page.Response()
}
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter TaskListResultIterator) Value() Task {
if !iter.page.NotDone() {
return Task{}
}
return iter.page.Values()[iter.i]
}
// NewTaskListResultIterator creates a new instance of the TaskListResultIterator type.
func NewTaskListResultIterator(page TaskListResultPage) TaskListResultIterator {
return TaskListResultIterator{page: page}
}
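// A minimal usage sketch for the iterator above ("page" is a TaskListResultPage and
// "ctx" a context.Context; how the initial page is obtained, typically from the
// Tasks List operation, is left as an assumption):
//
//	iter := NewTaskListResultIterator(page)
//	for iter.NotDone() {
//		task := iter.Value()
//		_ = task // consume the Task
//		if err := iter.NextWithContext(ctx); err != nil {
//			break
//		}
//	}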
// IsEmpty returns true if the ListResult contains no values.
func (tlr TaskListResult) IsEmpty() bool {
return tlr.Value == nil || len(*tlr.Value) == 0
}
// taskListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (tlr TaskListResult) taskListResultPreparer(ctx context.Context) (*http.Request, error) {
if tlr.NextLink == nil || len(to.String(tlr.NextLink)) < 1 {
return nil, nil
}
return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(tlr.NextLink)))
}
// TaskListResultPage contains a page of Task values.
type TaskListResultPage struct {
fn func(context.Context, TaskListResult) (TaskListResult, error)
tlr TaskListResult
}
// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *TaskListResultPage) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/TaskListResultPage.NextWithContext")
defer func() {
sc := -1
if page.Response().Response.Response != nil {
sc = page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
next, err := page.fn(ctx, page.tlr)
if err != nil {
return err
}
page.tlr = next
return nil
}
// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *TaskListResultPage) Next() error {
return page.NextWithContext(context.Background())
}
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page TaskListResultPage) NotDone() bool {
return !page.tlr.IsEmpty()
}
// Response returns the raw server response from the last page request.
func (page TaskListResultPage) Response() TaskListResult {
return page.tlr
}
// Values returns the slice of values for the current page or nil if there are no values.
func (page TaskListResultPage) Values() []Task {
if page.tlr.IsEmpty() {
return nil
}
return *page.tlr.Value
}
// NewTaskListResultPage creates a new instance of the TaskListResultPage type.
func NewTaskListResultPage(getNextPage func(context.Context, TaskListResult) (TaskListResult, error)) TaskListResultPage {
return TaskListResultPage{fn: getNextPage}
}
// TaskProperties the properties of a task.
type TaskProperties struct {
// ProvisioningState - The provisioning state of the task. Possible values include: 'Creating', 'Updating', 'Deleting', 'Succeeded', 'Failed', 'Canceled'
ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
// CreationDate - The creation date of task.
CreationDate *date.Time `json:"creationDate,omitempty"`
// Status - The current status of task. Possible values include: 'TaskStatusDisabled', 'TaskStatusEnabled'
Status TaskStatus `json:"status,omitempty"`
// Platform - The platform properties against which the run has to happen.
Platform *PlatformProperties `json:"platform,omitempty"`
// AgentConfiguration - The machine configuration of the run agent.
AgentConfiguration *AgentProperties `json:"agentConfiguration,omitempty"`
// Timeout - Run timeout in seconds.
Timeout *int32 `json:"timeout,omitempty"`
// Step - The properties of a task step.
Step BasicTaskStepProperties `json:"step,omitempty"`
// Trigger - The properties that describe all triggers for the task.
Trigger *TriggerProperties `json:"trigger,omitempty"`
	// Credentials - The properties that describe a set of credentials that will be used when this run is invoked.
Credentials *Credentials `json:"credentials,omitempty"`
}
// UnmarshalJSON is the custom unmarshaler for TaskProperties struct.
func (tp *TaskProperties) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "provisioningState":
if v != nil {
var provisioningState ProvisioningState
err = json.Unmarshal(*v, &provisioningState)
if err != nil {
return err
}
tp.ProvisioningState = provisioningState
}
case "creationDate":
if v != nil {
var creationDate date.Time
err = json.Unmarshal(*v, &creationDate)
if err != nil {
return err
}
tp.CreationDate = &creationDate
}
case "status":
if v != nil {
var status TaskStatus
err = json.Unmarshal(*v, &status)
if err != nil {
return err
}
tp.Status = status
}
case "platform":
if v != nil {
var platform PlatformProperties
err = json.Unmarshal(*v, &platform)
if err != nil {
return err
}
tp.Platform = &platform
}
case "agentConfiguration":
if v != nil {
var agentConfiguration AgentProperties
err = json.Unmarshal(*v, &agentConfiguration)
if err != nil {
return err
}
tp.AgentConfiguration = &agentConfiguration
}
case "timeout":
if v != nil {
var timeout int32
err = json.Unmarshal(*v, &timeout)
if err != nil {
return err
}
tp.Timeout = &timeout
}
case "step":
if v != nil {
step, err := unmarshalBasicTaskStepProperties(*v)
if err != nil {
return err
}
tp.Step = step
}
case "trigger":
if v != nil {
var trigger TriggerProperties
err = json.Unmarshal(*v, &trigger)
if err != nil {
return err
}
tp.Trigger = &trigger
}
case "credentials":
if v != nil {
var credentials Credentials
err = json.Unmarshal(*v, &credentials)
if err != nil {
return err
}
tp.Credentials = &credentials
}
}
}
return nil
}
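// Note: the custom unmarshaler above is needed because the Step field holds the
// polymorphic BasicTaskStepProperties interface; unmarshalBasicTaskStepProperties
// inspects the "type" discriminator in the JSON to pick the concrete step type.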
// TaskPropertiesUpdateParameters the properties for updating a task.
type TaskPropertiesUpdateParameters struct {
// Status - The current status of task. Possible values include: 'TaskStatusDisabled', 'TaskStatusEnabled'
Status TaskStatus `json:"status,omitempty"`
// Platform - The platform properties against which the run has to happen.
Platform *PlatformUpdateParameters `json:"platform,omitempty"`
// AgentConfiguration - The machine configuration of the run agent.
AgentConfiguration *AgentProperties `json:"agentConfiguration,omitempty"`
// Timeout - Run timeout in seconds.
Timeout *int32 `json:"timeout,omitempty"`
// Step - The properties for updating a task step.
Step BasicTaskStepUpdateParameters `json:"step,omitempty"`
// Trigger - The properties for updating trigger properties.
Trigger *TriggerUpdateParameters `json:"trigger,omitempty"`
	// Credentials - The parameters that describe a set of credentials that will be used when this run is invoked.
Credentials *Credentials `json:"credentials,omitempty"`
}
// UnmarshalJSON is the custom unmarshaler for TaskPropertiesUpdateParameters struct.
func (tpup *TaskPropertiesUpdateParameters) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "status":
if v != nil {
var status TaskStatus
err = json.Unmarshal(*v, &status)
if err != nil {
return err
}
tpup.Status = status
}
case "platform":
if v != nil {
var platform PlatformUpdateParameters
err = json.Unmarshal(*v, &platform)
if err != nil {
return err
}
tpup.Platform = &platform
}
case "agentConfiguration":
if v != nil {
var agentConfiguration AgentProperties
err = json.Unmarshal(*v, &agentConfiguration)
if err != nil {
return err
}
tpup.AgentConfiguration = &agentConfiguration
}
case "timeout":
if v != nil {
var timeout int32
err = json.Unmarshal(*v, &timeout)
if err != nil {
return err
}
tpup.Timeout = &timeout
}
case "step":
if v != nil {
step, err := unmarshalBasicTaskStepUpdateParameters(*v)
if err != nil {
return err
}
tpup.Step = step
}
case "trigger":
if v != nil {
var trigger TriggerUpdateParameters
err = json.Unmarshal(*v, &trigger)
if err != nil {
return err
}
tpup.Trigger = &trigger
}
case "credentials":
if v != nil {
var credentials Credentials
err = json.Unmarshal(*v, &credentials)
if err != nil {
return err
}
tpup.Credentials = &credentials
}
}
}
return nil
}
// TaskRunRequest the parameters for a task run request.
type TaskRunRequest struct {
	// TaskName - The name of the task against which the run has to be queued.
TaskName *string `json:"taskName,omitempty"`
// Values - The collection of overridable values that can be passed when running a task.
Values *[]SetValue `json:"values,omitempty"`
// IsArchiveEnabled - The value that indicates whether archiving is enabled for the run or not.
IsArchiveEnabled *bool `json:"isArchiveEnabled,omitempty"`
// Type - Possible values include: 'TypeRunRequest', 'TypeDockerBuildRequest', 'TypeFileTaskRunRequest', 'TypeTaskRunRequest', 'TypeEncodedTaskRunRequest'
Type Type `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for TaskRunRequest.
func (trr TaskRunRequest) MarshalJSON() ([]byte, error) {
trr.Type = TypeTaskRunRequest
objectMap := make(map[string]interface{})
if trr.TaskName != nil {
objectMap["taskName"] = trr.TaskName
}
if trr.Values != nil {
objectMap["values"] = trr.Values
}
if trr.IsArchiveEnabled != nil {
objectMap["isArchiveEnabled"] = trr.IsArchiveEnabled
}
if trr.Type != "" {
objectMap["type"] = trr.Type
}
return json.Marshal(objectMap)
}
// AsDockerBuildRequest is the BasicRunRequest implementation for TaskRunRequest.
func (trr TaskRunRequest) AsDockerBuildRequest() (*DockerBuildRequest, bool) {
return nil, false
}
// AsFileTaskRunRequest is the BasicRunRequest implementation for TaskRunRequest.
func (trr TaskRunRequest) AsFileTaskRunRequest() (*FileTaskRunRequest, bool) {
return nil, false
}
// AsTaskRunRequest is the BasicRunRequest implementation for TaskRunRequest.
func (trr TaskRunRequest) AsTaskRunRequest() (*TaskRunRequest, bool) {
return &trr, true
}
// AsEncodedTaskRunRequest is the BasicRunRequest implementation for TaskRunRequest.
func (trr TaskRunRequest) AsEncodedTaskRunRequest() (*EncodedTaskRunRequest, bool) {
return nil, false
}
// AsRunRequest is the BasicRunRequest implementation for TaskRunRequest.
func (trr TaskRunRequest) AsRunRequest() (*RunRequest, bool) {
return nil, false
}
// AsBasicRunRequest is the BasicRunRequest implementation for TaskRunRequest.
func (trr TaskRunRequest) AsBasicRunRequest() (BasicRunRequest, bool) {
return &trr, true
}
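// A hedged construction sketch for TaskRunRequest (the helper to.StringPtr and the
// SetValue field names are assumptions, not defined in this file; MarshalJSON above
// pins Type to TypeTaskRunRequest automatically):
//
//	req := TaskRunRequest{
//		TaskName: to.StringPtr("myTask"),
//		Values:   &[]SetValue{{Name: to.StringPtr("tag"), Value: to.StringPtr("v1")}},
//	}
//	body, _ := json.Marshal(req)
//	_ = body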
// TasksCreateFuture an abstraction for monitoring and retrieving the results of a long-running operation.
type TasksCreateFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *TasksCreateFuture) Result(client TasksClient) (t Task, err error) {
var done bool
done, err = future.Done(client)
if err != nil {
err = autorest.NewErrorWithError(err, "containerregistry.TasksCreateFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("containerregistry.TasksCreateFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if t.Response.Response, err = future.GetResult(sender); err == nil && t.Response.Response.StatusCode != http.StatusNoContent {
t, err = client.CreateResponder(t.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "containerregistry.TasksCreateFuture", "Result", t.Response.Response, "Failure responding to request")
}
}
return
}
// TasksDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation.
type TasksDeleteFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *TasksDeleteFuture) Result(client TasksClient) (ar autorest.Response, err error) {
var done bool
done, err = future.Done(client)
if err != nil {
err = autorest.NewErrorWithError(err, "containerregistry.TasksDeleteFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("containerregistry.TasksDeleteFuture")
return
}
ar.Response = future.Response()
return
}
// BasicTaskStepProperties base properties for any task step.
type BasicTaskStepProperties interface {
AsDockerBuildStep() (*DockerBuildStep, bool)
AsFileTaskStep() (*FileTaskStep, bool)
AsEncodedTaskStep() (*EncodedTaskStep, bool)
AsTaskStepProperties() (*TaskStepProperties, bool)
}
// TaskStepProperties base properties for any task step.
type TaskStepProperties struct {
// BaseImageDependencies - List of base image dependencies for a step.
BaseImageDependencies *[]BaseImageDependency `json:"baseImageDependencies,omitempty"`
	// ContextPath - The URL (absolute or relative) of the source context for the task step.
ContextPath *string `json:"contextPath,omitempty"`
// ContextAccessToken - The token (git PAT or SAS token of storage account blob) associated with the context for a step.
ContextAccessToken *string `json:"contextAccessToken,omitempty"`
// Type - Possible values include: 'TypeTaskStepProperties', 'TypeDocker', 'TypeFileTask', 'TypeEncodedTask'
Type TypeBasicTaskStepProperties `json:"type,omitempty"`
}
func unmarshalBasicTaskStepProperties(body []byte) (BasicTaskStepProperties, error) {
var m map[string]interface{}
err := json.Unmarshal(body, &m)
if err != nil {
return nil, err
}
switch m["type"] {
case string(TypeDocker):
var dbs DockerBuildStep
err := json.Unmarshal(body, &dbs)
return dbs, err
case string(TypeFileTask):
var fts FileTaskStep
err := json.Unmarshal(body, &fts)
return fts, err
case string(TypeEncodedTask):
var ets EncodedTaskStep
err := json.Unmarshal(body, &ets)
return ets, err
default:
var tsp TaskStepProperties
err := json.Unmarshal(body, &tsp)
return tsp, err
}
}
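// For illustration, a payload whose "type" matches the string value of TypeDocker
// (the exact string is an assumption) is decoded into a DockerBuildStep by the
// switch above; unknown or missing types fall back to the base TaskStepProperties.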
func unmarshalBasicTaskStepPropertiesArray(body []byte) ([]BasicTaskStepProperties, error) {
var rawMessages []*json.RawMessage
err := json.Unmarshal(body, &rawMessages)
if err != nil {
return nil, err
}
tspArray := make([]BasicTaskStepProperties, len(rawMessages))
for index, rawMessage := range rawMessages {
tsp, err := unmarshalBasicTaskStepProperties(*rawMessage)
if err != nil {
return nil, err
}
tspArray[index] = tsp
}
return tspArray, nil
}
// MarshalJSON is the custom marshaler for TaskStepProperties.
func (tsp TaskStepProperties) MarshalJSON() ([]byte, error) {
tsp.Type = TypeTaskStepProperties
objectMap := make(map[string]interface{})
if tsp.BaseImageDependencies != nil {
objectMap["baseImageDependencies"] = tsp.BaseImageDependencies
}
if tsp.ContextPath != nil {
objectMap["contextPath"] = tsp.ContextPath
}
if tsp.ContextAccessToken != nil {
objectMap["contextAccessToken"] = tsp.ContextAccessToken
}
if tsp.Type != "" {
objectMap["type"] = tsp.Type
}
return json.Marshal(objectMap)
}
// AsDockerBuildStep is the BasicTaskStepProperties implementation for TaskStepProperties.
func (tsp TaskStepProperties) AsDockerBuildStep() (*DockerBuildStep, bool) {
return nil, false
}
// AsFileTaskStep is the BasicTaskStepProperties implementation for TaskStepProperties.
func (tsp TaskStepProperties) AsFileTaskStep() (*FileTaskStep, bool) {
return nil, false
}
// AsEncodedTaskStep is the BasicTaskStepProperties implementation for TaskStepProperties.
func (tsp TaskStepProperties) AsEncodedTaskStep() (*EncodedTaskStep, bool) {
return nil, false
}
// AsTaskStepProperties is the BasicTaskStepProperties implementation for TaskStepProperties.
func (tsp TaskStepProperties) AsTaskStepProperties() (*TaskStepProperties, bool) {
return &tsp, true
}
// AsBasicTaskStepProperties is the BasicTaskStepProperties implementation for TaskStepProperties.
func (tsp TaskStepProperties) AsBasicTaskStepProperties() (BasicTaskStepProperties, bool) {
return &tsp, true
}
// BasicTaskStepUpdateParameters base properties for updating any task step.
type BasicTaskStepUpdateParameters interface {
AsDockerBuildStepUpdateParameters() (*DockerBuildStepUpdateParameters, bool)
AsFileTaskStepUpdateParameters() (*FileTaskStepUpdateParameters, bool)
AsEncodedTaskStepUpdateParameters() (*EncodedTaskStepUpdateParameters, bool)
AsTaskStepUpdateParameters() (*TaskStepUpdateParameters, bool)
}
// TaskStepUpdateParameters base properties for updating any task step.
type TaskStepUpdateParameters struct {
	// ContextPath - The URL (absolute or relative) of the source context for the task step.
ContextPath *string `json:"contextPath,omitempty"`
// ContextAccessToken - The token (git PAT or SAS token of storage account blob) associated with the context for a step.
ContextAccessToken *string `json:"contextAccessToken,omitempty"`
// Type - Possible values include: 'TypeBasicTaskStepUpdateParametersTypeTaskStepUpdateParameters', 'TypeBasicTaskStepUpdateParametersTypeDocker', 'TypeBasicTaskStepUpdateParametersTypeFileTask', 'TypeBasicTaskStepUpdateParametersTypeEncodedTask'
Type TypeBasicTaskStepUpdateParameters `json:"type,omitempty"`
}
func unmarshalBasicTaskStepUpdateParameters(body []byte) (BasicTaskStepUpdateParameters, error) {
var m map[string]interface{}
err := json.Unmarshal(body, &m)
if err != nil {
return nil, err
}
switch m["type"] {
case string(TypeBasicTaskStepUpdateParametersTypeDocker):
var dbsup DockerBuildStepUpdateParameters
err := json.Unmarshal(body, &dbsup)
return dbsup, err
case string(TypeBasicTaskStepUpdateParametersTypeFileTask):
var ftsup FileTaskStepUpdateParameters
err := json.Unmarshal(body, &ftsup)
return ftsup, err
case string(TypeBasicTaskStepUpdateParametersTypeEncodedTask):
var etsup EncodedTaskStepUpdateParameters
err := json.Unmarshal(body, &etsup)
return etsup, err
default:
var tsup TaskStepUpdateParameters
err := json.Unmarshal(body, &tsup)
return tsup, err
}
}
func unmarshalBasicTaskStepUpdateParametersArray(body []byte) ([]BasicTaskStepUpdateParameters, error) {
var rawMessages []*json.RawMessage
err := json.Unmarshal(body, &rawMessages)
if err != nil {
return nil, err
}
tsupArray := make([]BasicTaskStepUpdateParameters, len(rawMessages))
for index, rawMessage := range rawMessages {
tsup, err := unmarshalBasicTaskStepUpdateParameters(*rawMessage)
if err != nil {
return nil, err
}
tsupArray[index] = tsup
}
return tsupArray, nil
}
// MarshalJSON is the custom marshaler for TaskStepUpdateParameters.
func (tsup TaskStepUpdateParameters) MarshalJSON() ([]byte, error) {
tsup.Type = TypeBasicTaskStepUpdateParametersTypeTaskStepUpdateParameters
objectMap := make(map[string]interface{})
if tsup.ContextPath != nil {
objectMap["contextPath"] = tsup.ContextPath
}
if tsup.ContextAccessToken != nil {
objectMap["contextAccessToken"] = tsup.ContextAccessToken
}
if tsup.Type != "" {
objectMap["type"] = tsup.Type
}
return json.Marshal(objectMap)
}
// AsDockerBuildStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for TaskStepUpdateParameters.
func (tsup TaskStepUpdateParameters) AsDockerBuildStepUpdateParameters() (*DockerBuildStepUpdateParameters, bool) {
return nil, false
}
// AsFileTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for TaskStepUpdateParameters.
func (tsup TaskStepUpdateParameters) AsFileTaskStepUpdateParameters() (*FileTaskStepUpdateParameters, bool) {
return nil, false
}
// AsEncodedTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for TaskStepUpdateParameters.
func (tsup TaskStepUpdateParameters) AsEncodedTaskStepUpdateParameters() (*EncodedTaskStepUpdateParameters, bool) {
return nil, false
}
// AsTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for TaskStepUpdateParameters.
func (tsup TaskStepUpdateParameters) AsTaskStepUpdateParameters() (*TaskStepUpdateParameters, bool) {
return &tsup, true
}
// AsBasicTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for TaskStepUpdateParameters.
func (tsup TaskStepUpdateParameters) AsBasicTaskStepUpdateParameters() (BasicTaskStepUpdateParameters, bool) {
return &tsup, true
}
// TasksUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation.
type TasksUpdateFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *TasksUpdateFuture) Result(client TasksClient) (t Task, err error) {
var done bool
done, err = future.Done(client)
if err != nil {
err = autorest.NewErrorWithError(err, "containerregistry.TasksUpdateFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("containerregistry.TasksUpdateFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if t.Response.Response, err = future.GetResult(sender); err == nil && t.Response.Response.StatusCode != http.StatusNoContent {
t, err = client.UpdateResponder(t.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "containerregistry.TasksUpdateFuture", "Result", t.Response.Response, "Failure responding to request")
}
}
return
}
// TaskUpdateParameters the parameters for updating a task.
type TaskUpdateParameters struct {
// TaskPropertiesUpdateParameters - The properties for updating a task.
*TaskPropertiesUpdateParameters `json:"properties,omitempty"`
// Tags - The ARM resource tags.
Tags map[string]*string `json:"tags"`
}
// MarshalJSON is the custom marshaler for TaskUpdateParameters.
func (tup TaskUpdateParameters) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if tup.TaskPropertiesUpdateParameters != nil {
objectMap["properties"] = tup.TaskPropertiesUpdateParameters
}
if tup.Tags != nil {
objectMap["tags"] = tup.Tags
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for TaskUpdateParameters struct.
func (tup *TaskUpdateParameters) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "properties":
if v != nil {
var taskPropertiesUpdateParameters TaskPropertiesUpdateParameters
err = json.Unmarshal(*v, &taskPropertiesUpdateParameters)
if err != nil {
return err
}
tup.TaskPropertiesUpdateParameters = &taskPropertiesUpdateParameters
}
case "tags":
if v != nil {
var tags map[string]*string
err = json.Unmarshal(*v, &tags)
if err != nil {
return err
}
tup.Tags = tags
}
}
}
return nil
}
// TriggerProperties the properties of a trigger.
type TriggerProperties struct {
// SourceTriggers - The collection of triggers based on source code repository.
SourceTriggers *[]SourceTrigger `json:"sourceTriggers,omitempty"`
// BaseImageTrigger - The trigger based on base image dependencies.
BaseImageTrigger *BaseImageTrigger `json:"baseImageTrigger,omitempty"`
}
// TriggerUpdateParameters the properties for updating triggers.
type TriggerUpdateParameters struct {
// SourceTriggers - The collection of triggers based on source code repository.
SourceTriggers *[]SourceTriggerUpdateParameters `json:"sourceTriggers,omitempty"`
// BaseImageTrigger - The trigger based on base image dependencies.
BaseImageTrigger *BaseImageTriggerUpdateParameters `json:"baseImageTrigger,omitempty"`
}
// TrustPolicy an object that represents content trust policy for a container registry.
type TrustPolicy struct {
// Type - The type of trust policy. Possible values include: 'Notary'
Type TrustPolicyType `json:"type,omitempty"`
// Status - The value that indicates whether the policy is enabled or not. Possible values include: 'Enabled', 'Disabled'
Status PolicyStatus `json:"status,omitempty"`
}
// VirtualNetworkRule virtual network rule.
type VirtualNetworkRule struct {
// Action - The action of virtual network rule. Possible values include: 'Allow'
Action Action `json:"action,omitempty"`
// VirtualNetworkResourceID - Resource ID of a subnet, for example: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}.
VirtualNetworkResourceID *string `json:"id,omitempty"`
}
// Webhook an object that represents a webhook for a container registry.
type Webhook struct {
autorest.Response `json:"-"`
// WebhookProperties - The properties of the webhook.
*WebhookProperties `json:"properties,omitempty"`
// ID - The resource ID.
ID *string `json:"id,omitempty"`
// Name - The name of the resource.
Name *string `json:"name,omitempty"`
// Type - The type of the resource.
Type *string `json:"type,omitempty"`
// Location - The location of the resource. This cannot be changed after the resource is created.
Location *string `json:"location,omitempty"`
// Tags - The tags of the resource.
Tags map[string]*string `json:"tags"`
}
// MarshalJSON is the custom marshaler for Webhook.
func (w Webhook) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if w.WebhookProperties != nil {
objectMap["properties"] = w.WebhookProperties
}
if w.ID != nil {
objectMap["id"] = w.ID
}
if w.Name != nil {
objectMap["name"] = w.Name
}
if w.Type != nil {
objectMap["type"] = w.Type
}
if w.Location != nil {
objectMap["location"] = w.Location
}
if w.Tags != nil {
objectMap["tags"] = w.Tags
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for Webhook struct.
func (w *Webhook) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "properties":
if v != nil {
var webhookProperties WebhookProperties
err = json.Unmarshal(*v, &webhookProperties)
if err != nil {
return err
}
w.WebhookProperties = &webhookProperties
}
case "id":
if v != nil {
var ID string
err = json.Unmarshal(*v, &ID)
if err != nil {
return err
}
w.ID = &ID
}
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
w.Name = &name
}
case "type":
if v != nil {
var typeVar string
err = json.Unmarshal(*v, &typeVar)
if err != nil {
return err
}
w.Type = &typeVar
}
case "location":
if v != nil {
var location string
err = json.Unmarshal(*v, &location)
if err != nil {
return err
}
w.Location = &location
}
case "tags":
if v != nil {
var tags map[string]*string
err = json.Unmarshal(*v, &tags)
if err != nil {
return err
}
w.Tags = tags
}
}
}
return nil
}
// WebhookCreateParameters the parameters for creating a webhook.
type WebhookCreateParameters struct {
// Tags - The tags for the webhook.
Tags map[string]*string `json:"tags"`
// Location - The location of the webhook. This cannot be changed after the resource is created.
Location *string `json:"location,omitempty"`
// WebhookPropertiesCreateParameters - The properties that the webhook will be created with.
*WebhookPropertiesCreateParameters `json:"properties,omitempty"`
}
// MarshalJSON is the custom marshaler for WebhookCreateParameters.
func (wcp WebhookCreateParameters) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if wcp.Tags != nil {
objectMap["tags"] = wcp.Tags
}
if wcp.Location != nil {
objectMap["location"] = wcp.Location
}
if wcp.WebhookPropertiesCreateParameters != nil {
objectMap["properties"] = wcp.WebhookPropertiesCreateParameters
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for WebhookCreateParameters struct.
func (wcp *WebhookCreateParameters) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "tags":
if v != nil {
var tags map[string]*string
err = json.Unmarshal(*v, &tags)
if err != nil {
return err
}
wcp.Tags = tags
}
case "location":
if v != nil {
var location string
err = json.Unmarshal(*v, &location)
if err != nil {
return err
}
wcp.Location = &location
}
case "properties":
if v != nil {
var webhookPropertiesCreateParameters WebhookPropertiesCreateParameters
err = json.Unmarshal(*v, &webhookPropertiesCreateParameters)
if err != nil {
return err
}
wcp.WebhookPropertiesCreateParameters = &webhookPropertiesCreateParameters
}
}
}
return nil
}
// WebhookListResult the result of a request to list webhooks for a container registry.
type WebhookListResult struct {
autorest.Response `json:"-"`
// Value - The list of webhooks. Since this list may be incomplete, the nextLink field should be used to request the next list of webhooks.
Value *[]Webhook `json:"value,omitempty"`
// NextLink - The URI that can be used to request the next list of webhooks.
NextLink *string `json:"nextLink,omitempty"`
}
// WebhookListResultIterator provides access to a complete listing of Webhook values.
type WebhookListResultIterator struct {
i int
page WebhookListResultPage
}
// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *WebhookListResultIterator) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/WebhookListResultIterator.NextWithContext")
defer func() {
sc := -1
if iter.Response().Response.Response != nil {
sc = iter.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
}
iter.i = 0
return nil
}
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (iter *WebhookListResultIterator) Next() error {
return iter.NextWithContext(context.Background())
}
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter WebhookListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
// Response returns the raw server response from the last page request.
func (iter WebhookListResultIterator) Response() WebhookListResult {
return iter.page.Response()
}
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter WebhookListResultIterator) Value() Webhook {
if !iter.page.NotDone() {
return Webhook{}
}
return iter.page.Values()[iter.i]
}
// NewWebhookListResultIterator creates a new instance of the WebhookListResultIterator type.
func NewWebhookListResultIterator(page WebhookListResultPage) WebhookListResultIterator {
return WebhookListResultIterator{page: page}
}
// IsEmpty returns true if the ListResult contains no values.
func (wlr WebhookListResult) IsEmpty() bool {
return wlr.Value == nil || len(*wlr.Value) == 0
}
// webhookListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (wlr WebhookListResult) webhookListResultPreparer(ctx context.Context) (*http.Request, error) {
if wlr.NextLink == nil || len(to.String(wlr.NextLink)) < 1 {
return nil, nil
}
return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(wlr.NextLink)))
}
// WebhookListResultPage contains a page of Webhook values.
type WebhookListResultPage struct {
fn func(context.Context, WebhookListResult) (WebhookListResult, error)
wlr WebhookListResult
}
// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *WebhookListResultPage) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/WebhookListResultPage.NextWithContext")
defer func() {
sc := -1
if page.Response().Response.Response != nil {
sc = page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
next, err := page.fn(ctx, page.wlr)
if err != nil {
return err
}
page.wlr = next
return nil
}
// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *WebhookListResultPage) Next() error {
return page.NextWithContext(context.Background())
}
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page WebhookListResultPage) NotDone() bool {
return !page.wlr.IsEmpty()
}
// Response returns the raw server response from the last page request.
func (page WebhookListResultPage) Response() WebhookListResult {
return page.wlr
}
// Values returns the slice of values for the current page or nil if there are no values.
func (page WebhookListResultPage) Values() []Webhook {
if page.wlr.IsEmpty() {
return nil
}
return *page.wlr.Value
}
// NewWebhookListResultPage creates a new instance of the WebhookListResultPage type.
func NewWebhookListResultPage(getNextPage func(context.Context, WebhookListResult) (WebhookListResult, error)) WebhookListResultPage {
return WebhookListResultPage{fn: getNextPage}
}
// WebhookProperties the properties of a webhook.
type WebhookProperties struct {
// Status - The status of the webhook at the time the operation was called. Possible values include: 'WebhookStatusEnabled', 'WebhookStatusDisabled'
Status WebhookStatus `json:"status,omitempty"`
// Scope - The scope of repositories where the event can be triggered. For example, 'foo:*' means events for all tags under repository 'foo'. 'foo:bar' means events for 'foo:bar' only. 'foo' is equivalent to 'foo:latest'. Empty means all events.
Scope *string `json:"scope,omitempty"`
// Actions - The list of actions that trigger the webhook to post notifications.
Actions *[]WebhookAction `json:"actions,omitempty"`
// ProvisioningState - The provisioning state of the webhook at the time the operation was called. Possible values include: 'Creating', 'Updating', 'Deleting', 'Succeeded', 'Failed', 'Canceled'
ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
}
// WebhookPropertiesCreateParameters the parameters for creating the properties of a webhook.
type WebhookPropertiesCreateParameters struct {
// ServiceURI - The service URI for the webhook to post notifications.
ServiceURI *string `json:"serviceUri,omitempty"`
// CustomHeaders - Custom headers that will be added to the webhook notifications.
CustomHeaders map[string]*string `json:"customHeaders"`
// Status - The status of the webhook at the time the operation was called. Possible values include: 'WebhookStatusEnabled', 'WebhookStatusDisabled'
Status WebhookStatus `json:"status,omitempty"`
// Scope - The scope of repositories where the event can be triggered. For example, 'foo:*' means events for all tags under repository 'foo'. 'foo:bar' means events for 'foo:bar' only. 'foo' is equivalent to 'foo:latest'. Empty means all events.
Scope *string `json:"scope,omitempty"`
// Actions - The list of actions that trigger the webhook to post notifications.
Actions *[]WebhookAction `json:"actions,omitempty"`
}
// MarshalJSON is the custom marshaler for WebhookPropertiesCreateParameters.
func (wpcp WebhookPropertiesCreateParameters) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if wpcp.ServiceURI != nil {
objectMap["serviceUri"] = wpcp.ServiceURI
}
if wpcp.CustomHeaders != nil {
objectMap["customHeaders"] = wpcp.CustomHeaders
}
if wpcp.Status != "" {
objectMap["status"] = wpcp.Status
}
if wpcp.Scope != nil {
objectMap["scope"] = wpcp.Scope
}
if wpcp.Actions != nil {
objectMap["actions"] = wpcp.Actions
}
return json.Marshal(objectMap)
}
// WebhookPropertiesUpdateParameters the parameters for updating the properties of a webhook.
type WebhookPropertiesUpdateParameters struct {
// ServiceURI - The service URI for the webhook to post notifications.
ServiceURI *string `json:"serviceUri,omitempty"`
// CustomHeaders - Custom headers that will be added to the webhook notifications.
CustomHeaders map[string]*string `json:"customHeaders"`
// Status - The status of the webhook at the time the operation was called. Possible values include: 'WebhookStatusEnabled', 'WebhookStatusDisabled'
Status WebhookStatus `json:"status,omitempty"`
// Scope - The scope of repositories where the event can be triggered. For example, 'foo:*' means events for all tags under repository 'foo'. 'foo:bar' means events for 'foo:bar' only. 'foo' is equivalent to 'foo:latest'. Empty means all events.
Scope *string `json:"scope,omitempty"`
// Actions - The list of actions that trigger the webhook to post notifications.
Actions *[]WebhookAction `json:"actions,omitempty"`
}
// MarshalJSON is the custom marshaler for WebhookPropertiesUpdateParameters.
func (wpup WebhookPropertiesUpdateParameters) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if wpup.ServiceURI != nil {
objectMap["serviceUri"] = wpup.ServiceURI
}
if wpup.CustomHeaders != nil {
objectMap["customHeaders"] = wpup.CustomHeaders
}
if wpup.Status != "" {
objectMap["status"] = wpup.Status
}
if wpup.Scope != nil {
objectMap["scope"] = wpup.Scope
}
if wpup.Actions != nil {
objectMap["actions"] = wpup.Actions
}
return json.Marshal(objectMap)
}
// WebhooksCreateFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type WebhooksCreateFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *WebhooksCreateFuture) Result(client WebhooksClient) (w Webhook, err error) {
var done bool
done, err = future.Done(client)
if err != nil {
err = autorest.NewErrorWithError(err, "containerregistry.WebhooksCreateFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("containerregistry.WebhooksCreateFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if w.Response.Response, err = future.GetResult(sender); err == nil && w.Response.Response.StatusCode != http.StatusNoContent {
w, err = client.CreateResponder(w.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "containerregistry.WebhooksCreateFuture", "Result", w.Response.Response, "Failure responding to request")
}
}
return
}
// WebhooksDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type WebhooksDeleteFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *WebhooksDeleteFuture) Result(client WebhooksClient) (ar autorest.Response, err error) {
var done bool
done, err = future.Done(client)
if err != nil {
err = autorest.NewErrorWithError(err, "containerregistry.WebhooksDeleteFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("containerregistry.WebhooksDeleteFuture")
return
}
ar.Response = future.Response()
return
}
// WebhooksUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type WebhooksUpdateFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *WebhooksUpdateFuture) Result(client WebhooksClient) (w Webhook, err error) {
var done bool
done, err = future.Done(client)
if err != nil {
err = autorest.NewErrorWithError(err, "containerregistry.WebhooksUpdateFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("containerregistry.WebhooksUpdateFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if w.Response.Response, err = future.GetResult(sender); err == nil && w.Response.Response.StatusCode != http.StatusNoContent {
w, err = client.UpdateResponder(w.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "containerregistry.WebhooksUpdateFuture", "Result", w.Response.Response, "Failure responding to request")
}
}
return
}
// WebhookUpdateParameters the parameters for updating a webhook.
type WebhookUpdateParameters struct {
// Tags - The tags for the webhook.
Tags map[string]*string `json:"tags"`
// WebhookPropertiesUpdateParameters - The properties that the webhook will be updated with.
*WebhookPropertiesUpdateParameters `json:"properties,omitempty"`
}
// MarshalJSON is the custom marshaler for WebhookUpdateParameters.
func (wup WebhookUpdateParameters) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if wup.Tags != nil {
objectMap["tags"] = wup.Tags
}
if wup.WebhookPropertiesUpdateParameters != nil {
objectMap["properties"] = wup.WebhookPropertiesUpdateParameters
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for WebhookUpdateParameters struct.
func (wup *WebhookUpdateParameters) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "tags":
if v != nil {
var tags map[string]*string
err = json.Unmarshal(*v, &tags)
if err != nil {
return err
}
wup.Tags = tags
}
case "properties":
if v != nil {
var webhookPropertiesUpdateParameters WebhookPropertiesUpdateParameters
err = json.Unmarshal(*v, &webhookPropertiesUpdateParameters)
if err != nil {
return err
}
wup.WebhookPropertiesUpdateParameters = &webhookPropertiesUpdateParameters
}
}
}
return nil
}
webapp/python/app.py
import MySQLdb.cursors
import flask
import functools
import os
import pathlib
import copy
import json
import subprocess
from io import StringIO
import csv
from datetime import datetime, timezone
base_path = pathlib.Path(__file__).resolve().parent.parent
static_folder = base_path / 'static'
icons_folder = base_path / 'public' / 'icons'
ranks_num = None
ranks_id = None
ranks_price = None
user_id_hash = dict()
user_name_hash = dict()
class CustomFlask(flask.Flask):
jinja_options = flask.Flask.jinja_options.copy()
jinja_options.update(dict(
block_start_string='(%',
block_end_string='%)',
variable_start_string='((',
variable_end_string='))',
comment_start_string='(#',
comment_end_string='#)',
))
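# The Jinja delimiters above are remapped to (% %) / (( )) / (# #), presumably so that
# server-side templates do not clash with a client-side templating syntax that uses the
# default {{ }} and {% %} markers (an assumption; the templates are not shown here).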
app = CustomFlask(__name__, static_folder=str(static_folder), static_url_path='')
app.config['SECRET_KEY'] = 'tagomoris'
if not os.path.exists(str(icons_folder)):
os.makedirs(str(icons_folder))
def make_base_url(request):
return request.url_root[:-1]
@app.template_filter('tojsonsafe')
def tojsonsafe(target):
return json.dumps(target).replace("+", "\\u002b").replace("<", "\\u003c").replace(">", "\\u003e")
def jsonify(target):
return json.dumps(target)
def res_error(error="unknown", status=500):
return jsonify({"error": error}), status
def login_required(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
if not get_login_user():
return res_error('login_required', 401)
return f(*args, **kwargs)
return wrapper
def admin_login_required(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
if not get_login_administrator():
return res_error('admin_login_required', 401)
return f(*args, **kwargs)
return wrapper
def dbh():
if hasattr(flask.g, 'db'):
return flask.g.db
flask.g.db = MySQLdb.connect(
host=os.environ['DB_HOST'],
port=3306,
user=os.environ['DB_USER'],
password=os.environ['DB_PASS'],
database=os.environ['DB_DATABASE'],
charset='utf8mb4',
cursorclass=MySQLdb.cursors.DictCursor,
autocommit=True,
)
cur = flask.g.db.cursor()
cur.execute(
"SET SESSION sql_mode='STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,"
"ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'")
return flask.g.db
def get_user_id_hash(user_id, cur):
global user_id_hash
if user_id in user_id_hash:
return user_id_hash[user_id]
else:
cur.execute("SELECT id, nickname FROM users WHERE id = %s", [user_id])
user = cur.fetchone()
if user is not None:
user_id_hash[user_id] = user
return user
def get_user_name_hash(user_name, cur):
global user_name_hash
if user_name in user_name_hash:
return user_name_hash[user_name]
else:
cur.execute("SELECT * FROM users WHERE login_name = %s", [user_name])
user = cur.fetchone()
if user is not None:
user_name_hash[user_name] = user
return user
def extract_public_fg(e):
return e["public_fg"]
def ranks_data():
global ranks_num, ranks_id, ranks_price
if ranks_num and ranks_id and ranks_price:
return
cur = dbh().cursor()
if ranks_num is None:
cur.execute("SELECT rank, COUNT(id) FROM sheets GROUP BY rank")
ranks_num = {e["rank"]: e["COUNT(id)"] for e in cur.fetchall()}
if ranks_price is None:
cur.execute("SELECT price, rank FROM sheets GROUP BY rank")
ranks_price = {e["rank"]: e["price"] for e in cur.fetchall()}
if ranks_id is None:
cur.execute("SELECT * FROM sheets ORDER BY `rank`, num")
sheets = cur.fetchall()
ranks_id = {k: dict() for k in ranks_num.keys()}
for sheet in sheets:
ranks_id[sheet["rank"]][sheet["id"]] = len(ranks_id[sheet["rank"]])
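# Illustrative shape of the module-level caches filled by ranks_data() (actual keys and
# values depend on the sheets table; the numbers below are made up):
#   ranks_num   -> {"S": 50, "A": 150, ...}              # number of sheets per rank
#   ranks_price -> {"S": 5000, "A": 3000, ...}           # per-rank price added to the event price
#   ranks_id    -> {"S": {sheet_id: zero_based_index, ...}, ...}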
@app.teardown_appcontext
def teardown(error):
if hasattr(flask.g, "db"):
flask.g.db.close()
def get_events(filter_fn=lambda e: True):
conn = dbh()
conn.autocommit(False)
cur = conn.cursor()
try:
cur.execute("SELECT * FROM events ORDER BY id ASC")
rows = cur.fetchall()
event_ids = [row['id'] for row in rows if filter_fn(row)]
events = []
for event_id in event_ids:
event = get_event(event_id)
for sheet in event['sheets'].values():
del sheet['detail']
events.append(event)
conn.commit()
except MySQLdb.Error as e:
conn.rollback()
raise e
return events
def get_event(event_id, login_user_id=None):
cur = dbh().cursor()
ranks_data()
cur.execute("SELECT * FROM events WHERE id = %s", [event_id])
event = cur.fetchone()
if not event:
return None
event["total"] = 0
event["remains"] = 0
event["sheets"] = {}
for rank in ranks_num.keys():
event["sheets"][rank] = {'total': 0, 'remains': 0, 'detail': []}
cur.execute(
"SELECT num, user_id, reserved_at, rank, sheet_id FROM sheets LEFT OUTER JOIN reservations as r"
" ON r.sheet_id = sheets.id WHERE r.event_id = %s AND r.canceled_at IS NULL"
" GROUP BY sheet_id HAVING r.reserved_at = MIN(r.reserved_at)", [event['id']])
reservations = cur.fetchall()
for rank in ranks_num.keys():
event['sheets'][rank]['detail'] = [{"num": i+1} for i in range(ranks_num[rank])]
event["sheets"][rank]["remains"] = ranks_num[rank]
event['sheets'][rank]['total'] = ranks_num[rank]
if not event['sheets'][rank].get('price'):
event["sheets"][rank]["price"] = event['price'] + ranks_price[rank]
event["remains"] = sum(ranks_num.values())
event['total'] = sum(ranks_num.values())
for reservation in reservations:
sheet = {"num": reservation["num"],
"mine": True if login_user_id and reservation['user_id'] == login_user_id else False,
"reserved": True,
"reserved_at": int(reservation['reserved_at'].replace(tzinfo=timezone.utc).timestamp())}
event["remains"] -= 1
event["sheets"][reservation["rank"]]["remains"] -= 1
event['sheets'][reservation['rank']]['detail'][ranks_id[reservation["rank"]][reservation["sheet_id"]]] = sheet
event['public'] = True if event['public_fg'] else False
event['closed'] = True if event['closed_fg'] else False
del event['public_fg']
del event['closed_fg']
return event
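# Illustrative shape of the dict returned by get_event() (values are made up):
#   {"id": 1, "title": "...", "price": 3000, "total": 1000, "remains": 998,
#    "sheets": {"S": {"total": 50, "remains": 49, "price": 8000,
#                     "detail": [{"num": 1, "reserved": True, ...}, ...]}, ...},
#    "public": True, "closed": False}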
def sanitize_event(event):
sanitized = copy.copy(event)
del sanitized['price']
del sanitized['public']
del sanitized['closed']
return sanitized
def get_login_user():
if "user_id" not in flask.session:
return None
cur = dbh().cursor()
user_id = flask.session['user_id']
return get_user_id_hash(user_id, cur)
def get_login_administrator():
if "administrator_id" not in flask.session:
return None
cur = dbh().cursor()
administrator_id = flask.session['administrator_id']
cur.execute("SELECT id, nickname FROM administrators WHERE id = %s", [administrator_id])
return cur.fetchone()
def validate_rank(rank):
cur = dbh().cursor()
cur.execute("SELECT COUNT(*) AS total_sheets FROM sheets WHERE `rank` = %s", [rank])
ret = cur.fetchone()
return int(ret['total_sheets']) > 0
def render_report_csv(reports):
reports = sorted(reports, key=lambda x: x['sold_at'])
keys = ["reservation_id", "event_id", "rank", "num", "price", "user_id", "sold_at", "canceled_at"]
body = [keys]
for report in reports:
body.append([report[key] for key in keys])
f = StringIO()
writer = csv.writer(f)
writer.writerows(body)
res = flask.make_response()
res.data = f.getvalue()
res.headers['Content-Type'] = 'text/csv'
res.headers['Content-Disposition'] = 'attachment; filename=report.csv'
return res
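# The CSV emitted above starts with the header row defined in `keys`, i.e.:
#   reservation_id,event_id,rank,num,price,user_id,sold_at,canceled_at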
@app.route('/')
def get_index():
user = get_login_user()
events = []
for event in get_events(extract_public_fg):
events.append(sanitize_event(event))
return flask.render_template('index.html', user=user, events=events, base_url=make_base_url(flask.request))
@app.route('/initialize')
def get_initialize():
subprocess.call(["../../db/init.sh"])
return '', 204
@app.route('/api/users', methods=['POST'])
def post_users():
nickname = flask.request.json['nickname']
login_name = flask.request.json['login_name']
password = flask.request.json['password']
conn = dbh()
conn.autocommit(False)
cur = conn.cursor()
try:
duplicated = get_user_name_hash(login_name, cur)
if duplicated:
conn.rollback()
return res_error('duplicated', 409)
cur.execute(
"INSERT INTO users (login_name, pass_hash, nickname) VALUES (%s, SHA2(%s, 256), %s)",
[login_name, password, nickname])
user_id = cur.lastrowid
conn.commit()
except MySQLdb.Error as e:
conn.rollback()
print(e)
return res_error()
return jsonify({"id": user_id, "nickname": nickname}), 201
@app.route('/api/users/<int:user_id>')
@login_required
def get_users(user_id):
cur = dbh().cursor()
user = get_user_id_hash(user_id, cur)
if user['id'] != get_login_user()['id']:
return '', 403
cur.execute(
"SELECT r.*, s.rank AS sheet_rank, s.num AS sheet_num FROM reservations r INNER JOIN sheets s"
" ON s.id = r.sheet_id WHERE r.user_id = %s ORDER BY IFNULL(r.canceled_at, r.reserved_at) DESC LIMIT 5",
[user['id']])
recent_reservations = []
for row in cur.fetchall():
event = get_event(row['event_id'])
price = event['sheets'][row['sheet_rank']]['price']
del event['sheets']
del event['total']
del event['remains']
if row['canceled_at']:
canceled_at = int(row['canceled_at'].replace(tzinfo=timezone.utc).timestamp())
else:
canceled_at = None
recent_reservations.append({
"id": int(row['id']),
"event": event,
"sheet_rank": row['sheet_rank'],
"sheet_num": int(row['sheet_num']),
"price": int(price),
"reserved_at": int(row['reserved_at'].replace(tzinfo=timezone.utc).timestamp()),
"canceled_at": canceled_at,
})
user['recent_reservations'] = recent_reservations
cur.execute(
"SELECT IFNULL(SUM(e.price + s.price), 0) AS total_price FROM reservations r INNER JOIN sheets s"
" ON s.id = r.sheet_id INNER JOIN events e ON e.id = r.event_id WHERE r.user_id = %s AND r.canceled_at IS NULL",
[user['id']])
row = cur.fetchone()
user['total_price'] = int(row['total_price'])
cur.execute(
"SELECT event_id FROM reservations WHERE user_id = %s GROUP BY event_id"
" ORDER BY MAX(IFNULL(canceled_at, reserved_at)) DESC LIMIT 5",
[user['id']])
rows = cur.fetchall()
recent_events = []
for row in rows:
event = get_event(row['event_id'])
for sheet in event['sheets'].values():
del sheet['detail']
recent_events.append(event)
user['recent_events'] = recent_events
return jsonify(user)
@app.route('/api/actions/login', methods=['POST'])
def post_login():
login_name = flask.request.json['login_name']
password = flask.request.json['password']
cur = dbh().cursor()
user = get_user_name_hash(login_name, cur)
cur.execute('SELECT SHA2(%s, 256) AS pass_hash', [password])
pass_hash = cur.fetchone()
if not user or pass_hash['pass_hash'] != user['pass_hash']:
return res_error("authentication_failed", 401)
flask.session['user_id'] = user["id"]
user = get_login_user()
return flask.jsonify(user)
@app.route('/api/actions/logout', methods=['POST'])
@login_required
def post_logout():
flask.session.pop('user_id', None)
return '', 204
@app.route('/api/events')
def get_events_api():
events = []
for event in get_events(extract_public_fg):
events.append(sanitize_event(event))
return jsonify(events)
@app.route('/api/events/<int:event_id>')
def get_events_by_id(event_id):
user = get_login_user()
if user:
event = get_event(event_id, user['id'])
else:
event = get_event(event_id)
if not event or not event["public"]:
return res_error("not_found", 404)
event = sanitize_event(event)
return jsonify(event)
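# The reservation endpoint below picks a random free sheet of the requested rank
# (the NOT IN subquery excludes active, non-canceled reservations) and inserts a
# reservation row stamped with the current UTC time.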
@app.route('/api/events/<int:event_id>/actions/reserve', methods=['POST'])
@login_required
def post_reserve(event_id):
rank = flask.request.json["sheet_rank"]
user = get_login_user()
event = get_event(event_id, user['id'])
if not event or not event['public']:
return res_error("invalid_event", 404)
if not validate_rank(rank):
return res_error("invalid_rank", 400)
reservation_id = 0
conn = dbh()
cur = conn.cursor()
cur.execute(
"SELECT * FROM sheets WHERE id NOT IN (SELECT sheet_id FROM reservations WHERE event_id = %s"
" AND canceled_at IS NULL FOR UPDATE) AND `rank` =%s ORDER BY RAND() LIMIT 1",
[event['id'], rank])
sheet = cur.fetchone()
if not sheet:
return res_error("sold_out", 409)
try:
conn.autocommit(False)
cur = conn.cursor()
cur.execute(
"INSERT INTO reservations (event_id, sheet_id, user_id, reserved_at) VALUES (%s, %s, %s, %s)",
[event['id'], sheet['id'], user['id'], datetime.utcnow().strftime("%F %T.%f")])
reservation_id = cur.lastrowid
conn.commit()
except MySQLdb.Error as e:
conn.rollback()
print(e)
content = jsonify({
"id": reservation_id,
"sheet_rank": rank,
"sheet_num": sheet['num']})
return flask.Response(content, status=202, mimetype='application/json')
@app.route('/api/events/<int:event_id>/sheets/<rank>/<int:num>/reservation', methods=['DELETE'])
@login_required
def delete_reserve(event_id, rank, num):
user = get_login_user()
event = get_event(event_id, user['id'])
if not event or not event['public']:
return res_error("invalid_event", 404)
if not validate_rank(rank):
return res_error("invalid_rank", 404)
cur = dbh().cursor()
cur.execute('SELECT * FROM sheets WHERE `rank` = %s AND num = %s', [rank, num])
sheet = cur.fetchone()
if not sheet:
return res_error("invalid_sheet", 404)
conn = dbh()
try:
conn.autocommit(False)
cur = conn.cursor()
cur.execute(
"SELECT * FROM reservations WHERE event_id = %s AND sheet_id = %s AND canceled_at IS NULL"
" GROUP BY event_id HAVING reserved_at = MIN(reserved_at) FOR UPDATE",
[event['id'], sheet['id']])
reservation = cur.fetchone()
if not reservation:
conn.rollback()
return res_error("not_reserved", 400)
if reservation['user_id'] != user['id']:
conn.rollback()
return res_error("not_permitted", 403)
cur.execute(
"UPDATE reservations SET canceled_at = %s WHERE id = %s",
[datetime.utcnow().strftime("%F %T.%f"), reservation['id']])
conn.commit()
except MySQLdb.Error as e:
conn.rollback()
print(e)
return res_error()
return flask.Response(status=204)
@app.route('/admin/')
def get_admin():
administrator = get_login_administrator()
if administrator:
events = get_events()
else:
events = {}
return flask.render_template('admin.html', administrator=administrator, events=events,
base_url=make_base_url(flask.request))
@app.route('/admin/api/actions/login', methods=['POST'])
def post_admin_login():
login_name = flask.request.json['login_name']
password = flask.request.json['password']
cur = dbh().cursor()
cur.execute('SELECT * FROM administrators WHERE login_name = %s', [login_name])
administrator = cur.fetchone()
cur.execute('SELECT SHA2(%s, 256) AS pass_hash', [password])
pass_hash = cur.fetchone()
if not administrator or pass_hash['pass_hash'] != administrator['pass_hash']:
return res_error("authentication_failed", 401)
flask.session['administrator_id'] = administrator['id']
administrator = get_login_administrator()
return jsonify(administrator)
@app.route('/admin/api/actions/logout', methods=['POST'])
@admin_login_required
def get_admin_logout():
flask.session.pop('administrator_id', None)
return '', 204
@app.route('/admin/api/events')
@admin_login_required
def get_admin_events_api():
return jsonify(get_events())
@app.route('/admin/api/events', methods=['POST'])
@admin_login_required
def post_admin_events_api():
title = flask.request.json['title']
public = flask.request.json['public']
price = flask.request.json['price']
conn = dbh()
conn.autocommit(False)
cur = conn.cursor()
event_id = None
try:
cur.execute(
"INSERT INTO events (title, public_fg, closed_fg, price) VALUES (%s, %s, 0, %s)",
[title, public, price])
event_id = cur.lastrowid
conn.commit()
except MySQLdb.Error as e:
conn.rollback()
print(e)
return jsonify(get_event(event_id))
@app.route('/admin/api/events/<int:event_id>')
@admin_login_required
def get_admin_events_by_id(event_id):
event = get_event(event_id)
if not event:
return res_error("not_found", 404)
return jsonify(event)
@app.route('/admin/api/events/<int:event_id>/actions/edit', methods=['POST'])
@admin_login_required
def post_event_edit(event_id):
public = flask.request.json['public'] if 'public' in flask.request.json.keys() else False
closed = flask.request.json['closed'] if 'closed' in flask.request.json.keys() else False
if closed:
public = False
event = get_event(event_id)
if not event:
return res_error("not_found", 404)
if event['closed']:
return res_error('cannot_edit_closed_event', 400)
elif event['public'] and closed:
return res_error('cannot_close_public_event', 400)
conn = dbh()
conn.autocommit(False)
cur = conn.cursor()
try:
cur.execute(
"UPDATE events SET public_fg = %s, closed_fg = %s WHERE id = %s",
[public, closed, event['id']])
conn.commit()
except MySQLdb.Error:
conn.rollback()
return jsonify(get_event(event_id))
@app.route('/admin/api/reports/events/<int:event_id>/sales')
@admin_login_required
def get_admin_event_sales(event_id):
event = get_event(event_id)
cur = dbh().cursor()
cur.execute(
'SELECT r.*, s.rank AS sheet_rank, s.num AS sheet_num, s.price AS sheet_price, e.price AS event_price'
' FROM reservations r INNER JOIN sheets s ON s.id = r.sheet_id INNER JOIN events e ON e.id = r.event_id'
' WHERE r.event_id = %s ORDER BY reserved_at ASC FOR UPDATE',
[event['id']])
reports = [{
"reservation_id": reservation['id'],
"event_id": event['id'],
"rank": reservation['sheet_rank'],
"num": reservation['sheet_num'],
"user_id": reservation['user_id'],
"sold_at": reservation['reserved_at'].isoformat()+"Z",
"canceled_at": reservation['canceled_at'].isoformat() + "Z" if reservation['canceled_at'] else "",
"price": reservation['event_price'] + reservation['sheet_price']} for reservation in cur.fetchall()]
return render_report_csv(reports)
@app.route('/admin/api/reports/sales')
@admin_login_required
def get_admin_sales():
cur = dbh().cursor()
cur.execute(
'SELECT r.*, s.rank AS sheet_rank, s.num AS sheet_num, s.price AS sheet_price, e.id AS event_id, e.price'
' AS event_price FROM reservations r INNER JOIN sheets s ON s.id = r.sheet_id INNER JOIN events e'
' ON e.id = r.event_id ORDER BY reserved_at ASC FOR UPDATE')
reports = [{
"reservation_id": reservation['id'],
"event_id": reservation['event_id'],
"rank": reservation['sheet_rank'],
"num": reservation['sheet_num'],
"user_id": reservation['user_id'],
"sold_at": reservation['reserved_at'].isoformat()+"Z",
"canceled_at": reservation['canceled_at'].isoformat()+"Z" if reservation['canceled_at'] else "",
"price": reservation['event_price'] + reservation['sheet_price']} for reservation in cur.fetchall()]
return render_report_csv(reports)
if __name__ == "__main__":
app.run(port=8080, debug=True, threaded=True, host="0.0.0.0")
| []
| []
| [
"DB_PASS",
"DB_DATABASE",
"DB_USER",
"DB_HOST"
]
| [] | ["DB_PASS", "DB_DATABASE", "DB_USER", "DB_HOST"] | python | 4 | 0 | |
main.py | from handleenv import handleenv
handleenv()
import os
from datetime import datetime as dt
from feed import makeFeed, pasta
from grab import atualizaListaYouTube, baixarNovos, pasta_download
from trataAudio import transformarEAtualizar
import tempfile
import pathlib
CONTROL_FILE = os.path.join(tempfile.gettempdir(),'oroneki_video_to_pod.oro')
def main():
if os.path.exists(CONTROL_FILE):
        print('Already running...')
return
pathlib.Path(CONTROL_FILE).touch()
    print('Start')
try:
h1 = dt.now()
atualizaListaYouTube()
h2 = dt.now()
        try:
            baixarNovos(pasta_download)
        except Exception:
            print('Error during download')
h3 = dt.now()
transformarEAtualizar()
h4 = dt.now()
xml = makeFeed(pasta)
h5 = dt.now()
        print('DONE!')
        print('START  :', h1, sep="\t")
        print('FINISH :', h5, sep ="\t")
        print('download:', h3-h2, sep ="\t")
        print('magic  :', h4-h3, sep ="\t")
        print('TOTAL  :', h5-h1, sep ="\t")
end = os.environ.get('SERVER_END')
        print(f'''\nACCESS AT:\n{end}/{xml}\n''')
except Exception as exc:
import sys
import logging
logging.basicConfig(format='[%(asctime)s %(lineno)3s] %(message)s', level="INFO", datefmt="%d/%m %H:%M:%S")
print(exc)
        print('Error!', sys.exc_info()[0])
logging.exception('')
finally:
os.remove(CONTROL_FILE)
        print('END')
if __name__ == '__main__':
main()
| []
| []
| [
"SERVER_END"
]
| [] | ["SERVER_END"] | python | 1 | 0 | |
PyTorch/ImageNet/hessian_utils.py | """
This file contains some utility functions to calculate hessian matrix and its inverse.
Author: Chen Shangyu ([email protected])
"""
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from datetime import datetime
import tensorflow as tf
import os
import numpy as np
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# Construct hessian computing graph for res layer (conv layer without bias)
def create_res_hessian_computing_tf_graph(input_shape, layer_kernel, layer_stride):
"""
    This function creates the TensorFlow graph for computing the hessian matrix for a res layer.
    Step 1: It first extracts image patches using tf.extract_image_patches.
    Step 2: It then calculates the hessian matrix as the mean outer product of those patches.
Args:
input_shape: the dimension of input
layer_kernel: kernel size of the layer
layer_stride: stride of the layer
Output:
input_holder: TensorFlow placeholder for layer input
get_hessian_op: A TensorFlow operator to calculate hessian matrix
"""
input_holder = tf.placeholder(dtype=tf.float32, shape=input_shape)
patches = tf.extract_image_patches(images = input_holder,
ksizes = [1,layer_kernel, layer_kernel,1],
strides = [1, layer_stride, layer_stride, 1],
rates = [1, 1, 1, 1],
padding = 'SAME')
print 'Patches shape: %s' %patches.get_shape()
a = tf.expand_dims(patches, axis=-1)
b = tf.expand_dims(patches, axis=3)
outprod = tf.multiply(a, b)
# print 'outprod shape: %s' %outprod.get_shape()
get_hessian_op = tf.reduce_mean(outprod, axis=[0, 1, 2])
print 'Hessian shape: %s' % get_hessian_op.get_shape()
return input_holder, get_hessian_op
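# Illustrative usage sketch (placeholder names; it mirrors how generate_hessian() below consumes this graph):
#   holder, hessian_op = create_res_hessian_computing_tf_graph(batch.shape, kernel, stride)
#   batch_hessian = sess.run(hessian_op, feed_dict={holder: batch})  # one batch's contribution
#   layer_hessian += batch_hessian                                   # averaged over n_batch_used at the end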
# Construct hessian computing graph for fc layer
def create_fc_hessian_computing_tf_graph(input_shape):
"""
    This function creates the TensorFlow graph for computing the hessian matrix for a fully-connected layer.
Compared with create_res_hessian_computing_tf_graph, it does not need to extract patches.
"""
input_holder = tf.placeholder(dtype=tf.float32, shape=input_shape)
a = tf.expand_dims(input_holder, axis=-1)
# Appending extra one for bias term
vect_w_b = tf.concat([a, tf.ones([tf.shape(a)[0], 1, 1])], axis=1)
outprod = tf.matmul(vect_w_b, vect_w_b, transpose_b=True)
# print 'outprod shape: %s' %outprod.get_shape()
get_hessian_op = tf.reduce_mean(outprod, axis=0)
print 'Hessian shape: %s' % get_hessian_op.get_shape()
return input_holder, get_hessian_op
# Construct hessian computing graph
def create_conv_hessian_computing_tf_graph(input_shape, layer_kernel, layer_stride):
"""
    This function creates the TensorFlow graph for computing the hessian matrix for a convolution layer.
    Compared with create_res_hessian_computing_tf_graph, it appends an extra one to each patch for the bias term.
"""
input_holder = tf.placeholder(dtype=tf.float32, shape=input_shape)
patches = tf.extract_image_patches(images = input_holder,
ksizes = [1,layer_kernel, layer_kernel,1],
strides = [1, layer_stride, layer_stride, 1],
rates = [1, 1, 1, 1],
padding = 'SAME')
print 'Patches shape: %s' %patches.get_shape()
vect_w_b = tf.concat([patches, tf.ones([tf.shape(patches)[0], \
tf.shape(patches)[1], tf.shape(patches)[2], 1])], axis=3)
a = tf.expand_dims(vect_w_b, axis=-1)
b = tf.expand_dims(vect_w_b, axis=3)
outprod = tf.multiply(a, b)
# print 'outprod shape: %s' %outprod.get_shape()
get_hessian_op = tf.reduce_mean(outprod, axis=[0, 1, 2])
print 'Hessian shape: %s' % get_hessian_op.get_shape()
return input_holder, get_hessian_op
# Construct hessian inverse computing graph for Woodbury
def create_Woodbury_hessian_inv_graph(input_shape, dataset_size):
"""
    This function creates the hessian inverse calculation graph using the Woodbury method.
"""
hessian_inv_holder = tf.placeholder(dtype=tf.float32, shape=[input_shape, input_shape])
input_holder = tf.placeholder(dtype=tf.float32, shape=[1, input_shape])
# [1, 4097] [4097, 4097] [4097, 1]
denominator = dataset_size + \
tf.matmul(a = tf.matmul(a = input_holder, b = hessian_inv_holder), b = input_holder, transpose_b=True)
# ([4097, 4097] [4097, 1]) ([1, 4097] [4097, 4097])
numerator = tf.matmul(a = tf.matmul(a = hessian_inv_holder, b = input_holder, transpose_b=True), \
b = tf.matmul(a = input_holder, b = hessian_inv_holder))
hessian_inv_op = hessian_inv_holder - numerator * (1.00 / denominator)
return hessian_inv_holder, input_holder, hessian_inv_op
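# The graph above applies one rank-1 Sherman-Morrison/Woodbury update. The same update in plain
# NumPy (a reference sketch only; generate_hessian_inv_Woodbury below inlines exactly this arithmetic):
def _woodbury_rank1_update_np(h_inv, x, dataset_size):
    """Return the updated inverse for one input row x of shape [1, d]."""
    denominator = dataset_size + np.dot(np.dot(x, h_inv), x.T)
    numerator = np.dot(np.dot(h_inv, x.T), np.dot(x, h_inv))
    return h_inv - numerator * (1.0 / denominator)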
def generate_hessian(net, trainloader, layer_name, layer_type, \
n_batch_used = 100, batch_size = 2, stride_factor = 3 ,use_cuda = True):
"""
    This function generates the hessian matrix for a given layer. Basically, what it does is:
Step 1: Extract layer input using PyTorch interface
Step 2: For convolution, res layer, extract patches using TensorFlow function
Step 3: Calculate hessian
Args:
net: PyTorch model
trainloader: PyTorch dataloader
layer_name:
layer_type: 'C' for Convolution (with bias), 'R' for res layer (without bias),
'F' for Fully-Connected (with bias). I am sure you will know why the bias term
is emphasized here as you are clever.
n_batch_used: number of batches used to generate hessian.
        batch_size: Batch size. Because the hessian calculation graph is quite large, a small batch
                    size (like 2) is recommended here.
        stride_factor: For the same reason mentioned above, a bigger stride results in fewer extracted
                    image patches (think about how convolution works). stride_factor is multiplied by
                    the actual stride in later use, so when stride_factor == 1 patches are extracted in
                    the original way. However, that may cause GPU/CPU memory trouble; if you run into
                    such issues, you can increase the stride factor here.
use_cuda: whether you can use cuda or not.
Output:
Hessian matrix
"""
freq_moniter = (n_batch_used * batch_size) / 50 # Total 50 times of printing information
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
net.eval()
for batch_idx, (inputs, _) in enumerate(trainloader):
if use_cuda:
inputs = inputs.cuda()
net(Variable(inputs, volatile=True))
layer_input = net.module.layer_input[layer_name]
# In the begining, construct hessian graph
if batch_idx == 0:
print '[%s] Now construct generate hessian op for layer %s' %(datetime.now(), layer_name)
# res layer
if layer_type == 'R':
# Because PyTorch's data format (N,C,W,H) is different from tensorflow (N,W,H,C)
# layer input should be permuted to fit tensorflow
layer_input_np = layer_input.permute(0, 2, 3, 1).cpu().numpy()
layer_input_holder, generate_hessian_op = \
create_res_hessian_computing_tf_graph(layer_input_np.shape,
net.module.layer_kernel[layer_name],
net.module.layer_stride[layer_name] * stride_factor)
# check whether dimension is right
hessian_shape = int(generate_hessian_op.get_shape()[0])
print 'Hessian shape: %d' %hessian_shape
weight_shape = net.state_dict()['module.%s.weight' %layer_name].size()
# print ('Kernel shape: %s' %weight_shape)
# print weight_shape
kernel_unfold_shape = int(weight_shape[1]) * int(weight_shape[2]) * int(weight_shape[3])
print 'Kernel unfold shape: %d' %kernel_unfold_shape
assert(hessian_shape == kernel_unfold_shape)
# linear layer
elif layer_type == 'F':
layer_input_np = layer_input.cpu().numpy()
layer_input_holder, generate_hessian_op = \
create_fc_hessian_computing_tf_graph(layer_input_np.shape)
# check whether dimension is right
hessian_shape = int(generate_hessian_op.get_shape()[0])
print 'Hessian shape: %d' % hessian_shape
weight_shape = net.state_dict()['module.%s.weight' % layer_name].size()
print 'Weights shape: %d' % weight_shape[1]
assert(hessian_shape == weight_shape[1] + 1) # +1 because of bias
elif layer_type == 'C':
layer_input_np = layer_input.permute(0, 2, 3, 1).cpu().numpy()
layer_input_holder, generate_hessian_op = \
create_conv_hessian_computing_tf_graph(layer_input_np.shape,
net.module.layer_kernel[layer_name],
net.module.layer_stride[layer_name] * stride_factor)
# check whether dimension is right
hessian_shape = int(generate_hessian_op.get_shape()[0])
print 'Hessian shape: %d' %hessian_shape
weight_shape = net.state_dict()['module.%s.weight' %layer_name].size()
# print ('Kernel shape: %s' %weight_shape)
# print weight_shape
kernel_unfold_shape = int(weight_shape[1]) * int(weight_shape[2]) * int(weight_shape[3])
print 'Kernel unfold shape: %d' %kernel_unfold_shape
assert(hessian_shape == kernel_unfold_shape + 1)
print '[%s] %s Graph build complete.' % (datetime.now(), layer_name)
# Initialization finish, begin to calculate
if layer_type == 'C' or layer_type == 'R':
this_layer_input = layer_input.permute(0, 2, 3, 1).cpu().numpy()
elif layer_type == 'F':
this_layer_input = layer_input.cpu().numpy()
this_hessian = sess.run(generate_hessian_op,
feed_dict={layer_input_holder: this_layer_input})
if batch_idx == 0:
layer_hessian = this_hessian
else:
layer_hessian += this_hessian
if batch_idx % freq_moniter == 0:
print '[%s] Now finish image No. %d / %d' \
%(datetime.now(), batch_idx * batch_size, n_batch_used * batch_size)
if batch_idx == n_batch_used:
break
# net.train()
return (1.0 / n_batch_used) * layer_hessian
def generate_hessian_inv_Woodbury(net, trainloader, layer_name, layer_type, \
n_batch_used = 100, batch_size = 2, stride_factor = 3 , use_tf_backend = True, use_cuda = True):
"""
    This function calculates the Hessian inverse matrix by the Woodbury matrix identity.
    Args:
        Please find the explanations of the shared parameters above.
        use_tf_backend: whether to use the TensorFlow wrapper to accelerate the process (True to use it).
"""
hessian_inverse = None
dataset_size = 0
freq_moniter = (n_batch_used * batch_size) / 50 # Total 50 times of printing information
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
net.eval()
for batch_idx, (inputs, _) in enumerate(trainloader):
if use_cuda:
inputs = inputs.cuda()
net(Variable(inputs, volatile=True))
layer_input = net.module.layer_input[layer_name]
# Construct tf op for convolution and res layer
if batch_idx == 0:
if layer_type == 'C' or layer_type == 'R':
print '[%s] Now construct patches extraction op for layer %s' %(datetime.now(), layer_name)
layer_input_np = layer_input.permute(0, 2, 3, 1).cpu().numpy()
layer_kernel = net.module.layer_kernel[layer_name]
layer_stride = net.module.layer_stride[layer_name] * stride_factor
layer_input_holder = tf.placeholder(dtype=tf.float32, shape=layer_input_np.shape)
get_patches_op = \
tf.extract_image_patches(images = layer_input_holder,
ksizes = [1, layer_kernel, layer_kernel,1],
strides = [1, layer_stride, layer_stride, 1],
rates = [1, 1, 1, 1],
padding = 'SAME')
# For a convolution input, extracted pathes would be: [1, 9, 9, 2304]
dataset_size = n_batch_used * int(get_patches_op.get_shape()[0]) * \
int(get_patches_op.get_shape()[1]) * int(get_patches_op.get_shape()[2])
input_dimension = get_patches_op.get_shape()[3]
if layer_type == 'C':
# In convolution layer, input dimension should be added one for bias term
hessian_inverse = 1000000 * np.eye(input_dimension + 1)
if use_tf_backend:
print ('You choose tf backend to calculate Woodbury, constructing your graph.')
hessian_inv_holder, input_holder, Woodbury_hessian_inv_op = \
create_Woodbury_hessian_inv_graph(input_dimension + 1, dataset_size)
else:
hessian_inverse = 1000000 * np.eye(input_dimension)
if use_tf_backend:
print ('You choose tf backend to calculate Woodbury, constructing your graph.')
hessian_inv_holder, input_holder, Woodbury_hessian_inv_op = \
create_Woodbury_hessian_inv_graph(input_dimension, dataset_size)
else:
layer_input_np = layer_input.cpu().numpy()
input_dimension = layer_input_np.shape[1]
dataset_size = n_batch_used * batch_size
hessian_inverse = 1000000 * np.eye(input_dimension + 1)
if use_tf_backend:
print ('You choose tf backend to calculate Woodbury, constructing your graph.')
hessian_inv_holder, input_holder, Woodbury_hessian_inv_op = \
create_Woodbury_hessian_inv_graph(input_dimension + 1, dataset_size)
print '[%s] dataset: %d, input dimension: %d' %(datetime.now(), dataset_size, input_dimension)
# Begin process
if layer_type == 'F':
this_layer_input = layer_input.cpu().numpy() # [2, 4096]
for i in range(this_layer_input.shape[0]):
this_input = this_layer_input[i]
# print this_input.shape
# print np.array([1.0]).shape
wb = np.concatenate([this_input.reshape(1,-1), np.array([1.0]).reshape(1,-1)], axis = 1) # [1, 4097]
if use_tf_backend:
hessian_inverse = sess.run(Woodbury_hessian_inv_op, feed_dict={
hessian_inv_holder: hessian_inverse,
input_holder: wb
})
else:
# [1, 4097] [4097, 4097] [4097, 1]
denominator = dataset_size + np.dot(np.dot(wb,hessian_inverse), wb.T)
# [4097, 4097] [4097, 1] [1, 4097] [4097, 4097]
numerator = np.dot(np.dot(hessian_inverse, wb.T), np.dot(wb,hessian_inverse))
hessian_inverse = hessian_inverse - numerator * (1.0 / denominator)
elif layer_type == 'C' or layer_type == 'R':
this_layer_input = layer_input.permute(0, 2, 3, 1).cpu().numpy()
this_patch = sess.run(get_patches_op, feed_dict={layer_input_holder: this_layer_input})
for i in range(this_patch.shape[0]):
for j in range(this_patch.shape[1]):
for m in range(this_patch.shape[2]):
this_input = this_patch[i][j][m]
if layer_type == 'C':
wb = np.concatenate([this_input.reshape(1,-1), np.array([1.0]).reshape(1,-1)], axis = 1) # [1, 2305]
else:
wb = this_input.reshape(1, -1) # [1, 2304]
if use_tf_backend:
hessian_inverse = sess.run(Woodbury_hessian_inv_op, feed_dict={
hessian_inv_holder: hessian_inverse,
input_holder: wb
})
else:
denominator = dataset_size + np.dot(np.dot(wb,hessian_inverse), wb.T)
numerator = np.dot(np.dot(hessian_inverse, wb.T), np.dot(wb,hessian_inverse))
hessian_inverse = hessian_inverse - numerator * (1.0 / denominator)
if batch_idx % freq_moniter == 0:
print '[%s] Now finish image No. %d / %d' \
%(datetime.now(), batch_idx * batch_size, n_batch_used * batch_size)
if batch_idx == n_batch_used:
sess.close()
break
    return hessian_inverse
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
private/signer/v2computing/v2computing_test.go | package v2computing
import (
"bytes"
"net/http"
"net/url"
"os"
"testing"
"time"
"github.com/kzmake/nifcloud-sdk-go/nifcloud"
"github.com/kzmake/nifcloud-sdk-go/nifcloud/credentials"
"github.com/kzmake/nifcloud-sdk-go/nifcloud/request"
"github.com/kzmake/nifcloud-sdk-go/awstesting"
)
type signerBuilder struct {
ServiceName string
Region string
SignTime time.Time
Query url.Values
Method string
SessionToken string
}
func (sb signerBuilder) BuildSigner() signer {
endpoint := "https://" + sb.ServiceName + "." + sb.Region + ".amazonaws.com"
var req *http.Request
if sb.Method == "POST" {
body := []byte(sb.Query.Encode())
reader := bytes.NewReader(body)
req, _ = http.NewRequest(sb.Method, endpoint, reader)
req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
req.Header.Add("Content-Length", string(len(body)))
} else {
req, _ = http.NewRequest(sb.Method, endpoint, nil)
req.URL.RawQuery = sb.Query.Encode()
}
sig := signer{
Request: req,
Time: sb.SignTime,
Credentials: credentials.NewStaticCredentials(
"AKID",
"SECRET",
sb.SessionToken),
}
if os.Getenv("DEBUG") != "" {
sig.Debug = nifcloud.LogDebug
sig.Logger = nifcloud.NewDefaultLogger()
}
return sig
}
func TestSignRequestWithAndWithoutSession(t *testing.T) {
// have to create more than once, so use a function
newQuery := func() url.Values {
query := make(url.Values)
query.Add("Action", "CreateDomain")
query.Add("DomainName", "TestDomain-1437033376")
query.Add("Version", "2009-04-15")
return query
}
// create request without a SecurityToken (session) in the credentials
query := newQuery()
timestamp := time.Date(2015, 7, 16, 7, 56, 16, 0, time.UTC)
builder := signerBuilder{
Method: "POST",
ServiceName: "sdb",
Region: "ap-southeast-2",
SignTime: timestamp,
Query: query,
}
signer := builder.BuildSigner()
err := signer.Sign()
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
if e, a := "tm4dX8Ks7pzFSVHz7qHdoJVXKRLuC4gWz9eti60d8ks=", signer.signature; e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := 8, len(signer.Query); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "AKID", signer.Query.Get("AccessKeyId"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "2015-07-16T07:56:16Z", signer.Query.Get("Timestamp"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "HmacSHA256", signer.Query.Get("SignatureMethod"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "2", signer.Query.Get("SignatureVersion"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "tm4dX8Ks7pzFSVHz7qHdoJVXKRLuC4gWz9eti60d8ks=", signer.Query.Get("Signature"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "CreateDomain", signer.Query.Get("Action"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "TestDomain-1437033376", signer.Query.Get("DomainName"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "2009-04-15", signer.Query.Get("Version"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
// should not have a SecurityToken parameter
_, ok := signer.Query["SecurityToken"]
if ok {
t.Errorf("expect SecurityToken found, was not")
}
// now sign again, this time with a security token (session)
query = newQuery()
builder.SessionToken = "SESSION"
signer = builder.BuildSigner()
err = signer.Sign()
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
if e, a := "Ch6qv3rzXB1SLqY2vFhsgA1WQ9rnQIE2WJCigOvAJwI=", signer.signature; e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := 9, len(signer.Query); e != a { // expect one more parameter
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "Ch6qv3rzXB1SLqY2vFhsgA1WQ9rnQIE2WJCigOvAJwI=", signer.Query.Get("Signature"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "SESSION", signer.Query.Get("SecurityToken"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
}
func TestMoreComplexSignRequest(t *testing.T) {
query := make(url.Values)
query.Add("Action", "PutAttributes")
query.Add("DomainName", "TestDomain-1437041569")
query.Add("Version", "2009-04-15")
query.Add("Attribute.2.Name", "Attr2")
query.Add("Attribute.2.Value", "Value2")
query.Add("Attribute.2.Replace", "true")
query.Add("Attribute.1.Name", "Attr1-%\\+ %")
query.Add("Attribute.1.Value", " \tValue1 +!@#$%^&*(){}[]\"';:?/.>,<\x12\x00")
query.Add("Attribute.1.Replace", "true")
query.Add("ItemName", "Item 1")
timestamp := time.Date(2015, 7, 16, 10, 12, 51, 0, time.UTC)
builder := signerBuilder{
Method: "POST",
ServiceName: "sdb",
Region: "ap-southeast-2",
SignTime: timestamp,
Query: query,
SessionToken: "SESSION",
}
signer := builder.BuildSigner()
err := signer.Sign()
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
if e, a := "WNdE62UJKLKoA6XncVY/9RDbrKmcVMdQPQOTAs8SgwQ=", signer.signature; e != a {
t.Errorf("expect %v, got %v", e, a)
}
}
func TestGet(t *testing.T) {
svc := awstesting.NewClient(&nifcloud.Config{
Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"),
Region: nifcloud.String("ap-southeast-2"),
})
r := svc.NewRequest(
&request.Operation{
Name: "OpName",
HTTPMethod: "GET",
HTTPPath: "/",
},
nil,
nil,
)
r.Build()
if e, a := "GET", r.HTTPRequest.Method; e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "", r.HTTPRequest.URL.Query().Get("Signature"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
SignSDKRequest(r)
if r.Error != nil {
t.Fatalf("expect no error, got %v", r.Error)
}
t.Logf("Signature: %s", r.HTTPRequest.URL.Query().Get("Signature"))
if len(r.HTTPRequest.URL.Query().Get("Signature")) == 0 {
t.Errorf("expect signature to be set, was not")
}
}
func TestAnonymousCredentials(t *testing.T) {
svc := awstesting.NewClient(&nifcloud.Config{
Credentials: credentials.AnonymousCredentials,
Region: nifcloud.String("ap-southeast-2"),
})
r := svc.NewRequest(
&request.Operation{
Name: "PutAttributes",
HTTPMethod: "POST",
HTTPPath: "/",
},
nil,
nil,
)
r.Build()
SignSDKRequest(r)
req := r.HTTPRequest
req.ParseForm()
if a := req.PostForm.Get("Signature"); len(a) != 0 {
t.Errorf("expect no signature, got %v", a)
}
}
| [
"\"DEBUG\""
]
| []
| [
"DEBUG"
]
| [] | ["DEBUG"] | go | 1 | 0 | |
LambdaAP-EIP+RouteFailover/6.0/healthcheck.py | import os
import sys
import socket
import datetime
import logging
import boto3
logger = logging.getLogger()
logger.setLevel(logging.INFO)
ec2client = boto3.client('ec2', endpoint_url=os.environ['VPCEndpointURL'])
class get_instanceinfo(object):
def __init__(self, vpcid, hapairvalue):
self.active_id = ''
self.active_eni0_id = ''
self.active_eni0_ip = ''
self.active_eni0_ip_map = {}
self.active_eni0_route_map = {}
self.active_eni1_id = ''
self.active_eni1_ip = ''
self.active_eni1_route_map = {}
self.passive_id = ''
self.passive_eni0_id = ''
self.passive_eni0_ip = ''
self.passive_eni0_ip_map = {}
self.passive_eni0_route_map = {}
self.passive_eni1_id = ''
self.passive_eni1_ip = ''
self.passive_eni1_route_map = {}
try:
ec2search = ec2client.describe_instances(Filters=[{'Name':'vpc-id', 'Values':[vpcid]}, {'Name':'tag:ha:pair', 'Values':[hapairvalue]}])
if ec2search['ResponseMetadata']['HTTPStatusCode'] != int(200):
logger.error('<--!! Error seen when searching for instances in vpc {} with matching ha:pair tag value {}: {}'.format(vpcid, hapairvalue, ec2search))
except Exception as error:
logger.error('<--!! Exception in describe_instances step in get_instanceinfo: {}'.format(error))
sys.exit()
instance_ids = []
for result in ec2search['Reservations']:
for hit in result['Instances']:
if 'InstanceId' in hit:
instance_ids.append(hit['InstanceId'])
if len(instance_ids) != 2:
logger.error('<--!! The number of instances found in vpc {} with matching ha:pair tag value {} does not equal 2! {}'.format(vpcid, hapairvalue, instance_ids))
sys.exit()
else:
logger.info('--> Found matching instances: {}'.format(instance_ids))
for instances in ec2search['Reservations']:
for instance in instances['Instances']:
count1, count2, count3 = 0, 0, 0
for tag in instance['Tags']:
if 'ha:status' in tag['Key'] and 'active' in tag['Value']:
self.active_id = instance['InstanceId']
for eni in instance['NetworkInterfaces']:
if eni['Attachment']['DeviceIndex'] == 0:
self.active_eni0_id = eni['NetworkInterfaceId']
self.active_eni0_ip = eni['PrivateIpAddress']
for addr in eni['PrivateIpAddresses']:
if addr['Primary'] is False:
count1 += 1
self.active_eni0_ip_map[count1] = [eni['NetworkInterfaceId'], addr['PrivateIpAddress']]
if eni['Attachment']['DeviceIndex'] == 1:
self.active_eni1_id = eni['NetworkInterfaceId']
self.active_eni1_ip = eni['PrivateIpAddress']
rts = ec2client.describe_route_tables(Filters=[{'Name':'vpc-id', 'Values':[vpcid]}, {'Name':'route.instance-id', 'Values':[self.active_id]}])
for rt in rts['RouteTables']:
for route in rt['Routes']:
if 'InstanceId' in route:
if self.active_id in route['InstanceId'] and self.active_eni0_id in route['NetworkInterfaceId']:
count2 += 1
self.active_eni0_route_map[count2] = [rt['RouteTableId'], route['DestinationCidrBlock'], route['NetworkInterfaceId']]
if self.active_id in route['InstanceId'] and self.active_eni1_id in route['NetworkInterfaceId']:
count3 += 1
self.active_eni1_route_map[count3] = [rt['RouteTableId'], route['DestinationCidrBlock'], route['NetworkInterfaceId']]
if 'ha:status' in tag['Key'] and 'passive' in tag['Value']:
self.passive_id = instance['InstanceId']
for eni in instance['NetworkInterfaces']:
if eni['Attachment']['DeviceIndex'] == 0:
self.passive_eni0_id = eni['NetworkInterfaceId']
self.passive_eni0_ip = eni['PrivateIpAddress']
for addr in eni['PrivateIpAddresses']:
if addr['Primary'] is False:
count1 += 1
self.passive_eni0_ip_map[count1] = [eni['NetworkInterfaceId'], addr['PrivateIpAddress']]
if eni['Attachment']['DeviceIndex'] == 1:
self.passive_eni1_id = eni['NetworkInterfaceId']
self.passive_eni1_ip = eni['PrivateIpAddress']
rts = ec2client.describe_route_tables(Filters=[{'Name':'vpc-id', 'Values':[vpcid]}, {'Name':'route.instance-id', 'Values':[self.passive_id]}])
for rt in rts['RouteTables']:
for route in rt['Routes']:
if 'InstanceId' in route:
if self.passive_id in route['InstanceId'] and self.passive_eni0_id in route['NetworkInterfaceId']:
count2 += 1
self.passive_eni0_route_map[count2] = [rt['RouteTableId'], route['DestinationCidrBlock'], route['NetworkInterfaceId']]
if self.passive_id in route['InstanceId'] and self.passive_eni1_id in route['NetworkInterfaceId']:
count3 += 1
self.passive_eni1_route_map[count3] = [rt['RouteTableId'], route['DestinationCidrBlock'], route['NetworkInterfaceId']]
class get_hc_status(object):
def __init__(self, ip, port):
self.ip = ip
self.port = port
self.status = None
logger.debug('--> Checking Host+Port {}:{}'.format(self.ip, self.port))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
try:
s.connect((self.ip, int(self.port)))
s.shutdown(2)
self.status = True
logger.info('<-- Host+Port {}:{} is UP = {}'.format(self.ip, self.port, self.status))
except Exception as error:
s.close()
self.status = False
logger.error('<--!! Exception in get_hc_status: {}'.format(error))
logger.info('<-- Host+Port {}:{} is UP = {}'.format(self.ip, self.port, self.status))
def reassign_eips(activemap, passivemap):
logger.debug('--> Updating EIPs to target new active instance')
if len(activemap) != len(passivemap):
logger.error('<--!! Instances ENI0 secondaryIP count does not match!')
logger.error('<--!! This will likely affect EIP reassignment!')
try:
for key in activemap:
avalue = activemap[key]
pvalue = passivemap[key]
logger.debug('--> Found matching ENI0 secondaryIPs {} to {}'.format(avalue, pvalue))
aeni, aip = avalue
peni, pip = pvalue
try:
aeip = ec2client.describe_addresses(Filters=[{'Name':'network-interface-id', 'Values':[aeni]}, {'Name':'private-ip-address', 'Values':[aip]}])
except Exception as error:
logger.error('<--!! Exception in describe_addresses step in reassign_eips: {}'.format(error))
if aeip['ResponseMetadata']['HTTPStatusCode'] == int(200):
if aeip['Addresses']:
try:
response = ec2client.associate_address(AllowReassociation=True, AllocationId=aeip['Addresses'][0]['AllocationId'], NetworkInterfaceId=peni, PrivateIpAddress=pip)
if response['ResponseMetadata']['HTTPStatusCode'] == int(200):
logger.info('--> Updated {} to target {} {}'.format(aeip['Addresses'][0]['PublicIp'], peni, pip))
else:
logger.error('<--!! Error seen when updating {} to target {} {}: {}'.format(aeip['Addresses'][0]['PublicIp'], peni, pip, response))
except Exception as error:
logger.error('<--!! Exception in associate_address step in reassign_eips: {}'.format(error))
else:
logger.error('<--!! Error seen when updating {} to target {} {}: {}'.format(aeip['Addresses'][0]['PublicIp'], peni, pip, aeip))
except Exception as error:
logger.error('<--!! Exception in reassign_eips: {}'.format(error))
def replace_routes(map, eni):
logger.debug('--> Updating routes to target {}'.format(eni))
try:
        for key, value in map.items():
mrt, mroute, meni = value
if eni:
response = ec2client.replace_route(NetworkInterfaceId=eni, RouteTableId=mrt, DestinationCidrBlock=mroute)
if response['ResponseMetadata']['HTTPStatusCode'] == int(200):
logger.info('--> Updated {} in rt {} to target {}'.format(mroute, mrt, eni))
else:
logger.error('<--!! Error seen when updating {} in rt {} to target {}: {}'.format(mroute, mrt, eni, response))
except Exception as error:
logger.error('<--!! Exception in replace_routes: {}'.format(error))
def update_tags(activeid, passiveid):
try:
if activeid:
ec2client.create_tags(Resources=[activeid], Tags=[{'Key': 'ha:status', 'Value': 'active'}])
ec2client.create_tags(Resources=[activeid], Tags=[{'Key': 'ha:time', 'Value': str(datetime.datetime.now())}])
logger.info('--> Updated tags for active instance: {}'.format(activeid))
if passiveid:
ec2client.create_tags(Resources=[passiveid], Tags=[{'Key': 'ha:status', 'Value': 'passive'}])
ec2client.create_tags(Resources=[passiveid], Tags=[{'Key': 'ha:time', 'Value': str(datetime.datetime.now())}])
logger.info('--> Updated tags for passive instance: {}'.format(passiveid))
except Exception as error:
logger.error('<--!! Exception in update_tags: {}'.format(error))
def lambda_handler(event, context):
logger.info('-=-' * 20)
if 'source' in event:
if 'aws.events' in event['source']:
logger.info('>> Triggered by CloudWatch Scheduled Event <<')
if 'data' in event:
if 'stitch' in event['data']:
logger.info('>> Triggered by FortiOS Stitch Action <<')
if os.environ['VPCID'] and os.environ['HAPairValue'] and os.environ['HealthCheckPort'].isdigit():
ha = get_instanceinfo(os.environ['VPCID'], os.environ['HAPairValue'])
else:
logger.error('<--!! Exception in environment variables: VPCID should be a single VPC ID value')
logger.error('<--!! Exception in environment variables: HAPairValue should be a single string value following AWS tag value restrictions')
logger.error('<--!! Exception in environment variables: HealthCheckPort should be a single tcp port number')
sys.exit()
if ha.active_eni1_ip and ha.passive_eni1_ip:
hcactive = get_hc_status(ha.active_eni1_ip, os.environ['HealthCheckPort'])
hcpassive = get_hc_status(ha.passive_eni1_ip, os.environ['HealthCheckPort'])
else:
hcactive = get_hc_status(ha.active_eni0_ip, os.environ['HealthCheckPort'])
hcpassive = get_hc_status(ha.passive_eni0_ip, os.environ['HealthCheckPort'])
if hcactive.status is True:
if hcpassive.status is True:
logger.info('-->> Active is up, Passive is up: Checking routes point to Active')
if hcpassive.status is False:
logger.info('-->> Active is up, Passive is down: Checking routes point to Active')
if ha.passive_eni0_route_map:
logger.error('!!-->> Found routes pointing to Passive ENI0: Moving routes to Active')
replace_routes(ha.passive_eni0_route_map, ha.active_eni0_id)
update_tags(ha.active_id, ha.passive_id)
if ha.passive_eni1_route_map:
logger.error('!!-->> Found routes pointing to Passive ENI1: Moving routes to Active')
replace_routes(ha.passive_eni1_route_map, ha.active_eni1_id)
update_tags(ha.active_id, ha.passive_id)
elif hcactive.status is False and hcpassive.status is True:
logger.error('-->> Active is down but Passive is up: Moving EIPs & routes to Passive')
logger.error('-->> Triggering_CloudWatch_Failover_Alarm')
reassign_eips(ha.active_eni0_ip_map, ha.passive_eni0_ip_map)
replace_routes(ha.active_eni0_route_map, ha.passive_eni0_id)
replace_routes(ha.active_eni1_route_map, ha.passive_eni1_id)
update_tags(ha.passive_id, ha.active_id)
elif hcactive.status is False and hcpassive.status is False:
logger.error('!!-->> Both units are down: Bypassing EIP & route checks')
logger.info('-=-' * 20)
#
# end of script
#
| []
| []
| [
"HAPairValue",
"VPCEndpointURL",
"HealthCheckPort",
"VPCID"
]
| [] | ["HAPairValue", "VPCEndpointURL", "HealthCheckPort", "VPCID"] | python | 4 | 0 | |
test/functional/feature_taproot.py | #!/usr/bin/env python3
# Copyright (c) 2019-2020 The Tokyocoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test Taproot softfork (BIPs 340-342)
from test_framework.blocktools import (
create_coinbase,
create_block,
add_witness_commitment,
MAX_BLOCK_SIGOPS_WEIGHT,
NORMAL_GBT_REQUEST_PARAMS,
WITNESS_SCALE_FACTOR,
)
from test_framework.messages import (
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
ToHex,
)
from test_framework.script import (
ANNEX_TAG,
CScript,
CScriptNum,
CScriptOp,
LEAF_VERSION_TAPSCRIPT,
LegacySignatureHash,
LOCKTIME_THRESHOLD,
MAX_SCRIPT_ELEMENT_SIZE,
OP_0,
OP_1,
OP_2,
OP_3,
OP_4,
OP_5,
OP_6,
OP_7,
OP_8,
OP_9,
OP_10,
OP_11,
OP_12,
OP_16,
OP_2DROP,
OP_2DUP,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_CHECKSIG,
OP_CHECKSIGADD,
OP_CHECKSIGVERIFY,
OP_CODESEPARATOR,
OP_DROP,
OP_DUP,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_EQUALVERIFY,
OP_HASH160,
OP_IF,
OP_NOP,
OP_NOT,
OP_NOTIF,
OP_PUSHDATA1,
OP_RETURN,
OP_SWAP,
OP_VERIFY,
SIGHASH_DEFAULT,
SIGHASH_ALL,
SIGHASH_NONE,
SIGHASH_SINGLE,
SIGHASH_ANYONECANPAY,
SegwitV0SignatureHash,
TaprootSignatureHash,
is_op_success,
taproot_construct,
)
from test_framework.test_framework import TokyocoinTestFramework
from test_framework.util import assert_raises_rpc_error, assert_equal
from test_framework.key import generate_privkey, compute_xonly_pubkey, sign_schnorr, tweak_add_privkey, ECKey
from test_framework.address import (
hash160,
sha256,
)
from collections import OrderedDict, namedtuple
from io import BytesIO
import json
import hashlib
import os
import random
# === Framework for building spending transactions. ===
#
# The computation is represented as a "context" dict, whose entries store potentially-unevaluated expressions that
# refer to lower-level ones. By overwriting these expression, many aspects - both high and low level - of the signing
# process can be overridden.
#
# Specifically, a context object is a dict that maps names to compositions of:
# - values
# - lists of values
# - callables which, when fed the context object as argument, produce any of these
#
# The DEFAULT_CONTEXT object specifies a standard signing process, with many overridable knobs.
#
# The get(ctx, name) function can evaluate a name, and cache its result in the context.
# getter(name) can be used to construct a callable that evaluates name. For example:
#
# ctx1 = {**DEFAULT_CONTEXT, inputs=[getter("sign"), b'\x01']}
#
# creates a context where the script inputs are a signature plus the bytes 0x01.
#
# override(expr, name1=expr1, name2=expr2, ...) can be used to cause an expression to be evaluated in a selectively
# modified context. For example:
#
# ctx2 = {**DEFAULT_CONTEXT, sighash=override(default_sighash, hashtype=SIGHASH_DEFAULT)}
#
# creates a context ctx2 where the sighash is modified to use hashtype=SIGHASH_DEFAULT. This differs from
#
# ctx3 = {**DEFAULT_CONTEXT, hashtype=SIGHASH_DEFAULT}
#
# in that ctx3 will globally use hashtype=SIGHASH_DEFAULT (including in the hashtype byte appended to the signature)
# while ctx2 only uses the modified hashtype inside the sighash calculation.
def deep_eval(ctx, expr):
"""Recursively replace any callables c in expr (including inside lists) with c(ctx)."""
while callable(expr):
expr = expr(ctx)
if isinstance(expr, list):
expr = [deep_eval(ctx, x) for x in expr]
return expr
# Data type to represent fully-evaluated expressions in a context dict (so we can avoid reevaluating them).
Final = namedtuple("Final", "value")
def get(ctx, name):
"""Evaluate name in context ctx."""
assert name in ctx, "Missing '%s' in context" % name
expr = ctx[name]
if not isinstance(expr, Final):
# Evaluate and cache the result.
expr = Final(deep_eval(ctx, expr))
ctx[name] = expr
return expr.value
def getter(name):
"""Return a callable that evaluates name in its passed context."""
return lambda ctx: get(ctx, name)
def override(expr, **kwargs):
"""Return a callable that evaluates expr in a modified context."""
return lambda ctx: deep_eval({**ctx, **kwargs}, expr)
# === Implementations for the various default expressions in DEFAULT_CONTEXT ===
def default_hashtype(ctx):
"""Default expression for "hashtype": SIGHASH_DEFAULT for taproot, SIGHASH_ALL otherwise."""
mode = get(ctx, "mode")
if mode == "taproot":
return SIGHASH_DEFAULT
else:
return SIGHASH_ALL
def default_tapleaf(ctx):
"""Default expression for "tapleaf": looking up leaf in tap[2]."""
return get(ctx, "tap").leaves[get(ctx, "leaf")]
def default_script_taproot(ctx):
"""Default expression for "script_taproot": tapleaf.script."""
return get(ctx, "tapleaf").script
def default_leafversion(ctx):
"""Default expression for "leafversion": tapleaf.version"""
return get(ctx, "tapleaf").version
def default_negflag(ctx):
"""Default expression for "negflag": tap.negflag."""
return get(ctx, "tap").negflag
def default_pubkey_inner(ctx):
"""Default expression for "pubkey_inner": tap.inner_pubkey."""
return get(ctx, "tap").inner_pubkey
def default_merklebranch(ctx):
"""Default expression for "merklebranch": tapleaf.merklebranch."""
return get(ctx, "tapleaf").merklebranch
def default_controlblock(ctx):
"""Default expression for "controlblock": combine leafversion, negflag, pubkey_inner, merklebranch."""
return bytes([get(ctx, "leafversion") + get(ctx, "negflag")]) + get(ctx, "pubkey_inner") + get(ctx, "merklebranch")
def default_sighash(ctx):
"""Default expression for "sighash": depending on mode, compute BIP341, BIP143, or legacy sighash."""
tx = get(ctx, "tx")
idx = get(ctx, "idx")
hashtype = get(ctx, "hashtype_actual")
mode = get(ctx, "mode")
if mode == "taproot":
# BIP341 signature hash
utxos = get(ctx, "utxos")
annex = get(ctx, "annex")
if get(ctx, "leaf") is not None:
codeseppos = get(ctx, "codeseppos")
leaf_ver = get(ctx, "leafversion")
script = get(ctx, "script_taproot")
return TaprootSignatureHash(tx, utxos, hashtype, idx, scriptpath=True, script=script, leaf_ver=leaf_ver, codeseparator_pos=codeseppos, annex=annex)
else:
return TaprootSignatureHash(tx, utxos, hashtype, idx, scriptpath=False, annex=annex)
elif mode == "witv0":
# BIP143 signature hash
scriptcode = get(ctx, "scriptcode")
utxos = get(ctx, "utxos")
return SegwitV0SignatureHash(scriptcode, tx, idx, hashtype, utxos[idx].nValue)
else:
# Pre-segwit signature hash
scriptcode = get(ctx, "scriptcode")
return LegacySignatureHash(scriptcode, tx, idx, hashtype)[0]
def default_tweak(ctx):
"""Default expression for "tweak": None if a leaf is specified, tap[0] otherwise."""
if get(ctx, "leaf") is None:
return get(ctx, "tap").tweak
return None
def default_key_tweaked(ctx):
"""Default expression for "key_tweaked": key if tweak is None, tweaked with it otherwise."""
key = get(ctx, "key")
tweak = get(ctx, "tweak")
if tweak is None:
return key
else:
return tweak_add_privkey(key, tweak)
def default_signature(ctx):
"""Default expression for "signature": BIP340 signature or ECDSA signature depending on mode."""
sighash = get(ctx, "sighash")
if get(ctx, "mode") == "taproot":
key = get(ctx, "key_tweaked")
flip_r = get(ctx, "flag_flip_r")
flip_p = get(ctx, "flag_flip_p")
return sign_schnorr(key, sighash, flip_r=flip_r, flip_p=flip_p)
else:
key = get(ctx, "key")
return key.sign_ecdsa(sighash)
def default_hashtype_actual(ctx):
"""Default expression for "hashtype_actual": hashtype, unless mismatching SIGHASH_SINGLE in taproot."""
hashtype = get(ctx, "hashtype")
mode = get(ctx, "mode")
if mode != "taproot":
return hashtype
idx = get(ctx, "idx")
tx = get(ctx, "tx")
if hashtype & 3 == SIGHASH_SINGLE and idx >= len(tx.vout):
return (hashtype & ~3) | SIGHASH_NONE
return hashtype
def default_bytes_hashtype(ctx):
"""Default expression for "bytes_hashtype": bytes([hashtype_actual]) if not 0, b"" otherwise."""
return bytes([x for x in [get(ctx, "hashtype_actual")] if x != 0])
def default_sign(ctx):
"""Default expression for "sign": concatenation of signature and bytes_hashtype."""
return get(ctx, "signature") + get(ctx, "bytes_hashtype")
def default_inputs_keypath(ctx):
"""Default expression for "inputs_keypath": a signature."""
return [get(ctx, "sign")]
def default_witness_taproot(ctx):
"""Default expression for "witness_taproot", consisting of inputs, script, control block, and annex as needed."""
annex = get(ctx, "annex")
suffix_annex = []
if annex is not None:
suffix_annex = [annex]
if get(ctx, "leaf") is None:
return get(ctx, "inputs_keypath") + suffix_annex
else:
return get(ctx, "inputs") + [bytes(get(ctx, "script_taproot")), get(ctx, "controlblock")] + suffix_annex
def default_witness_witv0(ctx):
"""Default expression for "witness_witv0", consisting of inputs and witness script, as needed."""
script = get(ctx, "script_witv0")
inputs = get(ctx, "inputs")
if script is None:
return inputs
else:
return inputs + [script]
def default_witness(ctx):
"""Default expression for "witness", delegating to "witness_taproot" or "witness_witv0" as needed."""
mode = get(ctx, "mode")
if mode == "taproot":
return get(ctx, "witness_taproot")
elif mode == "witv0":
return get(ctx, "witness_witv0")
else:
return []
def default_scriptsig(ctx):
"""Default expression for "scriptsig", consisting of inputs and redeemscript, as needed."""
scriptsig = []
mode = get(ctx, "mode")
if mode == "legacy":
scriptsig = get(ctx, "inputs")
redeemscript = get(ctx, "script_p2sh")
if redeemscript is not None:
scriptsig += [bytes(redeemscript)]
return scriptsig
# The default context object.
DEFAULT_CONTEXT = {
# == The main expressions to evaluate. Only override these for unusual or invalid spends. ==
# The overall witness stack, as a list of bytes objects.
"witness": default_witness,
# The overall scriptsig, as a list of CScript objects (to be concatenated) and bytes objects (to be pushed)
"scriptsig": default_scriptsig,
# == Expressions you'll generally only override for intentionally invalid spends. ==
# The witness stack for spending a taproot output.
"witness_taproot": default_witness_taproot,
# The witness stack for spending a P2WPKH/P2WSH output.
"witness_witv0": default_witness_witv0,
# The script inputs for a taproot key path spend.
"inputs_keypath": default_inputs_keypath,
# The actual hashtype to use (usually equal to hashtype, but in taproot SIGHASH_SINGLE is not always allowed).
"hashtype_actual": default_hashtype_actual,
# The bytes object for a full signature (including hashtype byte, if needed).
"bytes_hashtype": default_bytes_hashtype,
# A full script signature (bytes including hashtype, if needed)
"sign": default_sign,
# An ECDSA or Schnorr signature (excluding hashtype byte).
"signature": default_signature,
# The 32-byte tweaked key (equal to key for script path spends, or key+tweak for key path spends).
"key_tweaked": default_key_tweaked,
# The tweak to use (None for script path spends, the actual tweak for key path spends).
"tweak": default_tweak,
# The sighash value (32 bytes)
"sighash": default_sighash,
# The information about the chosen script path spend (TaprootLeafInfo object).
"tapleaf": default_tapleaf,
# The script to push, and include in the sighash, for a taproot script path spend.
"script_taproot": default_script_taproot,
# The inner pubkey for a taproot script path spend (32 bytes).
"pubkey_inner": default_pubkey_inner,
# The negation flag of the inner pubkey for a taproot script path spend.
"negflag": default_negflag,
# The leaf version to include in the sighash (this does not affect the one in the control block).
"leafversion": default_leafversion,
# The Merkle path to include in the control block for a script path spend.
"merklebranch": default_merklebranch,
# The control block to push for a taproot script path spend.
"controlblock": default_controlblock,
# Whether to produce signatures with invalid P sign (Schnorr signatures only).
"flag_flip_p": False,
# Whether to produce signatures with invalid R sign (Schnorr signatures only).
"flag_flip_r": False,
# == Parameters that can be changed without invalidating, but do have a default: ==
# The hashtype (as an integer).
"hashtype": default_hashtype,
# The annex (only when mode=="taproot").
"annex": None,
# The codeseparator position (only when mode=="taproot").
"codeseppos": -1,
# The redeemscript to add to the scriptSig (if P2SH; None implies not P2SH).
"script_p2sh": None,
# The script to add to the witness in (if P2WSH; None implies P2WPKH)
"script_witv0": None,
# The leaf to use in taproot spends (if script path spend; None implies key path spend).
"leaf": None,
# The input arguments to provide to the executed script
"inputs": [],
# == Parameters to be set before evaluation: ==
# - mode: what spending style to use ("taproot", "witv0", or "legacy").
# - key: the (untweaked) private key to sign with (ECKey object for ECDSA, 32 bytes for Schnorr).
# - tap: the TaprootInfo object (see taproot_construct; needed in mode=="taproot").
# - tx: the transaction to sign.
# - utxos: the UTXOs being spent (needed in mode=="witv0" and mode=="taproot").
# - idx: the input position being signed.
# - scriptcode: the scriptcode to include in legacy and witv0 sighashes.
}
def flatten(lst):
ret = []
for elem in lst:
if isinstance(elem, list):
ret += flatten(elem)
else:
ret.append(elem)
return ret
def spend(tx, idx, utxos, **kwargs):
"""Sign transaction input idx of tx, provided utxos is the list of outputs being spent.
Additional arguments may be provided that override any aspect of the signing process.
See DEFAULT_CONTEXT above for what can be overridden, and what must be provided.
"""
ctx = {**DEFAULT_CONTEXT, "tx":tx, "idx":idx, "utxos":utxos, **kwargs}
def to_script(elem):
"""If fed a CScript, return it; if fed bytes, return a CScript that pushes it."""
if isinstance(elem, CScript):
return elem
else:
return CScript([elem])
scriptsig_list = flatten(get(ctx, "scriptsig"))
scriptsig = CScript(b"".join(bytes(to_script(elem)) for elem in scriptsig_list))
witness_stack = flatten(get(ctx, "witness"))
return (scriptsig, witness_stack)
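# A sketch of direct use (placeholder names; tap comes from taproot_construct, seckey from generate_privkey):
#   scriptsig, witness_stack = spend(tx, 0, utxos, mode="taproot", key=seckey, tap=tap)
#   tx.vin[0].scriptSig = scriptsig
#   tx.wit.vtxinwit[0].scriptWitness.stack = witness_stack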
# === Spender objects ===
#
# Each spender is a tuple of:
# - A scriptPubKey which is to be spent from (CScript)
# - A comment describing the test (string)
# - Whether the spending (on itself) is expected to be standard (bool)
# - A tx-signing lambda returning (scriptsig, witness_stack), taking as inputs:
# - A transaction to sign (CTransaction)
# - An input position (int)
# - The spent UTXOs by this transaction (list of CTxOut)
# - Whether to produce a valid spend (bool)
# - A string with an expected error message for failure case if known
# - The (pre-taproot) sigops weight consumed by a successful spend
# - Whether this spend cannot fail
# - Whether this test demands being placed in a txin with no corresponding txout (for testing SIGHASH_SINGLE behavior)
Spender = namedtuple("Spender", "script,comment,is_standard,sat_function,err_msg,sigops_weight,no_fail,need_vin_vout_mismatch")
def make_spender(comment, *, tap=None, witv0=False, script=None, pkh=None, p2sh=False, spk_mutate_pre_p2sh=None, failure=None, standard=True, err_msg=None, sigops_weight=0, need_vin_vout_mismatch=False, **kwargs):
"""Helper for constructing Spender objects using the context signing framework.
* tap: a TaprootInfo object (see taproot_construct), for Taproot spends (cannot be combined with pkh, witv0, or script)
* witv0: boolean indicating the use of witness v0 spending (needs one of script or pkh)
* script: the actual script executed (for bare/P2WSH/P2SH spending)
* pkh: the public key for P2PKH or P2WPKH spending
    * p2sh: whether the output is P2SH wrapped (this is supported even for Taproot, where it makes the output unencumbered)
    * spk_mutate_pre_p2sh: a callable to be applied to the script (before potentially P2SH-wrapping it)
* failure: a dict of entries to override in the context when intentionally failing to spend (if None, no_fail will be set)
* standard: whether the (valid version of) spending is expected to be standard
* err_msg: a string with an expected error message for failure (or None, if not cared about)
* sigops_weight: the pre-taproot sigops weight consumed by a successful spend
"""
conf = dict()
# Compute scriptPubKey and set useful defaults based on the inputs.
if witv0:
assert tap is None
conf["mode"] = "witv0"
if pkh is not None:
# P2WPKH
assert script is None
pubkeyhash = hash160(pkh)
spk = CScript([OP_0, pubkeyhash])
conf["scriptcode"] = CScript([OP_DUP, OP_HASH160, pubkeyhash, OP_EQUALVERIFY, OP_CHECKSIG])
conf["script_witv0"] = None
conf["inputs"] = [getter("sign"), pkh]
elif script is not None:
# P2WSH
spk = CScript([OP_0, sha256(script)])
conf["scriptcode"] = script
conf["script_witv0"] = script
else:
assert False
elif tap is None:
conf["mode"] = "legacy"
if pkh is not None:
# P2PKH
assert script is None
pubkeyhash = hash160(pkh)
spk = CScript([OP_DUP, OP_HASH160, pubkeyhash, OP_EQUALVERIFY, OP_CHECKSIG])
conf["scriptcode"] = spk
conf["inputs"] = [getter("sign"), pkh]
elif script is not None:
# bare
spk = script
conf["scriptcode"] = script
else:
assert False
else:
assert script is None
conf["mode"] = "taproot"
conf["tap"] = tap
spk = tap.scriptPubKey
if spk_mutate_pre_p2sh is not None:
spk = spk_mutate_pre_p2sh(spk)
if p2sh:
# P2SH wrapper can be combined with anything else
conf["script_p2sh"] = spk
spk = CScript([OP_HASH160, hash160(spk), OP_EQUAL])
conf = {**conf, **kwargs}
def sat_fn(tx, idx, utxos, valid):
if valid:
return spend(tx, idx, utxos, **conf)
else:
assert failure is not None
return spend(tx, idx, utxos, **{**conf, **failure})
return Spender(script=spk, comment=comment, is_standard=standard, sat_function=sat_fn, err_msg=err_msg, sigops_weight=sigops_weight, no_fail=failure is None, need_vin_vout_mismatch=need_vin_vout_mismatch)
def add_spender(spenders, *args, **kwargs):
"""Make a spender using make_spender, and add it to spenders."""
spenders.append(make_spender(*args, **kwargs))
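# A sketch of how spenders are typically registered (placeholder tap/sec built elsewhere via
# taproot_construct/generate_privkey; SINGLE_SIG, SIGHASH_BITFLIP and ERR_SIG_SCHNORR are defined below):
#   add_spender(spenders, "sig/key", tap=tap, key=sec, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)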
# === Helpers for the test ===
def random_checksig_style(pubkey):
"""Creates a random CHECKSIG* tapscript that would succeed with only the valid signature on witness stack."""
opcode = random.choice([OP_CHECKSIG, OP_CHECKSIGVERIFY, OP_CHECKSIGADD])
if (opcode == OP_CHECKSIGVERIFY):
ret = CScript([pubkey, opcode, OP_1])
elif (opcode == OP_CHECKSIGADD):
num = random.choice([0, 0x7fffffff, -0x7fffffff])
ret = CScript([num, pubkey, opcode, num + 1, OP_EQUAL])
else:
ret = CScript([pubkey, opcode])
return bytes(ret)
def random_bytes(n):
"""Return a random bytes object of length n."""
return bytes(random.getrandbits(8) for i in range(n))
def bitflipper(expr):
"""Return a callable that evaluates expr and returns it with a random bitflip."""
def fn(ctx):
sub = deep_eval(ctx, expr)
assert isinstance(sub, bytes)
return (int.from_bytes(sub, 'little') ^ (1 << random.randrange(len(sub) * 8))).to_bytes(len(sub), 'little')
return fn
def zero_appender(expr):
"""Return a callable that evaluates expr and returns it with a zero added."""
return lambda ctx: deep_eval(ctx, expr) + b"\x00"
def byte_popper(expr):
"""Return a callable that evaluates expr and returns it with its last byte removed."""
return lambda ctx: deep_eval(ctx, expr)[:-1]
# Expected error strings
ERR_SIG_SIZE = {"err_msg": "Invalid Schnorr signature size"}
ERR_SIG_HASHTYPE = {"err_msg": "Invalid Schnorr signature hash type"}
ERR_SIG_SCHNORR = {"err_msg": "Invalid Schnorr signature"}
ERR_OP_RETURN = {"err_msg": "OP_RETURN was encountered"}
ERR_CONTROLBLOCK_SIZE = {"err_msg": "Invalid Taproot control block size"}
ERR_WITNESS_PROGRAM_MISMATCH = {"err_msg": "Witness program hash mismatch"}
ERR_PUSH_LIMIT = {"err_msg": "Push value size limit exceeded"}
ERR_DISABLED_OPCODE = {"err_msg": "Attempted to use a disabled opcode"}
ERR_TAPSCRIPT_CHECKMULTISIG = {"err_msg": "OP_CHECKMULTISIG(VERIFY) is not available in tapscript"}
ERR_MINIMALIF = {"err_msg": "OP_IF/NOTIF argument must be minimal in tapscript"}
ERR_UNKNOWN_PUBKEY = {"err_msg": "Public key is neither compressed or uncompressed"}
ERR_STACK_SIZE = {"err_msg": "Stack size limit exceeded"}
ERR_CLEANSTACK = {"err_msg": "Stack size must be exactly one after execution"}
ERR_STACK_EMPTY = {"err_msg": "Operation not valid with the current stack size"}
ERR_SIGOPS_RATIO = {"err_msg": "Too much signature validation relative to witness weight"}
ERR_UNDECODABLE = {"err_msg": "Opcode missing or not understood"}
ERR_NO_SUCCESS = {"err_msg": "Script evaluated without error but finished with a false/empty top stack element"}
ERR_EMPTY_WITNESS = {"err_msg": "Witness program was passed an empty witness"}
ERR_CHECKSIGVERIFY = {"err_msg": "Script failed an OP_CHECKSIGVERIFY operation"}
VALID_SIGHASHES_ECDSA = [
SIGHASH_ALL,
SIGHASH_NONE,
SIGHASH_SINGLE,
SIGHASH_ANYONECANPAY + SIGHASH_ALL,
SIGHASH_ANYONECANPAY + SIGHASH_NONE,
SIGHASH_ANYONECANPAY + SIGHASH_SINGLE
]
VALID_SIGHASHES_TAPROOT = [SIGHASH_DEFAULT] + VALID_SIGHASHES_ECDSA
VALID_SIGHASHES_TAPROOT_SINGLE = [
SIGHASH_SINGLE,
SIGHASH_ANYONECANPAY + SIGHASH_SINGLE
]
VALID_SIGHASHES_TAPROOT_NO_SINGLE = [h for h in VALID_SIGHASHES_TAPROOT if h not in VALID_SIGHASHES_TAPROOT_SINGLE]
SIGHASH_BITFLIP = {"failure": {"sighash": bitflipper(default_sighash)}}
SIG_POP_BYTE = {"failure": {"sign": byte_popper(default_sign)}}
SINGLE_SIG = {"inputs": [getter("sign")]}
SIG_ADD_ZERO = {"failure": {"sign": zero_appender(default_sign)}}
DUST_LIMIT = 600
MIN_FEE = 50000
# === Actual test cases ===
def spenders_taproot_active():
"""Return a list of Spenders for testing post-Taproot activation behavior."""
secs = [generate_privkey() for _ in range(8)]
pubs = [compute_xonly_pubkey(sec)[0] for sec in secs]
spenders = []
# == Tests for BIP340 signature validation. ==
# These are primarily tested through the test vectors implemented in libsecp256k1, and in src/tests/key_tests.cpp.
# Some things are tested programmatically as well here.
tap = taproot_construct(pubs[0])
# Test with key with bit flipped.
add_spender(spenders, "sig/key", tap=tap, key=secs[0], failure={"key_tweaked": bitflipper(default_key_tweaked)}, **ERR_SIG_SCHNORR)
# Test with sighash with bit flipped.
add_spender(spenders, "sig/sighash", tap=tap, key=secs[0], failure={"sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
# Test with invalid R sign.
add_spender(spenders, "sig/flip_r", tap=tap, key=secs[0], failure={"flag_flip_r": True}, **ERR_SIG_SCHNORR)
# Test with invalid P sign.
add_spender(spenders, "sig/flip_p", tap=tap, key=secs[0], failure={"flag_flip_p": True}, **ERR_SIG_SCHNORR)
# Test with signature with bit flipped.
add_spender(spenders, "sig/bitflip", tap=tap, key=secs[0], failure={"signature": bitflipper(default_signature)}, **ERR_SIG_SCHNORR)
# == Tests for signature hashing ==
# Run all tests once with no annex, and once with a valid random annex.
for annex in [None, lambda _: bytes([ANNEX_TAG]) + random_bytes(random.randrange(0, 250))]:
# Non-empty annex is non-standard
no_annex = annex is None
# Sighash mutation tests (test all sighash combinations)
for hashtype in VALID_SIGHASHES_TAPROOT:
common = {"annex": annex, "hashtype": hashtype, "standard": no_annex}
# Pure pubkey
tap = taproot_construct(pubs[0])
add_spender(spenders, "sighash/purepk", tap=tap, key=secs[0], **common, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Pubkey/P2PK script combination
scripts = [("s0", CScript(random_checksig_style(pubs[1])))]
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "sighash/keypath_hashtype_%x" % hashtype, tap=tap, key=secs[0], **common, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/scriptpath_hashtype_%x" % hashtype, tap=tap, leaf="s0", key=secs[1], **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Test SIGHASH_SINGLE behavior in combination with mismatching outputs
if hashtype in VALID_SIGHASHES_TAPROOT_SINGLE:
add_spender(spenders, "sighash/keypath_hashtype_mis_%x" % hashtype, tap=tap, key=secs[0], annex=annex, standard=no_annex, hashtype_actual=random.choice(VALID_SIGHASHES_TAPROOT_NO_SINGLE), failure={"hashtype_actual": hashtype}, **ERR_SIG_HASHTYPE, need_vin_vout_mismatch=True)
add_spender(spenders, "sighash/scriptpath_hashtype_mis_%x" % hashtype, tap=tap, leaf="s0", key=secs[1], annex=annex, standard=no_annex, hashtype_actual=random.choice(VALID_SIGHASHES_TAPROOT_NO_SINGLE), **SINGLE_SIG, failure={"hashtype_actual": hashtype}, **ERR_SIG_HASHTYPE, need_vin_vout_mismatch=True)
# Test OP_CODESEPARATOR impact on sighashing.
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
common = {"annex": annex, "hashtype": hashtype, "standard": no_annex}
scripts = [
("pk_codesep", CScript(random_checksig_style(pubs[1]) + bytes([OP_CODESEPARATOR]))), # codesep after checksig
("codesep_pk", CScript(bytes([OP_CODESEPARATOR]) + random_checksig_style(pubs[1]))), # codesep before checksig
("branched_codesep", CScript([random_bytes(random.randrange(511)), OP_DROP, OP_IF, OP_CODESEPARATOR, pubs[0], OP_ELSE, OP_CODESEPARATOR, pubs[1], OP_ENDIF, OP_CHECKSIG])), # branch dependent codesep
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "sighash/pk_codesep", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/codesep_pk", tap=tap, leaf="codesep_pk", key=secs[1], codeseppos=0, **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/branched_codesep/left", tap=tap, leaf="branched_codesep", key=secs[0], codeseppos=3, **common, inputs=[getter("sign"), b'\x01'], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/branched_codesep/right", tap=tap, leaf="branched_codesep", key=secs[1], codeseppos=6, **common, inputs=[getter("sign"), b''], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Reusing the scripts above, test that various features affect the sighash.
add_spender(spenders, "sighash/annex", tap=tap, leaf="pk_codesep", key=secs[1], hashtype=hashtype, standard=False, **SINGLE_SIG, annex=bytes([ANNEX_TAG]), failure={"sighash": override(default_sighash, annex=None)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/script", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, script_taproot=tap.leaves["codesep_pk"].script)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/leafver", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leafversion=random.choice([x & 0xFE for x in range(0x100) if x & 0xFE != 0xC0]))}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leaf=None)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/keypath", tap=tap, key=secs[0], **common, failure={"sighash": override(default_sighash, leaf="pk_codesep")}, **ERR_SIG_SCHNORR)
# Test that invalid hashtypes don't work, both in key path and script path spends
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
for invalid_hashtype in [x for x in range(0x100) if x not in VALID_SIGHASHES_TAPROOT]:
add_spender(spenders, "sighash/keypath_unk_hashtype_%x" % invalid_hashtype, tap=tap, key=secs[0], hashtype=hashtype, failure={"hashtype": invalid_hashtype}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/scriptpath_unk_hashtype_%x" % invalid_hashtype, tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=hashtype, failure={"hashtype": invalid_hashtype}, **ERR_SIG_HASHTYPE)
# Test that hashtype 0 cannot have a hashtype byte, and 1 must have one.
add_spender(spenders, "sighash/hashtype0_byte_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_DEFAULT])}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/hashtype0_byte_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_DEFAULT])}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/hashtype1_byte_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1_byte_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
# Test that hashtype 0 and hashtype 1 cannot be transmuted into each other.
add_spender(spenders, "sighash/hashtype0to1_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_ALL])}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype0to1_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_ALL])}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1to0_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1to0_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
# Test aspects of signatures with unusual lengths
for hashtype in [SIGHASH_DEFAULT, random.choice(VALID_SIGHASHES_TAPROOT)]:
scripts = [
("csv", CScript([pubs[2], OP_CHECKSIGVERIFY, OP_1])),
("cs_pos", CScript([pubs[2], OP_CHECKSIG])),
("csa_pos", CScript([OP_0, pubs[2], OP_CHECKSIGADD, OP_1, OP_EQUAL])),
("cs_neg", CScript([pubs[2], OP_CHECKSIG, OP_NOT])),
("csa_neg", CScript([OP_2, pubs[2], OP_CHECKSIGADD, OP_2, OP_EQUAL]))
]
random.shuffle(scripts)
tap = taproot_construct(pubs[3], scripts)
# Empty signatures
add_spender(spenders, "siglen/empty_keypath", tap=tap, key=secs[3], hashtype=hashtype, failure={"sign": b""}, **ERR_SIG_SIZE)
add_spender(spenders, "siglen/empty_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_CHECKSIGVERIFY)
add_spender(spenders, "siglen/empty_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_NO_SUCCESS)
add_spender(spenders, "siglen/empty_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_NO_SUCCESS)
add_spender(spenders, "siglen/empty_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": lambda _: random_bytes(random.randrange(1, 63))}, **ERR_SIG_SIZE)
add_spender(spenders, "siglen/empty_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": lambda _: random_bytes(random.randrange(66, 100))}, **ERR_SIG_SIZE)
# Appending a zero byte to signatures invalidates them
add_spender(spenders, "siglen/padzero_keypath", tap=tap, key=secs[3], hashtype=hashtype, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
# Removing the last byte from signatures invalidates them
add_spender(spenders, "siglen/popbyte_keypath", tap=tap, key=secs[3], hashtype=hashtype, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
# Verify that an invalid signature is not allowed, not even when the CHECKSIG* is expected to fail.
add_spender(spenders, "siglen/invalid_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": default_sign, "sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "siglen/invalid_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": default_sign, "sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
# == Test that BIP341 spending only applies to witness version 1, program length 32, no P2SH ==
for p2sh in [False, True]:
for witver in range(1, 17):
for witlen in [20, 31, 32, 33]:
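# mutate() takes the 32-byte taproot witness program, truncates or zero-pads it to witlen bytes, and wraps it in the requested witness version.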
def mutate(spk):
prog = spk[2:]
assert len(prog) == 32
if witlen < 32:
prog = prog[0:witlen]
elif witlen > 32:
prog += bytes([0 for _ in range(witlen - 32)])
return CScript([CScriptOp.encode_op_n(witver), prog])
scripts = [("s0", CScript([pubs[0], OP_CHECKSIG])), ("dummy", CScript([OP_RETURN]))]
tap = taproot_construct(pubs[1], scripts)
if not p2sh and witver == 1 and witlen == 32:
add_spender(spenders, "applic/keypath", p2sh=p2sh, spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[1], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "applic/scriptpath", p2sh=p2sh, leaf="s0", spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[0], **SINGLE_SIG, failure={"leaf": "dummy"}, **ERR_OP_RETURN)
else:
add_spender(spenders, "applic/keypath", p2sh=p2sh, spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[1], standard=False)
add_spender(spenders, "applic/scriptpath", p2sh=p2sh, leaf="s0", spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[0], **SINGLE_SIG, standard=False)
# == Test various aspects of BIP341 spending paths ==
# A set of functions that compute the hashing partner in a Merkle tree, designed to exercise
# edge cases. This relies on the taproot_construct feature that a lambda can be passed in
# instead of a subtree, to compute the partner to be hashed with.
PARTNER_MERKLE_FN = [
# Combine with itself
lambda h: h,
# Combine with hash 0
lambda h: bytes([0 for _ in range(32)]),
# Combine with hash 2^256-1
lambda h: bytes([0xff for _ in range(32)]),
# Combine with itself-1 (BE)
lambda h: (int.from_bytes(h, 'big') - 1).to_bytes(32, 'big'),
# Combine with itself+1 (BE)
lambda h: (int.from_bytes(h, 'big') + 1).to_bytes(32, 'big'),
# Combine with itself-1 (LE)
lambda h: (int.from_bytes(h, 'little') - 1).to_bytes(32, 'little'),
# Combine with itself+1 (LE)
lambda h: (int.from_bytes(h, 'little') + 1).to_bytes(32, 'little'),
# Combine with random bitflipped version of self.
lambda h: (int.from_bytes(h, 'little') ^ (1 << random.randrange(256))).to_bytes(32, 'little')
]
# Start with a tree that has depth 1 for "128deep" and depth 2 for "129deep".
scripts = [("128deep", CScript([pubs[0], OP_CHECKSIG])), [("129deep", CScript([pubs[0], OP_CHECKSIG])), random.choice(PARTNER_MERKLE_FN)]]
# Add 127 nodes on top of that tree, so that "128deep" and "129deep" end up at their designated depths.
for _ in range(127):
scripts = [scripts, random.choice(PARTNER_MERKLE_FN)]
tap = taproot_construct(pubs[0], scripts)
# Test that spends with a depth of 128 work, but 129 doesn't (even with a tree with weird Merkle branches in it).
add_spender(spenders, "spendpath/merklelimit", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"leaf": "129deep"}, **ERR_CONTROLBLOCK_SIZE)
# Test that flipping the negation bit invalidates spends.
add_spender(spenders, "spendpath/negflag", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"negflag": lambda ctx: 1 - default_negflag(ctx)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that bitflips in the Merkle branch invalidate it.
add_spender(spenders, "spendpath/bitflipmerkle", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"merklebranch": bitflipper(default_merklebranch)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that bitflips in the inner pubkey invalidate it.
add_spender(spenders, "spendpath/bitflippubkey", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"pubkey_inner": bitflipper(default_pubkey_inner)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that empty witnesses are invalid.
add_spender(spenders, "spendpath/emptywit", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"witness": []}, **ERR_EMPTY_WITNESS)
# Test that adding garbage to the control block invalidates it.
add_spender(spenders, "spendpath/padlongcontrol", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_controlblock(ctx) + random_bytes(random.randrange(1, 32))}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block invalidates it.
add_spender(spenders, "spendpath/trunclongcontrol", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:random.randrange(1, 32)]}, **ERR_CONTROLBLOCK_SIZE)
scripts = [("s", CScript([pubs[0], OP_CHECKSIG]))]
tap = taproot_construct(pubs[1], scripts)
# Test that adding garbage to the control block invalidates it.
add_spender(spenders, "spendpath/padshortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_controlblock(ctx) + random_bytes(random.randrange(1, 32))}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block invalidates it.
add_spender(spenders, "spendpath/truncshortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:random.randrange(1, 32)]}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block to 1 byte ("-1 Merkle length") invalidates it
add_spender(spenders, "spendpath/trunc1shortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:1]}, **ERR_CONTROLBLOCK_SIZE)
# == Test BIP342 edge cases ==
csa_low_val = random.randrange(0, 17) # Within range for OP_n
csa_low_result = csa_low_val + 1
csa_high_val = random.randrange(17, 100) if random.getrandbits(1) else random.randrange(-100, -1) # Outside OP_n range
csa_high_result = csa_high_val + 1
OVERSIZE_NUMBER = 2**31
assert_equal(len(CScriptNum.encode(CScriptNum(OVERSIZE_NUMBER))), 6)
assert_equal(len(CScriptNum.encode(CScriptNum(OVERSIZE_NUMBER-1))), 5)
big_choices = []
big_scriptops = []
for i in range(1000):
r = random.randrange(len(pubs))
big_choices.append(r)
big_scriptops += [pubs[r], OP_CHECKSIGVERIFY]
def big_spend_inputs(ctx):
"""Helper function to construct the script input for t33/t34 below."""
# Instead of signing 999 times, precompute signatures for every (key, hashtype) combination
sigs = {}
for ht in VALID_SIGHASHES_TAPROOT:
for k in range(len(pubs)):
sigs[(k, ht)] = override(default_sign, hashtype=ht, key=secs[k])(ctx)
num = get(ctx, "num")
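# The witness stack is consumed from the top, so list the signatures in reverse order of the keys checked by the script.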
return [sigs[(big_choices[i], random.choice(VALID_SIGHASHES_TAPROOT))] for i in range(num - 1, -1, -1)]
# Various BIP342 features
scripts = [
# 0) drop stack element and OP_CHECKSIG
("t0", CScript([OP_DROP, pubs[1], OP_CHECKSIG])),
# 1) normal OP_CHECKSIG
("t1", CScript([pubs[1], OP_CHECKSIG])),
# 2) normal OP_CHECKSIGVERIFY
("t2", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1])),
# 3) Hypothetical OP_CHECKMULTISIG script that takes a single sig as input
("t3", CScript([OP_0, OP_SWAP, OP_1, pubs[1], OP_1, OP_CHECKMULTISIG])),
# 4) Hypothetical OP_CHECKMULTISIGVERIFY script that takes a single sig as input
("t4", CScript([OP_0, OP_SWAP, OP_1, pubs[1], OP_1, OP_CHECKMULTISIGVERIFY, OP_1])),
# 5) OP_IF script that needs a true input
("t5", CScript([OP_IF, pubs[1], OP_CHECKSIG, OP_ELSE, OP_RETURN, OP_ENDIF])),
# 6) OP_NOTIF script that needs a true input
("t6", CScript([OP_NOTIF, OP_RETURN, OP_ELSE, pubs[1], OP_CHECKSIG, OP_ENDIF])),
# 7) OP_CHECKSIG with an empty key
("t7", CScript([OP_0, OP_CHECKSIG])),
# 8) OP_CHECKSIGVERIFY with an empty key
("t8", CScript([OP_0, OP_CHECKSIGVERIFY, OP_1])),
# 9) normal OP_CHECKSIGADD that also ensures return value is correct
("t9", CScript([csa_low_val, pubs[1], OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 10) OP_CHECKSIGADD with empty key
("t10", CScript([csa_low_val, OP_0, OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 11) OP_CHECKSIGADD with missing counter stack element
("t11", CScript([pubs[1], OP_CHECKSIGADD, OP_1, OP_EQUAL])),
# 12) OP_CHECKSIG that needs invalid signature
("t12", CScript([pubs[1], OP_CHECKSIGVERIFY, pubs[0], OP_CHECKSIG, OP_NOT])),
# 13) OP_CHECKSIG with empty key that needs invalid signature
("t13", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_CHECKSIG, OP_NOT])),
# 14) OP_CHECKSIGADD that needs invalid signature
("t14", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, pubs[0], OP_CHECKSIGADD, OP_NOT])),
# 15) OP_CHECKSIGADD with empty key that needs invalid signature
("t15", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_0, OP_CHECKSIGADD, OP_NOT])),
# 16) OP_CHECKSIG with unknown pubkey type
("t16", CScript([OP_1, OP_CHECKSIG])),
# 17) OP_CHECKSIGADD with unknown pubkey type
("t17", CScript([OP_0, OP_1, OP_CHECKSIGADD])),
# 18) OP_CHECKSIGVERIFY with unknown pubkey type
("t18", CScript([OP_1, OP_CHECKSIGVERIFY, OP_1])),
# 19) script longer than 10000 bytes and over 201 non-push opcodes
("t19", CScript([OP_0, OP_0, OP_2DROP] * 10001 + [pubs[1], OP_CHECKSIG])),
# 20) OP_CHECKSIGVERIFY with empty key
("t20", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_0, OP_CHECKSIGVERIFY, OP_1])),
# 21) Script that grows the stack to 1000 elements
("t21", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1] + [OP_DUP] * 999 + [OP_DROP] * 999)),
# 22) Script that grows the stack to 1001 elements
("t22", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1] + [OP_DUP] * 1000 + [OP_DROP] * 1000)),
# 23) Script that expects an input stack of 1000 elements
("t23", CScript([OP_DROP] * 999 + [pubs[1], OP_CHECKSIG])),
# 24) Script that expects an input stack of 1001 elements
("t24", CScript([OP_DROP] * 1000 + [pubs[1], OP_CHECKSIG])),
# 25) Script that pushes a MAX_SCRIPT_ELEMENT_SIZE-bytes element
("t25", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE), OP_DROP, pubs[1], OP_CHECKSIG])),
# 26) Script that pushes a (MAX_SCRIPT_ELEMENT_SIZE+1)-bytes element
("t26", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, pubs[1], OP_CHECKSIG])),
# 27) CHECKSIGADD that must fail because numeric argument number is >4 bytes
("t27", CScript([CScriptNum(OVERSIZE_NUMBER), pubs[1], OP_CHECKSIGADD])),
# 28) Pushes random CScriptNum value, checks OP_CHECKSIGADD result
("t28", CScript([csa_high_val, pubs[1], OP_CHECKSIGADD, csa_high_result, OP_EQUAL])),
# 29) CHECKSIGADD that succeeds with proper sig because numeric argument number is <=4 bytes
("t29", CScript([CScriptNum(OVERSIZE_NUMBER-1), pubs[1], OP_CHECKSIGADD])),
# 30) Variant of t1 with "normal" 33-byte pubkey
("t30", CScript([b'\x03' + pubs[1], OP_CHECKSIG])),
# 31) Variant of t2 with "normal" 33-byte pubkey
("t31", CScript([b'\x02' + pubs[1], OP_CHECKSIGVERIFY, OP_1])),
# 32) Variant of t28 with "normal" 33-byte pubkey
("t32", CScript([csa_high_val, b'\x03' + pubs[1], OP_CHECKSIGADD, csa_high_result, OP_EQUAL])),
# 33) 999-of-999 multisig
("t33", CScript(big_scriptops[:1998] + [OP_1])),
# 34) 1000-of-1000 multisig
("t34", CScript(big_scriptops[:2000] + [OP_1])),
# 35) Variant of t9 that uses a non-minimally encoded input arg
("t35", CScript([bytes([csa_low_val]), pubs[1], OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 36) Empty script
("t36", CScript([])),
]
# Add many dummies to test huge trees
for j in range(100000):
scripts.append((None, CScript([OP_RETURN, random.randrange(100000)])))
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
common = {
"hashtype": hashtype,
"key": secs[1],
"tap": tap,
}
# Test that MAX_SCRIPT_ELEMENT_SIZE byte stack element inputs are valid, but not one more (and 80 bytes is standard but 81 is not).
add_spender(spenders, "tapscript/inputmaxlimit", leaf="t0", **common, standard=False, inputs=[getter("sign"), random_bytes(MAX_SCRIPT_ELEMENT_SIZE)], failure={"inputs": [getter("sign"), random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1)]}, **ERR_PUSH_LIMIT)
add_spender(spenders, "tapscript/input80limit", leaf="t0", **common, inputs=[getter("sign"), random_bytes(80)])
add_spender(spenders, "tapscript/input81limit", leaf="t0", **common, standard=False, inputs=[getter("sign"), random_bytes(81)])
# Test that OP_CHECKMULTISIG and OP_CHECKMULTISIGVERIFY cause failure, but OP_CHECKSIG and OP_CHECKSIGVERIFY work.
add_spender(spenders, "tapscript/disabled_checkmultisig", leaf="t1", **common, **SINGLE_SIG, failure={"leaf": "t3"}, **ERR_TAPSCRIPT_CHECKMULTISIG)
add_spender(spenders, "tapscript/disabled_checkmultisigverify", leaf="t2", **common, **SINGLE_SIG, failure={"leaf": "t4"}, **ERR_TAPSCRIPT_CHECKMULTISIG)
# Test that OP_IF and OP_NOTIF do not accept non-0x01 as truth value (the MINIMALIF rule is consensus in Tapscript)
add_spender(spenders, "tapscript/minimalif", leaf="t5", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x02']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalnotif", leaf="t6", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x03']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalif", leaf="t5", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x0001']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalnotif", leaf="t6", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x0100']}, **ERR_MINIMALIF)
# Test that 1-byte public keys (which are unknown) are acceptable but nonstandard with unrelated signatures, but 0-byte public keys are not valid.
add_spender(spenders, "tapscript/unkpk/checksig", leaf="t16", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t7"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/unkpk/checksigadd", leaf="t17", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/unkpk/checksigverify", leaf="t18", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t8"}, **ERR_UNKNOWN_PUBKEY)
# Test that 33-byte public keys (which are unknown) are acceptable but nonstandard with valid signatures, but normal pubkeys are not valid in that case.
add_spender(spenders, "tapscript/oldpk/checksig", leaf="t30", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t1"}, **ERR_SIG_SCHNORR)
add_spender(spenders, "tapscript/oldpk/checksigadd", leaf="t31", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t2"}, **ERR_SIG_SCHNORR)
add_spender(spenders, "tapscript/oldpk/checksigverify", leaf="t32", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t28"}, **ERR_SIG_SCHNORR)
# Test that 0-byte public keys are not acceptable.
add_spender(spenders, "tapscript/emptypk/checksig", leaf="t1", **SINGLE_SIG, **common, failure={"leaf": "t7"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigverify", leaf="t2", **SINGLE_SIG, **common, failure={"leaf": "t8"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigadd", leaf="t9", **SINGLE_SIG, **common, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigadd", leaf="t35", standard=False, **SINGLE_SIG, **common, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
# Test that OP_CHECKSIGADD results are as expected
add_spender(spenders, "tapscript/checksigaddresults", leaf="t28", **SINGLE_SIG, **common, failure={"leaf": "t27"}, err_msg="unknown error")
add_spender(spenders, "tapscript/checksigaddoversize", leaf="t29", **SINGLE_SIG, **common, failure={"leaf": "t27"}, err_msg="unknown error")
# Test that OP_CHECKSIGADD requires 3 stack elements.
add_spender(spenders, "tapscript/checksigadd3args", leaf="t9", **SINGLE_SIG, **common, failure={"leaf": "t11"}, **ERR_STACK_EMPTY)
# Test that empty signatures do not cause script failure in OP_CHECKSIG and OP_CHECKSIGADD (but do fail with empty pubkey, and do fail OP_CHECKSIGVERIFY)
add_spender(spenders, "tapscript/emptysigs/checksig", leaf="t12", **common, inputs=[b'', getter("sign")], failure={"leaf": "t13"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptysigs/nochecksigverify", leaf="t12", **common, inputs=[b'', getter("sign")], failure={"leaf": "t20"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptysigs/checksigadd", leaf="t14", **common, inputs=[b'', getter("sign")], failure={"leaf": "t15"}, **ERR_UNKNOWN_PUBKEY)
# Test that scripts over 10000 bytes (and over 201 non-push ops) are acceptable.
add_spender(spenders, "tapscript/no10000limit", leaf="t19", **SINGLE_SIG, **common)
# Test that a stack size of 1000 elements is permitted, but 1001 isn't.
add_spender(spenders, "tapscript/1000stack", leaf="t21", **SINGLE_SIG, **common, failure={"leaf": "t22"}, **ERR_STACK_SIZE)
# Test that an input stack size of 1000 elements is permitted, but 1001 isn't.
add_spender(spenders, "tapscript/1000inputs", leaf="t23", **common, inputs=[getter("sign")] + [b'' for _ in range(999)], failure={"leaf": "t24", "inputs": [getter("sign")] + [b'' for _ in range(1000)]}, **ERR_STACK_SIZE)
# Test that pushing a MAX_SCRIPT_ELEMENT_SIZE byte stack element is valid, but one longer is not.
add_spender(spenders, "tapscript/pushmaxlimit", leaf="t25", **common, **SINGLE_SIG, failure={"leaf": "t26"}, **ERR_PUSH_LIMIT)
# Test that 999-of-999 multisig works (but 1000-of-1000 triggers stack size limits)
add_spender(spenders, "tapscript/bigmulti", leaf="t33", **common, inputs=big_spend_inputs, num=999, failure={"leaf": "t34", "num": 1000}, **ERR_STACK_SIZE)
# Test that the CLEANSTACK rule is consensus critical in tapscript
add_spender(spenders, "tapscript/cleanstack", leaf="t36", tap=tap, inputs=[b'\x01'], failure={"inputs": [b'\x01', b'\x01']}, **ERR_CLEANSTACK)
# == Test for sigops ratio limit ==
# Given a number n, and a public key pk, functions that produce a (CScript, sigops). Each script takes as
# input a valid signature with the passed pk followed by a dummy push of bytes that are to be dropped, and
# will execute sigops signature checks.
SIGOPS_RATIO_SCRIPTS = [
# n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIG.
lambda n, pk: (CScript([OP_DROP, pk] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_CHECKSIG]), n + 1),
# n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIGVERIFY.
lambda n, pk: (CScript([OP_DROP, pk, OP_0, OP_IF, OP_2DUP, OP_CHECKSIGVERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_2, OP_SWAP, OP_CHECKSIGADD, OP_3, OP_EQUAL]), n + 1),
# n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIG.
lambda n, pk: (CScript([random_bytes(220), OP_2DROP, pk, OP_1, OP_NOTIF, OP_2DUP, OP_CHECKSIG, OP_VERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_4, OP_SWAP, OP_CHECKSIGADD, OP_5, OP_EQUAL]), n + 1),
# n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIGADD.
lambda n, pk: (CScript([OP_DROP, pk, OP_1, OP_IF, OP_ELSE, OP_2DUP, OP_6, OP_SWAP, OP_CHECKSIGADD, OP_7, OP_EQUALVERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_8, OP_SWAP, OP_CHECKSIGADD, OP_9, OP_EQUAL]), n + 1),
# n+1 OP_CHECKSIGs, but also one OP_CHECKSIG with an empty signature.
lambda n, pk: (CScript([OP_DROP, OP_0, pk, OP_CHECKSIG, OP_NOT, OP_VERIFY, pk] + [OP_2DUP, OP_CHECKSIG, OP_VERIFY] * n + [OP_CHECKSIG]), n + 1),
# n OP_CHECKSIGADDs and 1 OP_CHECKSIG, but also an OP_CHECKSIGADD with an empty signature.
lambda n, pk: (CScript([OP_DROP, OP_0, OP_10, pk, OP_CHECKSIGADD, OP_10, OP_EQUALVERIFY, pk] + [OP_2DUP, OP_16, OP_SWAP, OP_CHECKSIGADD, b'\x11', OP_EQUALVERIFY] * n + [OP_CHECKSIG]), n + 1),
]
for annex in [None, bytes([ANNEX_TAG]) + random_bytes(random.randrange(1000))]:
for hashtype in [SIGHASH_DEFAULT, SIGHASH_ALL]:
for pubkey in [pubs[1], random_bytes(random.choice([x for x in range(2, 81) if x != 32]))]:
for fn_num, fn in enumerate(SIGOPS_RATIO_SCRIPTS):
merkledepth = random.randrange(129)
def predict_sigops_ratio(n, dummy_size):
"""Predict whether spending fn(n, pubkey) with dummy_size will pass the ratio test."""
script, sigops = fn(n, pubkey)
# Predict the size of the witness for a given choice of n
stacklen_size = 1
sig_size = 64 + (hashtype != SIGHASH_DEFAULT)
siglen_size = 1
dummylen_size = 1 + 2 * (dummy_size >= 253)
script_size = len(script)
scriptlen_size = 1 + 2 * (script_size >= 253)
control_size = 33 + 32 * merkledepth
controllen_size = 1 + 2 * (control_size >= 253)
annex_size = 0 if annex is None else len(annex)
annexlen_size = 0 if annex is None else 1 + 2 * (annex_size >= 253)
witsize = stacklen_size + sig_size + siglen_size + dummy_size + dummylen_size + script_size + scriptlen_size + control_size + controllen_size + annex_size + annexlen_size
# sigops ratio test
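# BIP342 budget: witness size plus a free allowance of 50, at a cost of 50 weight units per executed sigop.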
return witsize + 50 >= 50 * sigops
# Make sure n is high enough that with empty dummy, the script is not valid
n = 0
while predict_sigops_ratio(n, 0):
n += 1
# But allow picking a bit higher still
n += random.randrange(5)
# Now pick dummy size *just* large enough that the overall construction passes
dummylen = 0
while not predict_sigops_ratio(n, dummylen):
dummylen += 1
scripts = [("s", fn(n, pubkey)[0])]
for _ in range(merkledepth):
scripts = [scripts, random.choice(PARTNER_MERKLE_FN)]
tap = taproot_construct(pubs[0], scripts)
standard = annex is None and dummylen <= 80 and len(pubkey) == 32
add_spender(spenders, "tapscript/sigopsratio_%i" % fn_num, tap=tap, leaf="s", annex=annex, hashtype=hashtype, key=secs[1], inputs=[getter("sign"), random_bytes(dummylen)], standard=standard, failure={"inputs": [getter("sign"), random_bytes(dummylen - 1)]}, **ERR_SIGOPS_RATIO)
# Future leaf versions
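# Only even values are valid leaf versions: the lowest bit of the control block's first byte encodes the output key parity, not the version.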
for leafver in range(0, 0x100, 2):
if leafver == LEAF_VERSION_TAPSCRIPT or leafver == ANNEX_TAG:
# Skip the defined LEAF_VERSION_TAPSCRIPT, and the ANNEX_TAG which is not usable as leaf version
continue
scripts = [
("bare_c0", CScript([OP_NOP])),
("bare_unkver", CScript([OP_NOP]), leafver),
("return_c0", CScript([OP_RETURN])),
("return_unkver", CScript([OP_RETURN]), leafver),
("undecodable_c0", CScript([OP_PUSHDATA1])),
("undecodable_unkver", CScript([OP_PUSHDATA1]), leafver),
("bigpush_c0", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP])),
("bigpush_unkver", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP]), leafver),
("1001push_c0", CScript([OP_0] * 1001)),
("1001push_unkver", CScript([OP_0] * 1001), leafver),
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "unkver/bare", standard=False, tap=tap, leaf="bare_unkver", failure={"leaf": "bare_c0"}, **ERR_CLEANSTACK)
add_spender(spenders, "unkver/return", standard=False, tap=tap, leaf="return_unkver", failure={"leaf": "return_c0"}, **ERR_OP_RETURN)
add_spender(spenders, "unkver/undecodable", standard=False, tap=tap, leaf="undecodable_unkver", failure={"leaf": "undecodable_c0"}, **ERR_UNDECODABLE)
add_spender(spenders, "unkver/bigpush", standard=False, tap=tap, leaf="bigpush_unkver", failure={"leaf": "bigpush_c0"}, **ERR_PUSH_LIMIT)
add_spender(spenders, "unkver/1001push", standard=False, tap=tap, leaf="1001push_unkver", failure={"leaf": "1001push_c0"}, **ERR_STACK_SIZE)
add_spender(spenders, "unkver/1001inputs", standard=False, tap=tap, leaf="bare_unkver", inputs=[b'']*1001, failure={"leaf": "bare_c0"}, **ERR_STACK_SIZE)
# OP_SUCCESSx tests.
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
for opval in range(76, 0x100):
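# Opcode values below 76 (OP_PUSHDATA1) are data pushes and can never be OP_SUCCESSx.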
opcode = CScriptOp(opval)
if not is_op_success(opcode):
continue
scripts = [
("bare_success", CScript([opcode])),
("bare_nop", CScript([OP_NOP])),
("unexecif_success", CScript([OP_0, OP_IF, opcode, OP_ENDIF])),
("unexecif_nop", CScript([OP_0, OP_IF, OP_NOP, OP_ENDIF])),
("return_success", CScript([OP_RETURN, opcode])),
("return_nop", CScript([OP_RETURN, OP_NOP])),
("undecodable_success", CScript([opcode, OP_PUSHDATA1])),
("undecodable_nop", CScript([OP_NOP, OP_PUSHDATA1])),
("undecodable_bypassed_success", CScript([OP_PUSHDATA1, OP_2, opcode])),
("bigpush_success", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, opcode])),
("bigpush_nop", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, OP_NOP])),
("1001push_success", CScript([OP_0] * 1001 + [opcode])),
("1001push_nop", CScript([OP_0] * 1001 + [OP_NOP])),
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "opsuccess/bare", standard=False, tap=tap, leaf="bare_success", failure={"leaf": "bare_nop"}, **ERR_CLEANSTACK)
add_spender(spenders, "opsuccess/unexecif", standard=False, tap=tap, leaf="unexecif_success", failure={"leaf": "unexecif_nop"}, **ERR_CLEANSTACK)
add_spender(spenders, "opsuccess/return", standard=False, tap=tap, leaf="return_success", failure={"leaf": "return_nop"}, **ERR_OP_RETURN)
add_spender(spenders, "opsuccess/undecodable", standard=False, tap=tap, leaf="undecodable_success", failure={"leaf": "undecodable_nop"}, **ERR_UNDECODABLE)
add_spender(spenders, "opsuccess/undecodable_bypass", standard=False, tap=tap, leaf="undecodable_success", failure={"leaf": "undecodable_bypassed_success"}, **ERR_UNDECODABLE)
add_spender(spenders, "opsuccess/bigpush", standard=False, tap=tap, leaf="bigpush_success", failure={"leaf": "bigpush_nop"}, **ERR_PUSH_LIMIT)
add_spender(spenders, "opsuccess/1001push", standard=False, tap=tap, leaf="1001push_success", failure={"leaf": "1001push_nop"}, **ERR_STACK_SIZE)
add_spender(spenders, "opsuccess/1001inputs", standard=False, tap=tap, leaf="bare_success", inputs=[b'']*1001, failure={"leaf": "bare_nop"}, **ERR_STACK_SIZE)
# Non-OP_SUCCESSx (verify that those aren't accidentally treated as OP_SUCCESSx)
for opval in range(0, 0x100):
opcode = CScriptOp(opval)
if is_op_success(opcode):
continue
scripts = [
("normal", CScript([OP_RETURN, opcode] + [OP_NOP] * 75)),
("op_success", CScript([OP_RETURN, CScriptOp(0x50)]))
]
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "alwaysvalid/notsuccessx", tap=tap, leaf="op_success", inputs=[], standard=False, failure={"leaf": "normal"}) # err_msg differs based on opcode
# == Legacy tests ==
# Also add a few legacy spends into the mix, so that transactions which combine taproot and pre-taproot spends get tested too.
for compressed in [False, True]:
eckey1 = ECKey()
eckey1.set(generate_privkey(), compressed)
pubkey1 = eckey1.get_pubkey().get_bytes()
eckey2 = ECKey()
eckey2.set(generate_privkey(), compressed)
for p2sh in [False, True]:
for witv0 in [False, True]:
for hashtype in VALID_SIGHASHES_ECDSA + [random.randrange(0x04, 0x80), random.randrange(0x84, 0x100)]:
standard = (hashtype in VALID_SIGHASHES_ECDSA) and (compressed or not witv0)
add_spender(spenders, "legacy/pk-wrongkey", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=CScript([pubkey1, OP_CHECKSIG]), **SINGLE_SIG, key=eckey1, failure={"key": eckey2}, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS)
add_spender(spenders, "legacy/pkh-sighashflip", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, pkh=pubkey1, key=eckey1, **SIGHASH_BITFLIP, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS)
# Verify that OP_CHECKSIGADD wasn't accidentally added to pre-taproot validation logic.
for p2sh in [False, True]:
for witv0 in [False, True]:
for hashtype in VALID_SIGHASHES_ECDSA + [random.randrange(0x04, 0x80), random.randrange(0x84, 0x100)]:
standard = hashtype in VALID_SIGHASHES_ECDSA and (p2sh or witv0)
add_spender(spenders, "compat/nocsa", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=CScript([OP_IF, OP_11, pubkey1, OP_CHECKSIGADD, OP_12, OP_EQUAL, OP_ELSE, pubkey1, OP_CHECKSIG, OP_ENDIF]), key=eckey1, sigops_weight=4-3*witv0, inputs=[getter("sign"), b''], failure={"inputs": [getter("sign"), b'\x01']}, **ERR_UNDECODABLE)
return spenders
def spenders_taproot_inactive():
"""Spenders for testing that pre-activation Taproot rules don't apply."""
spenders = []
sec = generate_privkey()
pub, _ = compute_xonly_pubkey(sec)
scripts = [
("pk", CScript([pub, OP_CHECKSIG])),
("future_leaf", CScript([pub, OP_CHECKSIG]), 0xc2),
("op_success", CScript([pub, OP_CHECKSIG, OP_0, OP_IF, CScriptOp(0x50), OP_ENDIF])),
]
tap = taproot_construct(pub, scripts)
# Test that keypath spending is accepted but non-standard, regardless of signature validity.
add_spender(spenders, "inactive/keypath_valid", key=sec, tap=tap, standard=False)
add_spender(spenders, "inactive/keypath_invalidsig", key=sec, tap=tap, standard=False, sighash=bitflipper(default_sighash))
add_spender(spenders, "inactive/keypath_empty", key=sec, tap=tap, standard=False, witness=[])
# Same for scriptpath spending (and features like annex, leaf versions, or OP_SUCCESS don't change this)
add_spender(spenders, "inactive/scriptpath_valid", key=sec, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")])
add_spender(spenders, "inactive/scriptpath_invalidsig", key=sec, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))
add_spender(spenders, "inactive/scriptpath_invalidcb", key=sec, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")], controlblock=bitflipper(default_controlblock))
add_spender(spenders, "inactive/scriptpath_valid_unkleaf", key=sec, tap=tap, leaf="future_leaf", standard=False, inputs=[getter("sign")])
add_spender(spenders, "inactive/scriptpath_invalid_unkleaf", key=sec, tap=tap, leaf="future_leaf", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))
add_spender(spenders, "inactive/scriptpath_valid_opsuccess", key=sec, tap=tap, leaf="op_success", standard=False, inputs=[getter("sign")])
add_spender(spenders, "inactive/scriptpath_valid_opsuccess", key=sec, tap=tap, leaf="op_success", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))
return spenders
# Consensus validation flags to use in dumps for tests with "legacy/" or "inactive/" prefix.
LEGACY_FLAGS = "P2SH,DERSIG,CHECKLOCKTIMEVERIFY,CHECKSEQUENCEVERIFY,WITNESS,NULLDUMMY"
# Consensus validation flags to use in dumps for all other tests.
TAPROOT_FLAGS = "P2SH,DERSIG,CHECKLOCKTIMEVERIFY,CHECKSEQUENCEVERIFY,WITNESS,NULLDUMMY,TAPROOT"
def dump_json_test(tx, input_utxos, idx, success, failure):
spender = input_utxos[idx].spender
# Determine flags to dump
flags = LEGACY_FLAGS if spender.comment.startswith("legacy/") or spender.comment.startswith("inactive/") else TAPROOT_FLAGS
fields = [
("tx", tx.serialize().hex()),
("prevouts", [x.output.serialize().hex() for x in input_utxos]),
("index", idx),
("flags", flags),
("comment", spender.comment)
]
# The "final" field indicates that a spend should be always valid, even with more validation flags enabled
# than the listed ones. Use standardness as a proxy for this (which gives a conservative underestimate).
if spender.is_standard:
fields.append(("final", True))
def dump_witness(wit):
return OrderedDict([("scriptSig", wit[0].hex()), ("witness", [x.hex() for x in wit[1]])])
if success is not None:
fields.append(("success", dump_witness(success)))
if failure is not None:
fields.append(("failure", dump_witness(failure)))
# Write the dump to $TEST_DUMP_DIR/x/xyz... where x,y,z,... are the SHA1 sum of the dump (which makes the
# file naming scheme compatible with fuzzing infrastructure).
dump = json.dumps(OrderedDict(fields)) + ",\n"
sha1 = hashlib.sha1(dump.encode("utf-8")).hexdigest()
dirname = os.environ.get("TEST_DUMP_DIR", ".") + ("/%s" % sha1[0])
os.makedirs(dirname, exist_ok=True)
with open(dirname + ("/%s" % sha1), 'w', encoding="utf8") as f:
f.write(dump)
# Data type to keep track of UTXOs, where they were created, and how to spend them.
UTXOData = namedtuple('UTXOData', 'outpoint,output,spender')
class TaprootTest(TokyocoinTestFramework):
def add_options(self, parser):
parser.add_argument("--dumptests", dest="dump_tests", default=False, action="store_true",
help="Dump generated test cases to directory set by TEST_DUMP_DIR environment variable")
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
# Node 0 has Taproot inactive, Node 1 active.
self.extra_args = [["-par=1", "-vbparams=taproot:1:1"], ["-par=1"]]
def block_submit(self, node, txs, msg, err_msg, cb_pubkey=None, fees=0, sigops_weight=0, witness=False, accept=False):
# Deplete block of any non-tapscript sigops using a single additional 0-value coinbase output.
# It is possible to fit enough tapscript sigops to hit the old 80k limit without
# busting txin-level limits, so we have to account for the p2pk outputs in all
# transactions.
extra_output_script = CScript([OP_CHECKSIG]*((MAX_BLOCK_SIGOPS_WEIGHT - sigops_weight) // WITNESS_SCALE_FACTOR))
block = create_block(self.tip, create_coinbase(self.lastblockheight + 1, pubkey=cb_pubkey, extra_output_script=extra_output_script, fees=fees), self.lastblocktime + 1)
block.nVersion = 4
for tx in txs:
tx.rehash()
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
witness and add_witness_commitment(block)
block.rehash()
block.solve()
block_response = node.submitblock(block.serialize().hex())
if err_msg is not None:
assert block_response is not None and err_msg in block_response, "Missing error message '%s' from block response '%s': %s" % (err_msg, "(None)" if block_response is None else block_response, msg)
if (accept):
assert node.getbestblockhash() == block.hash, "Failed to accept: %s (response: %s)" % (msg, block_response)
self.tip = block.sha256
self.lastblockhash = block.hash
self.lastblocktime += 1
self.lastblockheight += 1
else:
assert node.getbestblockhash() == self.lastblockhash, "Failed to reject: " + msg
def test_spenders(self, node, spenders, input_counts):
"""Run randomized tests with a number of "spenders".
Steps:
1) Generate an appropriate UTXO for each spender to test spend conditions
2) Generate 16 random addresses of all wallet types: pkh/sh_wpkh/wpkh
3) Select random number of inputs from (1)
4) Select random number of addresses from (2) as outputs
Each spender embodies a test; in a large randomized test, it is verified
that toggling the valid argument to each lambda toggles the validity of
the transaction. This is accomplished by constructing transactions consisting
of all valid inputs, except one invalid one.
"""
# Construct a bunch of sPKs that send coins back to the host wallet
self.log.info("- Constructing addresses for returning coins")
host_spks = []
host_pubkeys = []
for i in range(16):
addr = node.getnewaddress(address_type=random.choice(["legacy", "p2sh-segwit", "bech32"]))
info = node.getaddressinfo(addr)
spk = bytes.fromhex(info['scriptPubKey'])
host_spks.append(spk)
host_pubkeys.append(bytes.fromhex(info['pubkey']))
# Initialize variables used by block_submit().
self.lastblockhash = node.getbestblockhash()
self.tip = int(self.lastblockhash, 16)
block = node.getblock(self.lastblockhash)
self.lastblockheight = block['height']
self.lastblocktime = block['time']
# Create transactions spending up to 50 of the wallet's inputs, with one output for each spender, and
# one change output at the end. The transaction is constructed on the Python side to enable
# having multiple outputs to the same address and outputs with no assigned address. The wallet
# is then asked to sign it through signrawtransactionwithwallet, and then added to a block on the
# Python side (to bypass standardness rules).
self.log.info("- Creating test UTXOs...")
random.shuffle(spenders)
normal_utxos = []
mismatching_utxos = [] # UTXOs with input that requires mismatching output position
done = 0
while done < len(spenders):
# Compute how many UTXOs to create with this transaction
count_this_tx = min(len(spenders) - done, (len(spenders) + 4) // 5, 10000)
fund_tx = CTransaction()
# Add the 50 highest-value inputs
unspents = node.listunspent()
random.shuffle(unspents)
unspents.sort(key=lambda x: int(x["amount"] * 100000000), reverse=True)
if len(unspents) > 50:
unspents = unspents[:50]
random.shuffle(unspents)
balance = 0
for unspent in unspents:
balance += int(unspent["amount"] * 100000000)
txid = int(unspent["txid"], 16)
fund_tx.vin.append(CTxIn(COutPoint(txid, int(unspent["vout"])), CScript()))
# Add outputs
cur_progress = done / len(spenders)
next_progress = (done + count_this_tx) / len(spenders)
change_goal = (1.0 - 0.6 * next_progress) / (1.0 - 0.6 * cur_progress) * balance
self.log.debug("Create %i UTXOs in a transaction spending %i inputs worth %.8f (sending ~%.8f to change)" % (count_this_tx, len(unspents), balance * 0.00000001, change_goal * 0.00000001))
for i in range(count_this_tx):
avg = (balance - change_goal) / (count_this_tx - i)
amount = int(random.randrange(int(avg*0.85 + 0.5), int(avg*1.15 + 0.5)) + 0.5)
balance -= amount
fund_tx.vout.append(CTxOut(amount, spenders[done + i].script))
# Add change
fund_tx.vout.append(CTxOut(balance - 10000, random.choice(host_spks)))
# Ask the wallet to sign
ss = BytesIO(bytes.fromhex(node.signrawtransactionwithwallet(ToHex(fund_tx))["hex"]))
fund_tx.deserialize(ss)
# Construct UTXOData entries
fund_tx.rehash()
for i in range(count_this_tx):
utxodata = UTXOData(outpoint=COutPoint(fund_tx.sha256, i), output=fund_tx.vout[i], spender=spenders[done])
if utxodata.spender.need_vin_vout_mismatch:
mismatching_utxos.append(utxodata)
else:
normal_utxos.append(utxodata)
done += 1
# Mine into a block
self.block_submit(node, [fund_tx], "Funding tx", None, random.choice(host_pubkeys), 10000, MAX_BLOCK_SIGOPS_WEIGHT, True, True)
# Consume groups of choice(input_coins) from utxos in a tx, testing the spenders.
self.log.info("- Running %i spending tests" % done)
random.shuffle(normal_utxos)
random.shuffle(mismatching_utxos)
assert done == len(normal_utxos) + len(mismatching_utxos)
left = done
while left:
# Construct CTransaction with random nVersion, nLocktime
tx = CTransaction()
tx.nVersion = random.choice([1, 2, random.randint(-0x80000000, 0x7fffffff)])
min_sequence = (tx.nVersion != 1 and tx.nVersion != 0) * 0x80000000 # The minimum sequence number to disable relative locktime
if random.choice([True, False]):
tx.nLockTime = random.randrange(LOCKTIME_THRESHOLD, self.lastblocktime - 7200) # all absolute locktimes in the past
else:
tx.nLockTime = random.randrange(self.lastblockheight + 1) # all block heights in the past
# Decide how many UTXOs to test with.
acceptable = [n for n in input_counts if n <= left and (left - n > max(input_counts) or (left - n) in [0] + input_counts)]
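# Only allow counts that leave either nothing or a remainder that later iterations can still consume.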
num_inputs = random.choice(acceptable)
# If we have UTXOs that require mismatching inputs/outputs left, include exactly one of those
# unless there is only one normal UTXO left (as tests with mismatching UTXOs require at least one
# normal UTXO to go in the first position), and we don't want to run out of normal UTXOs.
input_utxos = []
while len(mismatching_utxos) and (len(input_utxos) == 0 or len(normal_utxos) == 1):
input_utxos.append(mismatching_utxos.pop())
left -= 1
# Top up until we hit num_inputs (but include at least one normal UTXO always).
for _ in range(max(1, num_inputs - len(input_utxos))):
input_utxos.append(normal_utxos.pop())
left -= 1
# The first input cannot require a mismatching output (as there is at least one output).
while True:
random.shuffle(input_utxos)
if not input_utxos[0].spender.need_vin_vout_mismatch:
break
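# Find the position of a mismatching UTXO (if any); the reshuffle above ensures position 0 holds a normal UTXO.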
first_mismatch_input = None
for i in range(len(input_utxos)):
if input_utxos[i].spender.need_vin_vout_mismatch:
first_mismatch_input = i
assert first_mismatch_input is None or first_mismatch_input > 0
# Decide fee, and add CTxIns to tx.
amount = sum(utxo.output.nValue for utxo in input_utxos)
fee = min(random.randrange(MIN_FEE * 2, MIN_FEE * 4), amount - DUST_LIMIT) # 100000-200000 sat fee
in_value = amount - fee
tx.vin = [CTxIn(outpoint=utxo.outpoint, nSequence=random.randint(min_sequence, 0xffffffff)) for utxo in input_utxos]
tx.wit.vtxinwit = [CTxInWitness() for _ in range(len(input_utxos))]
sigops_weight = sum(utxo.spender.sigops_weight for utxo in input_utxos)
self.log.debug("Test: %s" % (", ".join(utxo.spender.comment for utxo in input_utxos)))
# Add 1 to 4 random outputs (but constrained by inputs that require mismatching outputs)
num_outputs = random.choice(range(1, 1 + min(4, 4 if first_mismatch_input is None else first_mismatch_input)))
assert in_value >= 0 and fee - num_outputs * DUST_LIMIT >= MIN_FEE
for i in range(num_outputs):
tx.vout.append(CTxOut())
if in_value <= DUST_LIMIT:
tx.vout[-1].nValue = DUST_LIMIT
elif i < num_outputs - 1:
tx.vout[-1].nValue = in_value
else:
tx.vout[-1].nValue = random.randint(DUST_LIMIT, in_value)
in_value -= tx.vout[-1].nValue
tx.vout[-1].scriptPubKey = random.choice(host_spks)
sigops_weight += CScript(tx.vout[-1].scriptPubKey).GetSigOpCount(False) * WITNESS_SCALE_FACTOR
fee += in_value
assert fee >= 0
# Select coinbase pubkey
cb_pubkey = random.choice(host_pubkeys)
sigops_weight += 1 * WITNESS_SCALE_FACTOR
# Precompute one satisfying and one failing scriptSig/witness for each input.
input_data = []
for i in range(len(input_utxos)):
fn = input_utxos[i].spender.sat_function
fail = None
success = fn(tx, i, [utxo.output for utxo in input_utxos], True)
if not input_utxos[i].spender.no_fail:
fail = fn(tx, i, [utxo.output for utxo in input_utxos], False)
input_data.append((fail, success))
if self.options.dump_tests:
dump_json_test(tx, input_utxos, i, success, fail)
# Sign each input incorrectly once on each complete signing pass, except the very last.
for fail_input in list(range(len(input_utxos))) + [None]:
# Skip trying to fail at spending something that can't be made to fail.
if fail_input is not None and input_utxos[fail_input].spender.no_fail:
continue
# Expected error message for this input failure; may be None (which is ignored)
expected_fail_msg = None if fail_input is None else input_utxos[fail_input].spender.err_msg
# Fill inputs/witnesses
for i in range(len(input_utxos)):
tx.vin[i].scriptSig = input_data[i][i != fail_input][0]
tx.wit.vtxinwit[i].scriptWitness.stack = input_data[i][i != fail_input][1]
# Submit to mempool to check standardness
is_standard_tx = fail_input is None and all(utxo.spender.is_standard for utxo in input_utxos) and tx.nVersion >= 1 and tx.nVersion <= 2
tx.rehash()
msg = ','.join(utxo.spender.comment + ("*" if n == fail_input else "") for n, utxo in enumerate(input_utxos))
if is_standard_tx:
node.sendrawtransaction(tx.serialize().hex(), 0)
assert node.getmempoolentry(tx.hash) is not None, "Failed to accept into mempool: " + msg
else:
assert_raises_rpc_error(-26, None, node.sendrawtransaction, tx.serialize().hex(), 0)
# Submit in a block
self.block_submit(node, [tx], msg, witness=True, accept=fail_input is None, cb_pubkey=cb_pubkey, fees=fee, sigops_weight=sigops_weight, err_msg=expected_fail_msg)
if (len(spenders) - left) // 200 > (len(spenders) - left - len(input_utxos)) // 200:
self.log.info(" - %i tests done" % (len(spenders) - left))
assert left == 0
assert len(normal_utxos) == 0
assert len(mismatching_utxos) == 0
self.log.info(" - Done")
def run_test(self):
# Post-taproot activation tests go first (pre-taproot tests' blocks are invalid post-taproot).
self.log.info("Post-activation tests...")
self.nodes[1].generate(101)
self.test_spenders(self.nodes[1], spenders_taproot_active(), input_counts=[1, 2, 2, 2, 2, 3])
# Transfer value of the largest 500 coins to pre-taproot node.
addr = self.nodes[0].getnewaddress()
unsp = self.nodes[1].listunspent()
unsp = sorted(unsp, key=lambda i: i['amount'], reverse=True)
unsp = unsp[:500]
rawtx = self.nodes[1].createrawtransaction(
inputs=[{
'txid': i['txid'],
'vout': i['vout']
} for i in unsp],
outputs={addr: sum(i['amount'] for i in unsp)}
)
rawtx = self.nodes[1].signrawtransactionwithwallet(rawtx)['hex']
# Mine a block with the transaction
block = create_block(tmpl=self.nodes[1].getblocktemplate(NORMAL_GBT_REQUEST_PARAMS), txlist=[rawtx])
add_witness_commitment(block)
block.rehash()
block.solve()
assert_equal(None, self.nodes[1].submitblock(block.serialize().hex()))
self.sync_blocks()
# Pre-taproot activation tests.
self.log.info("Pre-activation tests...")
# Run each test twice; once in isolation, and once combined with others. Testing in isolation
# means that the standardness is verified in every test (as combined transactions are only standard
# when all their inputs are standard).
self.test_spenders(self.nodes[0], spenders_taproot_inactive(), input_counts=[1])
self.test_spenders(self.nodes[0], spenders_taproot_inactive(), input_counts=[2, 3])
if __name__ == '__main__':
TaprootTest().main()
| []
| []
| [
"TEST_DUMP_DIR"
]
| [] | ["TEST_DUMP_DIR"] | python | 1 | 0 | |
python/zzz/v3-group_feat_cnn_tran/models/cnn_mm_tran.py | import os
import numpy as np
from time import time
import tensorflow as tf
from keras import backend as K
from sklearn.metrics import accuracy_score
from utils.evaluators import BCubeEvaluator
from keras.utils.np_utils import to_categorical
from structures.collections import MentionCluster
from keras.models import Model, save_model, load_model
from components.features import MentionPairFeatureExtractor
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers import Input, Reshape, Flatten, Dense, merge, Dropout
tf.logging.set_verbosity(tf.logging.WARN)
tf.python.control_flow_ops = tf
# LRS: Left-Reduced-Shift, P: Pass, S: Shift (Head mention), R: Reduce (Singleton)
class LABELS(object):
LRS, P, R, S, nb_labels = range(5)
_m = {0: 'LRS', 1: 'P', 2: 'R', 3: 'S', 'LRS': 0, 'P': 1, 'R': 2, 'S': 3}
@staticmethod
def to_class_id(label):
return LABELS._m.get(label.upper(), None)
@staticmethod
def to_class_label(class_id):
return LABELS._m.get(int(class_id), None)
class MentionMentionCNN:
def __init__(self, nb_embs, embdim, embftdim, dftdim, nb_filters, gpu_id=-1):
self.nb_embs, self.embdim, self.embftdim, self.dftdim = nb_embs, embdim, embftdim, dftdim
if gpu_id >= 0:
gpu_options = tf.GPUOptions(visible_device_list=str(gpu_id), allow_growth=True)
K.set_session(tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)))
else:
os.environ['CUDA_VISIBLE_DEVICES'] = "-1"
with tf.device('/cpu:0' if gpu_id < 0 else '/gpu:0'):
# Input layers
mm_inp_dft = Input(shape=(dftdim,))
m1_inp_embs = [Input(shape=(1, nb_emb, embdim)) for nb_emb in nb_embs]
m2_inp_embs = [Input(shape=(1, nb_emb, embdim)) for nb_emb in nb_embs]
m1_inp_ebft, m2_inp_ebft = Input(shape=(embftdim,)), Input(shape=(embftdim,))
# Convolution + Pooling layers
convs_1r, convs_2r, convs_3r, pools_1r, pools_2r, pools_3r = [], [], [], [], [], []
for nb_emb in nb_embs:
convs_1r.append(Convolution2D(nb_filters, 1, embdim, activation='tanh'))
convs_2r.append(Convolution2D(nb_filters, 2, embdim, activation='tanh'))
convs_3r.append(Convolution2D(nb_filters, 3, embdim, activation='tanh'))
pools_1r.append(MaxPooling2D(pool_size=(nb_emb - 0, 1)))
pools_2r.append(MaxPooling2D(pool_size=(nb_emb - 1, 1)))
pools_3r.append(MaxPooling2D(pool_size=(nb_emb - 2, 1)))
# Mention-mention embedding vectors
reshape, dropout = Reshape((nb_filters,)), Dropout(0.8)
m1_emb_vec_1r, m1_emb_vec_2r, m1_emb_vec_3r, m2_emb_vec_1r, m2_emb_vec_2r, m2_emb_vec_3r = [], [], [], [], [], []
for m1_inp, m2_inp, conv_1r, conv_2r, conv_3r, pool_1r, pool_2r, pool_3r in \
zip(m1_inp_embs, m2_inp_embs, convs_1r, convs_2r, convs_3r, pools_1r, pools_2r, pools_3r):
m1_emb_vec_1r.append(dropout(reshape(pool_1r(conv_1r(m1_inp)))))
m1_emb_vec_2r.append(dropout(reshape(pool_2r(conv_2r(m1_inp)))))
m1_emb_vec_3r.append(dropout(reshape(pool_3r(conv_3r(m1_inp)))))
m2_emb_vec_1r.append(dropout(reshape(pool_1r(conv_1r(m2_inp)))))
m2_emb_vec_2r.append(dropout(reshape(pool_2r(conv_2r(m2_inp)))))
m2_emb_vec_3r.append(dropout(reshape(pool_3r(conv_3r(m2_inp)))))
nb_rows = sum([len(m1_emb_vec_1r), len(m1_emb_vec_2r), len(m1_emb_vec_3r)])
m1_matrix_emb = Reshape((1, nb_rows, nb_filters))(merge(m1_emb_vec_1r + m1_emb_vec_2r + m1_emb_vec_3r, mode='concat'))
m2_matrix_emb = Reshape((1, nb_rows, nb_filters))(merge(m2_emb_vec_1r + m2_emb_vec_2r + m2_emb_vec_3r, mode='concat'))
conv_m = Convolution2D(nb_filters, 1, nb_filters, activation='tanh')
pool_m = MaxPooling2D(pool_size=(nb_rows, 1))
m1_vec = merge([Reshape((nb_filters,))(pool_m(conv_m(m1_matrix_emb))), m1_inp_ebft], mode='concat')
m2_vec = merge([Reshape((nb_filters,))(pool_m(conv_m(m2_matrix_emb))), m2_inp_ebft], mode='concat')
mm_matrix_vec = Reshape((1, 2, nb_filters+embftdim))(merge([m1_vec, m2_vec], mode='concat'))
conv_mm = Convolution2D(nb_filters, 1, nb_filters, activation='tanh')
pool_mm = MaxPooling2D(pool_size=(2, 1))
mm_vec = merge([Flatten()(pool_mm(conv_mm(mm_matrix_vec))), mm_inp_dft], mode='concat')
# Regression
probs = Dense(LABELS.nb_labels, activation="sigmoid")(mm_vec)
# Model compilation
self.model = Model(input=m1_inp_embs + m2_inp_embs + [m1_inp_ebft, m2_inp_ebft, mm_inp_dft], output=probs)
self.model.compile(loss='categorical_crossentropy', optimizer='RMSprop')
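# In summary: each mention's embedding groups go through width-1/2/3 convolutions with
# max-pooling, are stacked and convolved again, then concatenated with that mention's
# hand-crafted features; the two mention vectors plus the pairwise discrete features feed
# a final convolution and a sigmoid-activated layer over the four transition labels,
# trained with categorical cross-entropy.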
def fit(self, mentions_trn, mentions_dev, trn_cluster_docs_gold, dev_cluster_docs_gold,
Xtrn, Ytrn, Xdev, Ydev, eval_every=1, nb_epoch=20, batch_size=32, model_out=None):
best_trn_scores, best_dev_scores, best_epoch, total_time = ([0]*3, [0]*3, 0, 0)
decoder = MentionMentionCNNDecoder()
evaluator = BCubeEvaluator()
for e in range(nb_epoch/eval_every):
global_start_time = local_start_time = time()
self.model.fit(Xtrn, to_categorical(Ytrn, LABELS.nb_labels),
batch_size=batch_size, nb_epoch=eval_every, shuffle=True, verbose=0)
trn_time = time() - local_start_time
trn_accuracy = accuracy_score(Ytrn, self.predict(Xtrn))
dev_accuracy = accuracy_score(Ydev, self.predict(Xdev))
local_start_time = time()
trn_cluster_docs_pred = decoder.decode(self, mentions_trn)
trn_p, trn_r, trn_f1 = evaluator.evaluate_docs(trn_cluster_docs_gold, trn_cluster_docs_pred)
dev_cluster_docs_pred = decoder.decode(self, mentions_dev)
dev_p, dev_r, dev_f1 = evaluator.evaluate_docs(dev_cluster_docs_gold, dev_cluster_docs_pred)
decode_time = time() - local_start_time
if best_dev_scores[2] < dev_f1:
best_epoch = e
best_dev_scores = [dev_p, dev_r, dev_f1]
best_trn_scores = [trn_p, trn_r, trn_f1]
if model_out is not None:
self.save_model(model_out)
lapse = time() - global_start_time
total_time += lapse
print 'Epoch %3d - Trn Accu(P/R/F): %.4f(%.4f/%.4f/%.4f), Dev Accu(P/R/F): %.4f(%.4f/%.4f/%.4f) - %4.2fs'\
% ((e+1)*eval_every, trn_accuracy, trn_p, trn_r, trn_f1, dev_accuracy, dev_p, dev_r, dev_f1, lapse)
print '\tTime breakdown: Train %.2f, Decode %.2f' % (trn_time, decode_time)
print '\nTraining Summary:'
print 'Best epoch: %d, Trn P/R/F: %.6f/%.6f/%.6f, Dev P/R/F : %.6f/%.6f/%.6f - %4.2fs' % \
((best_epoch+1)*eval_every, best_trn_scores[0], best_trn_scores[1], best_trn_scores[2],
best_dev_scores[0], best_dev_scores[1], best_dev_scores[2], total_time)
if model_out is not None:
print 'Model saved to %s' % model_out
def decode(self, mention_docs):
return MentionMentionCNNDecoder().decode(self, mention_docs)
def predict(self, Xtst):
return np.argmax(self.model.predict(Xtst), axis=-1)
def load_model(self, file_path):
try:
self.model = load_model(file_path)
except IOError:
raise IOError("Can't load model file %s" % file_path)
def save_model(self, file_path):
save_model(self.model, file_path)
class MentionMentionCNNDecoder(object):
def __init__(self):
self.mm_extractor = MentionPairFeatureExtractor()
def decode(self, model, mention_docs):
cluster_docs, label_counts = [], dict()
for mentions in mention_docs:
m_m2cluster, m_m2fs, valid_prev = dict(), dict(), [mentions[0]]
m_m2cluster[mentions[0]] = MentionCluster([mentions[0]])
for m in mentions:
emb = [eb.reshape(1, 1, neb, model.embdim) for eb, neb in zip(m.embedding, model.nb_embs)]
ebft = m.feature.reshape(1, model.embftdim)
m_m2fs[m] = (emb, ebft)
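# Transition-based decoding: for each mention (left to right), scan previously shifted
# mentions from nearest to farthest and classify the pair. LRS links curr into prev's
# cluster, S starts a new cluster headed by curr, R leaves curr as a singleton that is
# not reused as an antecedent, and P skips to an earlier candidate; if every candidate
# is passed, curr is kept as a future antecedent and starts its own cluster.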
for curr in mentions[1:]:
curr_emb, curr_ebft = m_m2fs[curr]
target, v_count = None, 0
for prev in reversed(valid_prev):
v_count += 1
prev_emb, prev_ebft = m_m2fs[prev]
mm_dft = self.mm_extractor.extract((prev, curr)).reshape(1, model.dftdim)
instance = prev_emb + curr_emb + [prev_ebft, curr_ebft, mm_dft]
label = np.asscalar(model.predict(instance)[0])
label_counts[label] = label_counts.get(label, 0) + 1
if label != LABELS.P:
if label == LABELS.LRS:
target = m_m2cluster[prev]
valid_prev.append(curr)
elif label == LABELS.S:
valid_prev.append(curr)
break
# If labels for previous mentions are all "PASS"
if v_count == len(valid_prev):
valid_prev.append(curr)
if target is None:
target = MentionCluster()
target.append(curr)
m_m2cluster[curr] = target
cluster_docs.append(list(set(m_m2cluster.values())))
print dict([(LABELS.to_class_label(e[0]), e[1]) for e in label_counts.items()])
return cluster_docs
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
tests/tiling/explosive_source.py | """
This is an explicit DG method: we invert the mass matrix and perform
a matrix-vector multiplication to get the solution in a time step
"""
from math import *
import mpi4py
import numpy as np
from time import time
import sys
import os
import cProfile
from firedrake import *
from firedrake.petsc import PETSc
from pyop2.utils import cached_property
from pyop2.profiling import timed_region
from pyop2.base import _trace, Dat, DataSet
from pyop2.fusion.interface import loop_chain
from pyop2.logger import info, set_log_level, INFO
import coffee.base as ast
from utils import parser, output_time, calculate_sdepth, FusionSchemes
class ElasticLF4(object):
r"""An elastic wave equation solver, using the finite element method
for spatial discretisation, and a fourth-order leap-frog time-stepping scheme."""
loop_chain_length = 28
num_solves = 8
def __init__(self, mesh, family, degree, dimension, output=1, params=None):
r""" Initialise a new elastic wave simulation.
:param mesh: The underlying computational mesh of vertices and edges.
:param str family: Specify whether CG or DG should be used.
:param int degree: Use polynomial basis functions of this degree.
:param int dimension: The spatial dimension of the problem (1, 2 or 3).
:param int output: period, in timesteps, to write solution fields to a file.
:param dict params: simulation and optimisation parameters
:returns: None
"""
self.degree = degree
self.mesh = mesh
self.dimension = dimension
self.output = output
self.tofile = params['tofile']
self.S = TensorFunctionSpace(mesh, family, degree, name='S')
self.U = VectorFunctionSpace(mesh, family, degree, name='U')
# Assumes that the S and U function spaces are the same.
self.S_tot_dofs = op2.MPI.COMM_WORLD.allreduce(self.S.dof_count, op=mpi4py.MPI.SUM)
self.U_tot_dofs = op2.MPI.COMM_WORLD.allreduce(self.U.dof_count, op=mpi4py.MPI.SUM)
info("Number of degrees of freedom (Velocity): %d" % self.U_tot_dofs)
info("Number of degrees of freedom (Stress): %d" % self.S_tot_dofs)
self.s = TrialFunction(self.S)
self.v = TestFunction(self.S)
self.u = TrialFunction(self.U)
self.w = TestFunction(self.U)
self.s0 = Function(self.S, name="StressOld")
self.sh1 = Function(self.S, name="StressHalf1")
self.stemp = Function(self.S, name="StressTemp")
self.sh2 = Function(self.S, name="StressHalf2")
self.s1 = Function(self.S, name="StressNew")
self.u0 = Function(self.U, name="VelocityOld")
self.uh1 = Function(self.U, name="VelocityHalf1")
self.utemp = Function(self.U, name="VelocityTemp")
self.uh2 = Function(self.U, name="VelocityHalf2")
self.u1 = Function(self.U, name="VelocityNew")
self.absorption_function = None
self.source_function = None
self.source_expression = None
self._dt = None
self._density = None
self._mu = None
self._l = None
self.n = FacetNormal(self.mesh)
self.I = Identity(self.dimension)
# Tiling options
self.tiling_size = params['tile_size']
self.tiling_uf = params['num_unroll']
self.tiling_mode = params['mode']
self.tiling_halo = params['extra_halo']
self.tiling_explicit = params['explicit_mode']
self.tiling_explicit_id = params['explicit_mode_id']
self.tiling_log = params['log']
self.tiling_sdepth = params['s_depth']
self.tiling_part = params['partitioning']
self.tiling_coloring = params['coloring']
self.tiling_glb_maps = params['use_glb_maps']
self.tiling_prefetch = params['use_prefetch']
# Mat-vec AST cache
self.asts = {}
if self.tofile:
# File output streams
platform = os.environ.get('NODENAME', 'unknown')
tmpdir = os.environ['TMPDIR']
base = os.path.join(tmpdir, 'output', platform,
'p%d' % self.degree, 'uf%d' % self.tiling_uf)
if op2.MPI.COMM_WORLD.rank == 0:
if not os.path.exists(base):
os.makedirs(base)
sub_dirs = [d for d in os.listdir(base)
if os.path.isdir(os.path.join(base, d))]
sub_dir = "%d_em%d_part%s_tile%s" % (len(sub_dirs),
self.tiling_explicit_id,
self.tiling_size if self.tiling_uf else 0,
self.tiling_part if self.tiling_uf else 'None')
base = os.path.join(base, sub_dir)
os.makedirs(base)
op2.MPI.COMM_WORLD.barrier()
base = op2.MPI.COMM_WORLD.bcast(base, root=0)
self.u_stream = File(os.path.join(base, 'velocity.pvd'))
self.s_stream = File(os.path.join(base, 'stress.pvd'))
@property
def absorption(self):
r""" The absorption coefficient :math:`\sigma` for the absorption term
.. math:: \sigma\mathbf{u}
where :math:`\mathbf{u}` is the velocity field.
"""
return self.absorption_function
@absorption.setter
def absorption(self, expression):
r""" Setter function for the absorption field.
:param firedrake.Expression expression: The expression to interpolate onto the absorption field.
"""
self.absorption_function.interpolate(expression)
# Source term
@property
def source(self):
r""" The source term on the RHS of the velocity (or stress) equation. """
return self.source_function
@source.setter
def source(self, expression):
r""" Setter function for the source field.
:param firedrake.Expression expression: The expression to interpolate onto the source field.
"""
self.source_function.interpolate(expression)
def assemble_inverse_mass(self):
r""" Compute the inverse of the consistent mass matrix for the velocity and stress equations.
:returns: None
"""
# Inverse of the (consistent) mass matrix for the velocity equation.
self.inverse_mass_velocity = assemble(inner(self.w, self.u)*dx, inverse=True)
self.inverse_mass_velocity.assemble()
self.imass_velocity = self.inverse_mass_velocity.M
# Inverse of the (consistent) mass matrix for the stress equation.
self.inverse_mass_stress = assemble(inner(self.v, self.s)*dx, inverse=True)
self.inverse_mass_stress.assemble()
self.imass_stress = self.inverse_mass_stress.M
def copy_massmatrix_into_dat(self):
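# The inverse mass matrices are block-diagonal (one dense arity x arity block per cell),
# so each block is extracted and stored flat in a cell-indexed Dat; solve() can then apply
# the inverse mass matrix as a per-cell matrix-vector product inside a par_loop.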
# Copy the velocity mass matrix into a Dat
vmat = self.imass_velocity.handle
dofs_per_entity = self.U.fiat_element.entity_dofs()
dofs_per_entity = sum(self.mesh.make_dofs_per_plex_entity(dofs_per_entity))
arity = dofs_per_entity*self.U.topological.dim
self.velocity_mass_asdat = Dat(DataSet(self.mesh.cell_set, arity*arity), dtype='double')
istart, iend = vmat.getOwnershipRange()
idxs = [PETSc.IS().createGeneral(np.arange(i, i+arity, dtype=np.int32),
comm=PETSc.COMM_SELF)
for i in range(istart, iend, arity)]
submats = vmat.getSubMatrices(idxs, idxs)
for i, m in enumerate(submats):
self.velocity_mass_asdat.data[i] = m[:, :].flatten()
info("Computed velocity mass matrix")
# Copy the stress mass matrix into a Dat
smat = self.imass_stress.handle
dofs_per_entity = self.S.fiat_element.entity_dofs()
dofs_per_entity = sum(self.mesh.make_dofs_per_plex_entity(dofs_per_entity))
arity = dofs_per_entity*self.S.topological.dim
self.stress_mass_asdat = Dat(DataSet(self.mesh.cell_set, arity*arity), dtype='double')
istart, iend = smat.getOwnershipRange()
idxs = [PETSc.IS().createGeneral(np.arange(i, i+arity, dtype=np.int32),
comm=PETSc.COMM_SELF)
for i in range(istart, iend, arity)]
submats = smat.getSubMatrices(idxs, idxs)
for i, m in enumerate(submats):
self.stress_mass_asdat.data[i] = m[:, :].flatten()
info("Computed stress mass matrix")
@property
def form_uh1(self):
""" UFL for uh1 equation. """
F = inner(self.w, self.u)*dx - self.f(self.w, self.s0, self.u0, self.n, self.absorption)
return F
@cached_property
def rhs_uh1(self):
""" RHS for uh1 equation. """
return rhs(self.form_uh1)
@property
def form_stemp(self):
""" UFL for stemp equation. """
F = inner(self.v, self.s)*dx - self.g(self.v, self.uh1, self.I, self.n, self.l, self.mu, self.source)
return F
@cached_property
def rhs_stemp(self):
""" RHS for stemp equation. """
return rhs(self.form_stemp)
@property
def form_uh2(self):
""" UFL for uh2 equation. """
F = inner(self.w, self.u)*dx - self.f(self.w, self.stemp, self.u0, self.n, self.absorption)
return F
@cached_property
def rhs_uh2(self):
""" RHS for uh2 equation. """
return rhs(self.form_uh2)
@property
def form_u1(self):
""" UFL for u1 equation. """
# Note that we have multiplied through by dt here.
F = self.density*inner(self.w, self.u)*dx - self.density*inner(self.w, self.u0)*dx - self.dt*inner(self.w, self.uh1)*dx - ((self.dt**3)/24.0)*inner(self.w, self.uh2)*dx
return F
@cached_property
def rhs_u1(self):
""" RHS for u1 equation. """
return rhs(self.form_u1)
@property
def form_sh1(self):
""" UFL for sh1 equation. """
F = inner(self.v, self.s)*dx - self.g(self.v, self.u1, self.I, self.n, self.l, self.mu, self.source)
return F
@cached_property
def rhs_sh1(self):
""" RHS for sh1 equation. """
return rhs(self.form_sh1)
@property
def form_utemp(self):
""" UFL for utemp equation. """
F = inner(self.w, self.u)*dx - self.f(self.w, self.sh1, self.u1, self.n, self.absorption)
return F
@cached_property
def rhs_utemp(self):
""" RHS for utemp equation. """
return rhs(self.form_utemp)
@property
def form_sh2(self):
""" UFL for sh2 equation. """
F = inner(self.v, self.s)*dx - self.g(self.v, self.utemp, self.I, self.n, self.l, self.mu, self.source)
return F
@cached_property
def rhs_sh2(self):
""" RHS for sh2 equation. """
return rhs(self.form_sh2)
@property
def form_s1(self):
""" UFL for s1 equation. """
# Note that we have multiplied through by dt here.
F = inner(self.v, self.s)*dx - inner(self.v, self.s0)*dx - self.dt*inner(self.v, self.sh1)*dx - ((self.dt**3)/24.0)*inner(self.v, self.sh2)*dx
return F
@cached_property
def rhs_s1(self):
""" RHS for s1 equation. """
return rhs(self.form_s1)
def f(self, w, s0, u0, n, absorption=None):
""" The RHS of the velocity equation. """
f = -inner(grad(w), s0)*dx + inner(avg(s0)*n('+'), w('+'))*dS + inner(avg(s0)*n('-'), w('-'))*dS
if(absorption):
f += -inner(w, absorption*u0)*dx
return f
def g(self, v, u1, I, n, l, mu, source=None):
""" The RHS of the stress equation. """
g = (- l*(v[i, j]*I[i, j]).dx(k)*u1[k]*dx
     + l*(jump(v[i, j], n[k])*I[i, j]*avg(u1[k]))*dS
     + l*(v[i, j]*I[i, j]*u1[k]*n[k])*ds
     - mu*inner(div(v), u1)*dx
     + mu*inner(avg(u1), jump(v, n))*dS
     - mu*inner(div(v.T), u1)*dx
     + mu*inner(avg(u1), jump(v.T, n))*dS
     + mu*inner(u1, dot(v, n))*ds
     + mu*inner(u1, dot(v.T, n))*ds)
if(source):
g += inner(v, source)*dx
return g
def ast_matmul(self, F_a, implementation='optimized'):
"""Generate an AST for a PyOP2 kernel performing a matrix-vector multiplication."""
# The number of dofs on each element is /ndofs*cdim/
F_a_fs = F_a.function_space()
ndofs = F_a_fs.fiat_element.entity_dofs()
ndofs = sum(self.mesh.make_dofs_per_plex_entity(ndofs))
cdim = F_a_fs.dim
name = 'mat_vec_mul_kernel_%s' % F_a_fs.name
identifier = (ndofs, cdim, name, implementation)
if identifier in self.asts:
return self.asts[identifier]
from coffee import isa, options
if cdim and cdim % isa['dp_reg'] == 0:
simd_pragma = '#pragma simd reduction(+:sum)'
else:
simd_pragma = ''
# Craft the AST
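# The optimized variant roughly corresponds to a per-cell C kernel of the form
#   for (int i = 0; i < ndofs*cdim; i++) {
#     const int index = i % cdim;
#     double sum = 0.0;
#     for (int j = 0; j < ndofs; j++)
#       for (int k = 0; k < cdim; k++)  /* simd-reduced over k when cdim is a multiple of the dp register width */
#         sum += A[...] * B[j][k];      /* A is the flattened per-cell inverse mass block */
#     C[i/cdim][index] = sum;
#   }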
if implementation == 'optimized' and cdim >= 4:
body = ast.Incr(ast.Symbol('sum'),
ast.Prod(ast.Symbol('A', ('i',), ((ndofs*cdim, 'j*%d + k' % cdim),)),
ast.Symbol('B', ('j', 'k'))))
body = ast.c_for('k', cdim, body, simd_pragma).children[0]
body = [ast.Decl('const int', ast.Symbol('index'), init=ast.Symbol('i%%%d' % cdim)),
ast.Decl('double', ast.Symbol('sum'), init=ast.Symbol('0.0')),
ast.c_for('j', ndofs, body).children[0],
ast.Assign(ast.Symbol('C', ('i/%d' % cdim, 'index')), 'sum')]
body = ast.Block([ast.c_for('i', ndofs*cdim, body).children[0]])
funargs = [ast.Decl('double* restrict', 'A'),
ast.Decl('double *restrict *restrict', 'B'),
ast.Decl('double *restrict *', 'C')]
fundecl = ast.FunDecl('void', name, funargs, body, ['static', 'inline'])
else:
body = ast.Incr(ast.Symbol('C', ('i/%d' % cdim, 'index')),
ast.Prod(ast.Symbol('A', ('i',), ((ndofs*cdim, 'j*%d + k' % cdim),)),
ast.Symbol('B', ('j', 'k'))))
body = ast.c_for('k', cdim, body).children[0]
body = [ast.Decl('const int', ast.Symbol('index'), init=ast.Symbol('i%%%d' % cdim)),
ast.Assign(ast.Symbol('C', ('i/%d' % cdim, 'index')), '0.0'),
ast.c_for('j', ndofs, body).children[0]]
body = ast.Block([ast.c_for('i', ndofs*cdim, body).children[0]])
funargs = [ast.Decl('double* restrict', 'A'),
ast.Decl('double *restrict *restrict', 'B'),
ast.Decl('double *restrict *', 'C')]
fundecl = ast.FunDecl('void', name, funargs, body, ['static', 'inline'])
# Track the AST for later fast retrieval
self.asts[identifier] = fundecl
return fundecl
def solve(self, rhs, matrix_asdat, result):
F_a = assemble(rhs)
ast_matmul = self.ast_matmul(F_a)
# Create the par loop (automatically added to the trace of loops to be executed)
kernel = op2.Kernel(ast_matmul, ast_matmul.name)
op2.par_loop(kernel, self.mesh.cell_set,
matrix_asdat(op2.READ),
F_a.dat(op2.READ, F_a.cell_node_map()),
result.dat(op2.WRITE, result.cell_node_map()))
def write(self, u=None, s=None, output=True):
r""" Write the velocity and/or stress fields to file.
:param firedrake.Function u: The velocity field.
:param firedrake.Function s: The stress field.
:returns: None
"""
_trace.evaluate_all()
if output:
with timed_region('i/o'):
if(u):
self.u_stream.write(u)
if(s):
# FIXME: Cannot currently write tensor valued fields to a VTU file.
# See https://github.com/firedrakeproject/firedrake/issues/538
#self.s_stream << s
pass
def run(self, T, TS=0):
""" Run the elastic wave simulation until t = T or ntimesteps = TS.
:param float T: The finish time of the simulation.
:param float TS: The maximum number of timesteps performed; ignored if = 0.
:returns: The final solution fields for velocity and stress.
"""
# Write out the initial condition.
self.write(self.u1, self.s1, self.tofile)
info("Generating inverse mass matrix")
# Pre-assemble the inverse mass matrices, which should stay
# constant throughout the simulation (assuming no mesh adaptivity).
start = time()
self.assemble_inverse_mass()
end = time()
info("DONE! (Elapsed: %f s)" % round(end - start, 3))
op2.MPI.COMM_WORLD.barrier()
info("Copying inverse mass matrix into a dat...")
start = time()
self.copy_massmatrix_into_dat()
end = time()
info("DONE! (Elapsed: %f s)" % round(end - start, 3))
op2.MPI.COMM_WORLD.barrier()
start = time()
t = self.dt
timestep = 0
ntimesteps = sys.maxint if TS == 0 else TS
while t <= T + 1e-12 and timestep < ntimesteps:
if op2.MPI.COMM_WORLD.rank == 0 and timestep % self.output == 0:
info("t = %f, (timestep = %d)" % (t, timestep))
with loop_chain("main1",
tile_size=self.tiling_size,
num_unroll=self.tiling_uf,
mode=self.tiling_mode,
extra_halo=self.tiling_halo,
explicit=self.tiling_explicit,
use_glb_maps=self.tiling_glb_maps,
use_prefetch=self.tiling_prefetch,
coloring=self.tiling_coloring,
ignore_war=True,
log=self.tiling_log):
# In case the source is time-dependent, update the time 't' here.
if(self.source):
with timed_region('source term update'):
self.source_expression.t = t
self.source = self.source_expression
# Solve for the velocity vector field.
self.solve(self.rhs_uh1, self.velocity_mass_asdat, self.uh1)
self.solve(self.rhs_stemp, self.stress_mass_asdat, self.stemp)
self.solve(self.rhs_uh2, self.velocity_mass_asdat, self.uh2)
self.solve(self.rhs_u1, self.velocity_mass_asdat, self.u1)
# Solve for the stress tensor field.
self.solve(self.rhs_sh1, self.stress_mass_asdat, self.sh1)
self.solve(self.rhs_utemp, self.velocity_mass_asdat, self.utemp)
self.solve(self.rhs_sh2, self.stress_mass_asdat, self.sh2)
self.solve(self.rhs_s1, self.stress_mass_asdat, self.s1)
self.u0.assign(self.u1)
self.s0.assign(self.s1)
# Write out the new fields
self.write(self.u1, self.s1, self.tofile and timestep % self.output == 0)
# Move onto next timestep
t += self.dt
timestep += 1
# Write out the final state of the fields
self.write(self.u1, self.s1, self.tofile)
end = time()
return start, end, timestep, self.u1, self.s1
# Helper stuff
def Vp(mu, l, density):
r""" Calculate the P-wave velocity, given by
.. math:: \sqrt{\frac{(\lambda + 2\mu)}{\rho}}
where :math:`\rho` is the density, and :math:`\lambda` and :math:`\mu` are
the first and second Lame parameters, respectively.
:param mu: The second Lame parameter.
:param l: The first Lame parameter.
:param density: The density.
:returns: The P-wave velocity.
:rtype: float
"""
return sqrt((l + 2*mu)/density)
def Vs(mu, density):
r""" Calculate the S-wave velocity, given by
.. math:: \sqrt{\frac{\mu}{\rho}}
where :math:`\rho` is the density, and :math:`\mu` is the second Lame parameter.
:param mu: The second Lame parameter.
:param density: The density.
:returns: The P-wave velocity.
:rtype: float
"""
return sqrt(mu/density)
def cfl_dt(dx, Vp, courant_number):
r""" Computes the maximum permitted value for the timestep :math:`\delta t`.
:param float dx: The characteristic element length.
:param float Vp: The P-wave velocity.
:param float courant_number: The desired Courant number
:returns: The maximum permitted timestep, :math:`\delta t`.
:rtype: float
"""
return (courant_number*dx)/Vp
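# With the constants used below (mu = 3600, l = 3599.3664, density = 1) this gives
# Vp = sqrt(10799.3664) = 103.92 and Vs = 60, so for the default h = 2.5 and Courant
# number 0.05 the timestep is roughly 0.05 * 2.5 / 103.92 ~= 1.2e-3.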
class ExplosiveSourceLF4(object):
def explosive_source_lf4(self, T=2.5, TS=0, Lx=300.0, Ly=150.0, h=2.5, cn=0.05,
mesh_file=None, output=1, poly_order=2, params=None):
tile_size = params['tile_size']
num_unroll = params['num_unroll']
extra_halo = params['extra_halo']
part_mode = params['partitioning']
explicit_mode = params['explicit_mode']
if explicit_mode:
fusion_scheme = FusionSchemes.get(explicit_mode, part_mode, tile_size)
num_solves, params['explicit_mode'] = fusion_scheme
else:
num_solves = ElasticLF4.num_solves
if mesh_file:
mesh = Mesh(mesh_file)
else:
mesh = RectangleMesh(int(Lx/h), int(Ly/h), Lx, Ly)
set_log_level(INFO)
kwargs = {}
if params['mode'] in ['tile', 'only_tile']:
s_depth = calculate_sdepth(num_solves, num_unroll, extra_halo)
if part_mode == 'metis':
kwargs['reorder'] = ('metis-rcm', mesh.num_cells() / tile_size)
else:
s_depth = 1
# FIXME: need s_depth in firedrake to be able to use this
# kwargs['s_depth'] = s_depth
params['s_depth'] = s_depth
mesh.topology.init(**kwargs)
slope(mesh, debug=True)
# Instantiate the model
self.elastic = ElasticLF4(mesh, "DG", poly_order, 2, output, params)
info("S-depth used: %d" % s_depth)
info("Polynomial order: %d" % poly_order)
# Constants
self.elastic.density = 1.0
self.elastic.mu = 3600.0
self.elastic.l = 3599.3664
self.Vp = Vp(self.elastic.mu, self.elastic.l, self.elastic.density)
self.Vs = Vs(self.elastic.mu, self.elastic.density)
info("P-wave velocity: %f" % self.Vp)
info("S-wave velocity: %f" % self.Vs)
self.dx = h
self.courant_number = cn
self.elastic.dt = cfl_dt(self.dx, self.Vp, self.courant_number)
info("Using a timestep of %f" % self.elastic.dt)
# Source
exp_area = (44.5, 45.5, Ly - 1.5, Ly - 0.5)
if poly_order == 1:
# Adjust explosion area
exp_area = (149.5, 150.5, Ly - 1.5, Ly - 0.5)
a = 159.42
self.elastic.source_expression = Expression((("x[0] >= %f && x[0] <= %f && x[1] >= %f && x[1] <= %f ? (-1.0 + 2*a*pow(t - 0.3, 2))*exp(-a*pow(t - 0.3, 2)) : 0.0" % exp_area, "0.0"),
("0.0", "x[0] >= %f && x[0] <= %f && x[1] >= %f && x[1] <= %f ? (-1.0 + 2*a*pow(t - 0.3, 2))*exp(-a*pow(t - 0.3, 2)) : 0.0" % exp_area)), a=a, t=0)
self.elastic.source_function = Function(self.elastic.S)
self.elastic.source = self.elastic.source_expression
# Absorption
F = FunctionSpace(mesh, "DG", poly_order, name='F')
self.elastic.absorption_function = Function(F)
self.elastic.absorption = Expression("x[0] <= 20 || x[0] >= %f || x[1] <= 20.0 ? 1000 : 0" % (Lx - 20,))
# Initial conditions
uic = Expression(('0.0', '0.0'))
self.elastic.u0.assign(Function(self.elastic.U).interpolate(uic))
sic = Expression((('0', '0'), ('0', '0')))
self.elastic.s0.assign(Function(self.elastic.S).interpolate(sic))
# Run the simulation
start, end, ntimesteps, u1, s1 = self.elastic.run(T, TS=TS)
# Print runtime summary
output_time(start, end,
tofile=params['tofile'],
verbose=params['verbose'],
meshid=("h%s" % h).replace('.', ''),
ntimesteps=ntimesteps,
nloops=ElasticLF4.loop_chain_length*num_unroll,
partitioning=part_mode,
tile_size=tile_size,
extra_halo=extra_halo,
explicit_mode=explicit_mode,
glb_maps=params['use_glb_maps'],
prefetch=params['use_prefetch'],
coloring=params['coloring'],
poly_order=poly_order,
domain=os.path.splitext(os.path.basename(mesh.name))[0],
function_spaces=[self.elastic.S, self.elastic.U])
return u1, s1
if __name__ == '__main__':
set_log_level(INFO)
# Parse the input
args = parser()
params = {
'num_unroll': args.num_unroll,
'tile_size': args.tile_size,
'mode': args.fusion_mode,
'partitioning': args.part_mode,
'coloring': args.coloring,
'extra_halo': args.extra_halo,
'explicit_mode': args.explicit_mode,
'explicit_mode_id': args.explicit_mode,
'use_glb_maps': args.glb_maps,
'use_prefetch': args.prefetch,
'log': args.log,
'tofile': args.tofile,
'verbose': args.verbose
}
# Set the kernel optimizaation level (default: O2)
parameters['coffee']['optlevel'] = args.coffee_opt
# Is it just a run to check correctness?
if args.check:
Lx, Ly, h, time_max, tolerance = 20, 20, 2.5, 0.01, 1e-10
info("Checking correctness of original and tiled versions, with:")
info(" (Lx, Ly, T, tolerance)=%s" % str((Lx, Ly, time_max, tolerance)))
info(" %s" % params)
# Run the tiled variant
u1, s1 = ExplosiveSourceLF4().explosive_source_lf4(time_max, Lx, Ly, h,
sys.maxint, params)
# Run the original code
original = {'num_unroll': 0, 'tile_size': 0, 'mode': None,
'partitioning': 'chunk', 'extra_halo': 0}
u1_orig, s1_orig = ExplosiveSourceLF4().explosive_source_lf4(time_max, Lx, Ly, h,
sys.maxint, original)
# Check output
info("Checking output...")
assert np.allclose(u1.dat.data, u1_orig.dat.data, rtol=1e-10)
assert np.allclose(s1.dat.data, s1_orig.dat.data, rtol=1e-10)
info("Results OK!")
sys.exit(0)
# Set the input mesh
if args.mesh_file:
info("Using the unstructured mesh %s" % args.mesh_file)
kwargs = {'T': args.time_max, 'TS': args.timesteps_max, 'mesh_file': args.mesh_file,
'h': args.ms, 'cn': args.cn, 'output': args.output, 'poly_order': args.poly_order,
'params': params}
else:
Lx, Ly = eval(args.mesh_size)
info("Using the structured mesh with values (Lx,Ly,h)=%s" % str((Lx, Ly, args.ms)))
kwargs = {'T': args.time_max, 'TS': args.timesteps_max, 'Lx': Lx, 'Ly': Ly, 'h': args.ms,
'output': args.output, 'poly_order': args.poly_order, 'params': params}
info("h=%f, courant number=%f" % (args.ms, args.cn))
if args.profile:
cProfile.run('ExplosiveSourceLF4().explosive_source_lf4(**kwargs)',
'log_rank%d.cprofile' % op2.MPI.COMM_WORLD.rank)
else:
u1, s1 = ExplosiveSourceLF4().explosive_source_lf4(**kwargs)
| []
| []
| [
"NODENAME",
"TMPDIR"
]
| [] | ["NODENAME", "TMPDIR"] | python | 2 | 0 | |
tests/nbtools/test_user_config.py | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Module docstring."""
import os
import pytest
import pytest_check as check
import yaml
import msticpy
from msticpy.data import QueryProvider
from msticpy.nbtools import user_config
from msticpy.common.pkg_config import settings
# pylint: disable=redefined-outer-name, unused-import, ungrouped-imports
try:
import msticnb # noqa: F401
_NOTEBOOKLETS = True
except ImportError:
_NOTEBOOKLETS = False
try:
from msticpy.datamodel.pivot import Pivot # noqa: F401
_PIVOT = True
except ImportError:
_PIVOT = False
from ..unit_test_lib import custom_mp_config
__author__ = "Ian Hellen"
CONFIG_TEXT = """
UserDefaults:
# List of query providers to load
QueryProviders:
AzureSentinel:
Default:
alias: asi
connect: False
CyberSoc:
alias: soc
connect: False
Splunk:
connect: False
LocalData:
alias: local
# List of other providers/components to load
LoadComponents:
TILookup:
GeoIpLookup:
provider: IpStackLookup
Notebooklets:
query_provider:
LocalData:
workspace: CyberSoc
some_param: some_value
Pivot:
AzureData:
auth_methods: ['cli','interactive']
connect: False
AzureSentinelAPI:
auth_methods: ['env','interactive']
connect: False
"""
@pytest.fixture(scope="module")
def mp_settings():
"""Return test settings."""
settings_dict = yaml.safe_load(CONFIG_TEXT)
if not _NOTEBOOKLETS and settings_dict.get("LoadComponents", {}).get(
"Notebooklets"
):
del settings_dict["LoadComponents"]["Notebooklets"]
if not _PIVOT and settings_dict.get("LoadComponents", {}).get("Pivot"):
del settings_dict["LoadComponents"]["Pivot"]
return settings_dict
def test_user_config(mp_settings):
"""Test user config."""
mpcfg_path = os.environ.get("MSTICPYCONFIG")
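# custom_mp_config temporarily applies the config file named by MSTICPYCONFIG (if set),
# so the UserDefaults section defined above can be injected into settings for this test.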
with custom_mp_config(mp_path=mpcfg_path):
settings["UserDefaults"] = mp_settings.get("UserDefaults")
prov_dict = user_config.load_user_defaults()
check.is_in("qry_asi", prov_dict)
check.is_instance(prov_dict["qry_asi"], QueryProvider)
check.equal(prov_dict["qry_asi"].environment, "AzureSentinel")
check.is_in("qry_soc", prov_dict)
check.is_instance(prov_dict["qry_soc"], QueryProvider)
check.equal(prov_dict["qry_asi"].environment, "AzureSentinel")
check.is_in("qry_splunk", prov_dict)
check.is_instance(prov_dict["qry_splunk"], QueryProvider)
check.equal(prov_dict["qry_splunk"].environment, "Splunk")
check.is_in("qry_local", prov_dict)
check.is_instance(prov_dict["qry_local"], QueryProvider)
check.is_true(prov_dict["qry_local"].connected)
check.equal(prov_dict["qry_local"].environment, "LocalData")
check.is_in("ti_lookup", prov_dict)
check.is_in("geoip", prov_dict)
check.is_in("az_data", prov_dict)
check.is_in("azs_api", prov_dict)
check.is_true(hasattr(msticpy, "current_providers"))
| []
| []
| [
"MSTICPYCONFIG"
]
| [] | ["MSTICPYCONFIG"] | python | 1 | 0 | |
shell.go | package main
import (
"fmt"
"io"
"net"
"os"
"os/signal"
"path"
"strings"
"sync"
"syscall"
"time"
"github.com/jessevdk/go-flags"
"github.com/mitchellh/go-homedir"
log "github.com/tillberg/alog"
"github.com/tillberg/autorestart"
"github.com/tillberg/bismuth"
)
var OptsCommon struct {
Verbose bool `short:"v" long:"verbose" description:"Show verbose debug information"`
Version bool `long:"version" description:"Print gut-sync version"`
NoColor bool `long:"no-color" description:"Disable ANSI colors"`
}
var OptsSync struct {
IdentityFile string `short:"i" long:"identity"`
Positional struct {
LocalPath string
} `positional-args:"yes" required:"yes"`
}
type FileEvent struct {
ctx *SyncContext
filepath string
}
const shutdownChanLen = 20
var shutdownChan = make(chan bool, shutdownChanLen)
func IsShuttingDown() bool {
select {
case <-shutdownChan:
return true
default:
return false
}
}
const commitDebounceDuration = 100 * time.Millisecond
const reconnectMinDelay = 2 * time.Second
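// commitDebounceDuration batches bursts of file events into a single commit/sync pass;
// reconnectMinDelay rate-limits reconnection attempts after a reverse tunnel drops.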
func (ctx *SyncContext) StartReverseTunnel(srcAddr string, destAddr string) (reconnectChan chan bool, err error) {
isFirstTime := true
firstTimeChan := make(chan error)
go func() {
logger := ctx.Logger()
lastConnectStartTime := time.Now()
for {
listener, tunnelErrChan, err := ctx.ReverseTunnel(srcAddr, destAddr)
if isFirstTime {
firstTimeChan <- err
isFirstTime = false
} else {
reconnectChan <- true
}
if err == nil {
err = <-tunnelErrChan
}
if IsShuttingDown() {
logger.Printf("@(dim)Reverse tunnel exiting (shutting down).@(r)\n")
return
}
if err == io.EOF {
logger.Printf("@(error:Connection lost.)\n")
} else {
logger.Printf("@(error:Encountered error on reverse-tunnel: %v)\n", err)
}
if listener != nil {
listener.Close() // Ignore any errors; it might already be closed.
}
reconnectLogger := ctx.NewLogger("")
reconnectStart := time.Now()
elapsedSeconds := func() int {
return int(time.Since(reconnectStart).Seconds())
}
for {
reconnectLogger.Replacef("@(dim)Reconnecting (%ds)...@(r)", elapsedSeconds())
// Rate-limit calls to Connect. The delay should be zero on timeout errors, assuming that the
// network timeout in bismuth is greater than reconnectMinDelay.
time.Sleep(reconnectMinDelay - time.Since(lastConnectStartTime))
lastConnectStartTime = time.Now()
err = ctx.Connect()
if err != nil {
squelch := false
netErr, ok := err.(net.Error)
if ok && netErr.Timeout() {
squelch = true
}
errStr := err.Error()
if strings.Contains(errStr, "no route to host") {
squelch = true
}
if strings.Contains(errStr, "connection refused") {
squelch = true
}
if !squelch {
logger.Printf("@(dim:Error while reconnecting: %v)\n", err)
}
} else {
reconnectLogger.Replacef("@(dim:Connection re-established after %d seconds.)\n", elapsedSeconds())
break
}
}
reconnectLogger.Close()
}
}()
reconnectChan = make(chan bool)
err = <-firstTimeChan
return reconnectChan, err
}
const reconnectBufferLength = 2
const eventBufferLength = 100
const forceFullSyncCheckString = "**force full sync check**"
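// forceFullSyncCheckString is a sentinel filepath pushed onto the event channel to force
// a full commit (including untracked files) and a sync attempt against every remote.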
func Sync(local *SyncContext, remotes []*SyncContext) (err error) {
status := local.NewLogger("sync")
defer status.Close()
allContexts := append([]*SyncContext{local}, remotes...)
hostsStrs := []string{}
for _, ctx := range allContexts {
hostsStrs = append(hostsStrs, ctx.SyncPathAnsi())
}
hostsStr := JoinWithAndAndCommas(hostsStrs...)
status.Printf("@(dim:Starting gut-sync between) %s@(dim:.)\n", hostsStr)
for _, ctx := range allContexts {
_, err = EnsureBuild(local, ctx)
if err != nil {
status.Bail(err)
}
}
ports, err := FindOpenPorts(1, allContexts...)
if err != nil {
status.Bail(err)
}
// status.Printf("Using ports %v\n", ports)
gutdPort := ports[0]
gutdAddr := fmt.Sprintf("localhost:%d", gutdPort)
repoName := RandSeq(8) + local.getPidfileScope()
eventChan := make(chan FileEvent, eventBufferLength)
// Start up gut-daemon on the local host, and create a reverse tunnel from each of the remote hosts
// back to the local gut-tdaemon. All hosts can connect to gut-daemon at localhost:<gutdPort>, which
// makes configuration a little simpler.
ready := make(chan bool)
numTasks := 0
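// goTask/joinTasks form a small fork/join helper: goTask runs fn concurrently and signals
// on ready when done; joinTasks blocks until every outstanding task has signalled.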
goTask := func(taskCtx *SyncContext, fn func(*SyncContext)) {
numTasks++
go func() {
fn(taskCtx)
ready <- true
}()
}
joinTasks := func() {
for numTasks > 0 {
<-ready
numTasks--
}
}
if len(remotes) > 0 {
goTask(local, func(taskCtx *SyncContext) {
err := taskCtx.GutDaemon(repoName, gutdPort)
if err != nil {
status.Bail(err)
}
})
}
for _, ctx := range remotes {
if !ctx.IsLocal() {
goTask(ctx, func(taskCtx *SyncContext) {
reconnectChan, err := taskCtx.StartReverseTunnel(gutdAddr, gutdAddr)
if err != nil {
status.Bail(err)
}
go func() {
for {
<-reconnectChan
eventChan <- FileEvent{taskCtx, forceFullSyncCheckString}
}
}()
})
}
}
joinTasks()
// Fetch the tail hash for all contexts in parallel
for _, ctx := range allContexts {
goTask(ctx, func(taskCtx *SyncContext) {
taskCtx.UpdateTailHash()
})
}
joinTasks()
// Iterate over the contexts, finding the common tailHash, if any. Bail if there are conflicting tailHashes.
tailHash := ""
var tailHashFoundOn *SyncContext
localTailHash := local.GetTailHash()
if localTailHash != "" {
tailHash = localTailHash
tailHashFoundOn = local
}
contextsNeedInit := []*SyncContext{}
for _, ctx := range remotes {
myTailHash := ctx.GetTailHash()
if myTailHash == "" {
err = ctx.AssertSyncFolderIsEmpty()
if err != nil {
status.Bail(err)
}
contextsNeedInit = append(contextsNeedInit, ctx)
} else {
if tailHash == "" {
tailHash = myTailHash
tailHashFoundOn = ctx
} else {
if tailHash != myTailHash {
status.Printf("@(error:Found different gut repo base commits:)\n")
status.Printf("@(commit:%s) @(error:at) %s\n",
TrimCommit(tailHash), tailHashFoundOn.SyncPathAnsi())
status.Printf("@(commit:%s) @(error:at) %s\n",
TrimCommit(myTailHash), ctx.SyncPathAnsi())
Shutdown(status.Colorify("@(error:Cannot sync incompatible gut repos.)"), 1)
}
goTask(ctx, func(taskCtx *SyncContext) {
err := taskCtx.GutSetupOrigin(repoName, gutdPort)
if err != nil {
status.Bail(err)
}
})
}
}
}
// If local needs to be initialized, do so, either from scratch or by bootstrapping from tailHashFoundOn.
if localTailHash == "" {
if tailHash == "" {
status.Printf("@(dim:No existing gut repo found. Initializing gut repo in %s.)\n", local.SyncPathAnsi())
err = local.GutInit()
if err != nil {
status.Bail(err)
}
err = local.GutSetupOrigin(repoName, gutdPort)
if err != nil {
status.Bail(err)
}
err = local.GutEnsureInitialCommit()
if err != nil {
status.Bail(err)
}
local.UpdateTailHash()
tailHash = local.GetTailHash()
if tailHash == "" {
Shutdown(status.Colorify("Failed to initialize new gut repo."), 1)
}
tailHashFoundOn = local
} else {
err = local.GutInit()
if err != nil {
status.Bail(err)
}
err = local.GutSetupOrigin(repoName, gutdPort)
if err != nil {
status.Bail(err)
}
joinTasks() // Wait for GutSetupOrigin on tailHashFoundOn to finish
err = tailHashFoundOn.GutPush()
if err != nil {
status.Bail(err)
}
err = local.GutCheckoutAsMaster(tailHashFoundOn.BranchName())
if err != nil {
status.Bail(err)
}
}
} else {
goTask(local, func(taskCtx *SyncContext) {
err := taskCtx.GutSetupOrigin(repoName, gutdPort)
if err != nil {
status.Bail(err)
}
})
}
// Bootstrap any non-local contexts that need it:
for _, ctx := range contextsNeedInit {
goTask(ctx, func(taskCtx *SyncContext) {
err := taskCtx.GutInit()
if err != nil {
status.Bail(err)
}
err = taskCtx.GutSetupOrigin(repoName, gutdPort)
if err != nil {
status.Bail(err)
}
err = taskCtx.GutPull()
if err != nil {
status.Bail(err)
}
})
}
joinTasks()
commitScoped := func(src *SyncContext, changedPaths []string, updateUntracked bool) (changed bool, err error) {
prefix := CommonPathPrefix(changedPaths...)
if prefix != "" {
// git is annoying if you try to git-add git-ignored files (printing a message that is very helpful when there is a human
// attached to stdin/stderr), so let's always just target the last *folder* by lopping off everything after the last slash:
lastIndex := strings.LastIndex(prefix, "/")
if lastIndex == -1 {
prefix = ""
} else {
prefix = prefix[:lastIndex+1]
}
}
if prefix == "" {
prefix = "."
}
changed, err = src.GutCommit(prefix, updateUntracked)
if err != nil {
return false, err
}
return changed, nil
}
// Start up an instance of fswatch/inotifywait for each context to watch for file changes
for _, ctx := range allContexts {
goTask(ctx, func(taskCtx *SyncContext) {
taskCtx.WatchForChanges(func(filepath string) {
eventChan <- FileEvent{taskCtx, filepath}
})
})
}
joinTasks()
var haveChanges bool
var changedPaths map[*SyncContext]map[string]bool
var changedIgnore map[*SyncContext]bool
var forceSyncCheck bool
clearChanges := func() {
haveChanges = false
changedPaths = make(map[*SyncContext]map[string]bool)
changedIgnore = make(map[*SyncContext]bool)
forceSyncCheck = false
}
clearChanges()
flushChanges := func() {
// Flush all file changes, in three phases:
// - Commit on all nodes that have seen recent changes
// - Push and merge all changes to the local master
// - Pull changes back out to the remotes.
// First phase, Commit.
// (This is typically just one context, except at startup, when we create a pseudo-change event for each context.)
changedCtxChan := make(chan *SyncContext)
for ctx, pathMap := range changedPaths {
go func(taskCtx *SyncContext, taskPathMap map[string]bool) {
paths := []string{}
for path := range taskPathMap {
paths = append(paths, path)
}
_, changedThisIgnore := changedIgnore[taskCtx]
// log.Printf("Starting commitScoped on %s\n", taskCtx.NameAnsi())
changed, err := commitScoped(taskCtx, paths, changedThisIgnore)
// log.Printf("Finished commitScoped on %s\n", taskCtx.NameAnsi())
if err != nil {
status.Printf("@(error:Commit failed on) %s@(error:: %v)\n", taskCtx.NameAnsi(), err)
changedCtxChan <- nil
} else {
if changed {
changedCtxChan <- taskCtx
} else {
changedCtxChan <- nil
}
}
}(ctx, pathMap)
}
changedCtxs := []*SyncContext{}
for range changedPaths {
ctx := <-changedCtxChan
if ctx != nil {
changedCtxs = append(changedCtxs, ctx)
}
}
if !forceSyncCheck && len(changedCtxs) == 0 {
clearChanges()
return
}
clearChanges()
// Second phase, Push to local.
// XXX if remote has a previous change (i.e. from when it was the local), we don't necessarily pick up that change here.
for _, ctx := range changedCtxs {
if ctx != local {
// log.Printf("Starting GutPush on %s\n", ctx.NameAnsi())
err = ctx.GutPush()
// log.Printf("Finished GutPush on %s\n", ctx.NameAnsi())
if err != nil {
status.Printf("@(error:Failed to push changes from) %s @(error:to local: %v)\n", ctx.NameAnsi(), err)
continue
}
// log.Printf("Starting GutMerge on %s\n", ctx.NameAnsi())
err = local.GutMerge(ctx.BranchName())
// log.Printf("Finished GutMerge on %s\n", ctx.NameAnsi())
if err != nil {
status.Printf("@(error:Failed to merge) %s @(error:into) master@(error:: %v)\n", ctx.BranchName(), err)
}
}
}
masterCommitChan := make(chan string, len(remotes))
go func() {
masterCommit, err := local.GutRevParseHead()
if err != nil {
status.Printf("@(error:Failed to rev-parse head on local: %v)\n", err)
masterCommit = ""
}
for i := 0; i < len(remotes); i++ {
masterCommitChan <- masterCommit
}
}()
// Third phase, Pull to remotes.
done := make(chan error)
for _, ctx := range remotes {
go func(taskCtx *SyncContext) {
if !taskCtx.IsConnected() {
status.Printf("@(dim:Skipping sync to disconnected remote) %s\n", taskCtx.NameAnsi())
done <- nil
return
}
// log.Printf("Starting GutRevParseHead on %s\n", taskCtx.NameAnsi())
myCommit, err := taskCtx.GutRevParseHead()
// log.Printf("Finished GutRevParseHead on %s\n", taskCtx.NameAnsi())
if err != nil {
done <- err
return
}
localMasterCommit := <-masterCommitChan
if localMasterCommit != "" && myCommit != localMasterCommit {
// log.Printf("Starting GutPull on %s\n", taskCtx.NameAnsi())
err = taskCtx.GutPull()
// log.Printf("Finished GutPull on %s\n", taskCtx.NameAnsi())
}
// log.Printf("Finished third phase on %s\n", taskCtx.NameAnsi())
done <- err
}(ctx)
}
for _, ctx := range remotes {
select {
case err = <-done:
if err == NeedsCommitError {
status.Printf("@(dim:Need to commit on) %s @(dim:before it can pull.)\n", ctx.NameAnsi())
go func() {
eventChan <- FileEvent{ctx, forceFullSyncCheckString}
}()
err = nil
}
if err != nil {
status.Printf("@(error:Failed to pull changes to) %s@(error:: %v)\n", ctx.NameAnsi(), err)
}
case <-time.After(60 * time.Second):
status.Printf("@(warn:Timed out while waiting for a remote to finish syncing.)\n")
}
}
}
go func() {
// Note: The filesystem watchers are not necessarily listening to all updates yet, so we could miss file changes that occur between
// the commit_and_update calls below and the time that the filesystem watches are attached.
for _, ctx := range allContexts {
// Queue up an event to force checking for changes.
eventChan <- FileEvent{ctx, forceFullSyncCheckString}
}
}()
// Process messages from eventChan forever. Read as many messages as possible before needing to wait at least
// commitDebounceDuration, at which point we flush all the events (and commit & sync changes, etc).
var event FileEvent
for {
if haveChanges {
select {
case event = <-eventChan:
break
case <-time.After(commitDebounceDuration):
flushChanges()
continue
}
} else {
event = <-eventChan
}
if event.filepath == forceFullSyncCheckString {
// Force an attempt to update all the remotes, even if there are no new commits.
forceSyncCheck = true
// And also force a full commit & update-untracked on this node
changedIgnore[event.ctx] = true
}
parts := strings.Split(event.filepath, "/")
skip := false
for _, part := range parts {
if part == ".gut" {
skip = true
} else if part == ".gutignore" {
changedIgnore[event.ctx] = true
}
}
if skip {
continue
}
// status.Printf("@(dim:[)%s@(dim:] changed on) %s\n", event.filepath, event.ctx.NameAnsi())
haveChanges = true
ctxChanged, ok := changedPaths[event.ctx]
if !ok {
ctxChanged = make(map[string]bool)
changedPaths[event.ctx] = ctxChanged
}
ctxChanged[event.filepath] = true
}
}
var shutdownLock sync.Mutex
func Shutdown(reason string, exitcode int) {
shutdownLock.Lock()
for i := 0; i < shutdownChanLen; i++ {
shutdownChan <- true
}
status := log.New(os.Stderr, "", 0)
if reason != "" {
status.Printf("%s ", reason)
}
status.Printf("Stopping all subprocesses...\n")
done := make(chan bool)
for _, _ctx := range AllSyncContexts {
go func(ctx *SyncContext) {
if ctx.IsConnected() {
ctx.KillAllSessions()
// This generally shouldn't *do* anything other than
// clean up the PID files, as the killing would have
// been done already in KillAllSessions.
ctx.KillAllViaPidfiles()
ctx.Close()
}
done <- true
}(_ctx)
}
for range AllSyncContexts {
select {
case <-done:
case <-time.After(3 * time.Second):
}
}
status.Printf("Exiting.")
os.Stderr.WriteString("\n")
os.Exit(exitcode)
}
func printUsageInfoAndExit() {
status := log.New(os.Stderr, "", 0)
status.Println("")
status.Println("Usage: gut sync [option]... path [{ [user@]host:path | path }]...")
status.Println("")
status.Println("Options:")
status.Println(" --no-color: Disable ANSI colors")
status.Println(" --verbose: Show all commands executed")
status.Println(" --build-deps: Build gut-commands from git source instead of downloading tarball")
status.Println("--build-parallel: Build gut-commands in parallel via make -j {num_cores}")
status.Println("")
status.Println("Examples:")
status.Println(" Sync folder with one remote: gut sync ~/stuff/ [email protected]:~/stuff/")
status.Println(" Sync folder with two remotes: gut sync stuff/ remotehost1.com:~/work/ [email protected]:/tmp/sync")
status.Println(" Sync folders locally: gut sync ~/mywork /mnt/backup/mywork/")
status.Println("Just track changes, no syncing: gut sync ~/mywork")
status.Println("")
os.Exit(0)
}
func main() {
log.EnableColorTemplate()
log.AddAnsiColorCode("error", 31)
log.AddAnsiColorCode("commit", 32)
log.AddAnsiColorCode("path", 36)
var args []string = os.Args[1:]
if len(args) == 0 {
fmt.Println("You must specify a gut-command, e.g. `gut sync ...`")
os.Exit(1)
}
var cmd = args[0]
if IsGitCommand(cmd) {
if IsDangerousGitCommand(cmd) {
if len(args) < 2 || args[1] != "--danger" {
status := log.New(os.Stderr, "", 0)
status.Printf("@(dim:In order to prevent damage caused by accidentally using `)gut %s ...@(dim:`)\n", cmd)
status.Printf("@(dim:in cases where `)git %s ...@(dim:` was intended, you must append `)--danger@(dim:`)\n", cmd)
status.Printf("@(dim:immediately after the command, i.e. `)gut %s --danger ...@(dim:`.)\n", cmd)
status.Printf("@(dim:Alternatively, you could invoke) gut @(dim:directly at) @(path:%s)@(dim:.)\n", GutExePath)
status.Printf("@(dim:The commands that require this flag are:) %s\n", strings.Join(DangerousGitCommands, " "))
os.Exit(1)
}
// Split the "--danger" flag out before handing off the args list to the gut-command:
if len(args) > 2 {
args = append(args[:1], args[2:]...)
} else {
args = args[:1]
}
}
homeDir, err := homedir.Dir()
if err != nil {
log.Bail(err)
}
var gutExe = path.Join(homeDir, GutExePath[2:])
syscall.Exec(gutExe, append([]string{gutExe}, args...), os.Environ())
fmt.Printf("Failed to exec %s", gutExe)
os.Exit(1)
}
autorestart.CleanUpChildZombiesQuietly()
go autorestart.RestartOnChange()
status := log.New(os.Stderr, "", 0)
args = args[1:]
parser := flags.NewParser(&OptsCommon, flags.IgnoreUnknown)
var argsRemaining, err = parser.ParseArgs(args)
if err != nil {
printUsageInfoAndExit()
}
if OptsCommon.NoColor {
log.DisableColor()
}
if OptsCommon.Version {
status.Printf("gut-sync %s\n", GutVersion)
os.Exit(0)
}
bismuth.SetVerbose(OptsCommon.Verbose)
go func() {
sigintChan := make(chan os.Signal, 1)
signal.Notify(sigintChan, os.Interrupt)
<-sigintChan
Shutdown("Received SIGINT.", 1)
}()
go func() {
sighupChan := autorestart.NotifyOnSighup()
<-sighupChan
Shutdown("Received SIGHUP.", 0)
}()
if cmd == "build" {
var local = NewSyncContext()
err := local.Connect()
if err != nil {
status.Bail(err)
}
err = local.CheckLocalDeps()
if err != nil {
status.Bail(err)
}
didSomething, err := EnsureBuild(local, local)
if err != nil {
status.Bail(err)
}
if !didSomething {
status.Printf("@(dim:gut) " + GitVersion + " @(dim:has already been built.)\n")
}
} else if cmd == "sync" {
var remoteArgs, err = flags.ParseArgs(&OptsSync, argsRemaining)
if err != nil {
printUsageInfoAndExit()
}
ready := make(chan error)
local := NewSyncContext()
err = local.ParseSyncPath(OptsSync.Positional.LocalPath)
if err != nil {
status.Bail(err)
}
if len(remoteArgs) > 0 && os.Getenv("SSH_AUTH_SOCK") == "" {
log.Printf("@(error:SSH_AUTH_SOCK is not set in environment. Start up an ssh agent first before running gut-sync.)\n")
Shutdown("", 1)
}
go func() {
err = local.Connect()
if err != nil {
status.Bail(err)
}
err = local.CheckLocalDeps()
if err != nil {
status.Bail(err)
}
local.KillAllViaPidfiles()
local.SaveDaemonPid("gut", os.Getpid())
ready <- nil
}()
remotes := []*SyncContext{}
for _, remotePath := range remoteArgs {
remote := NewSyncContext()
remotes = append(remotes, remote)
err = remote.ParseSyncPath(remotePath)
if err != nil {
status.Bail(err)
}
go func(_remote *SyncContext) {
err = _remote.Connect()
if err != nil {
status.Printf("@(error:Failed to connect to %s: %s)\n", _remote.Hostname(), err)
Shutdown("", 1)
}
_remote.KillAllViaPidfiles()
err = _remote.CheckRemoteDeps()
if err != nil {
status.Bail(err)
}
ready <- nil
}(remote)
}
for i := 0; i < len(remotes)+1; i++ {
<-ready
}
err = Sync(local, remotes)
if err != nil {
status.Bail(err)
}
}
}
| [
"\"SSH_AUTH_SOCK\""
]
| []
| [
"SSH_AUTH_SOCK"
]
| [] | ["SSH_AUTH_SOCK"] | go | 1 | 0 | |
src/combined_with_gaze.py | #gaze tracking https://github.com/antoinelame/GazeTracking
import numpy as np
import argparse
import matplotlib.pyplot as plt
import cv2
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from gaze_tracking import GazeTracking
import os
import dlib
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def plot_model_history(model_history):
"""
Plot Accuracy and Loss curves given the model_history
"""
fig, axs = plt.subplots(1,2,figsize=(15,5))
# summarize history for accuracy
axs[0].plot(range(1,len(model_history.history['accuracy'])+1),model_history.history['accuracy'])
axs[0].plot(range(1,len(model_history.history['val_accuracy'])+1),model_history.history['val_accuracy'])
axs[0].set_title('Model Accuracy')
axs[0].set_ylabel('Accuracy')
axs[0].set_xlabel('Epoch')
axs[0].set_xticks(np.arange(1,len(model_history.history['accuracy'])+1),len(model_history.history['accuracy'])/10)
axs[0].legend(['train', 'val'], loc='best')
# summarize history for loss
axs[1].plot(range(1,len(model_history.history['loss'])+1),model_history.history['loss'])
axs[1].plot(range(1,len(model_history.history['val_loss'])+1),model_history.history['val_loss'])
axs[1].set_title('Model Loss')
axs[1].set_ylabel('Loss')
axs[1].set_xlabel('Epoch')
axs[1].set_xticks(np.arange(1,len(model_history.history['loss'])+1),len(model_history.history['loss'])/10)
axs[1].legend(['train', 'val'], loc='best')
fig.savefig('plot.png')
plt.show()
# Define data generators
train_dir = 'data/train'
val_dir = 'data/test'
num_train = 28709
num_val = 7178
batch_size = 64
num_epoch = 50
train_datagen = ImageDataGenerator(rescale=1./255)
val_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size=(48,48),
batch_size=batch_size,
color_mode="grayscale",
class_mode='categorical')
validation_generator = val_datagen.flow_from_directory(
val_dir,
target_size=(48,48),
batch_size=batch_size,
color_mode="grayscale",
class_mode='categorical')
# Create the model
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(48,48,1)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(7, activation='softmax'))
def shape_to_np(shape, dtype="int"):
# initialize the list of (x, y)-coordinates
coords = np.zeros((68, 2), dtype=dtype)
# loop over the 68 facial landmarks and convert them
# to a 2-tuple of (x, y)-coordinates
for i in range(0, 68):
coords[i] = (shape.part(i).x, shape.part(i).y)
# return the list of (x, y)-coordinates
return coords
def eye_on_mask(mask, side):
points = [shape[i] for i in side]
points = np.array(points, dtype=np.int32)
mask = cv2.fillConvexPoly(mask, points, 255)
return mask
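# Locate the pupil: take the largest contour in one half of the thresholded eye image and mark its centroid.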
def contouring(thresh, mid, img, right=False):
cnts, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
try:
cnt = max(cnts, key = cv2.contourArea)
M = cv2.moments(cnt)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
if right:
cx += mid
cv2.circle(img, (cx, cy), 4, (0, 0, 255), 2)
except:
pass
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_68.dat')
left = [36, 37, 38, 39, 40, 41]
right = [42, 43, 44, 45, 46, 47]
gaze = GazeTracking()
cap = cv2.VideoCapture(0)
ret, img = cap.read()
thresh = img.copy()
cv2.namedWindow('image')
kernel = np.ones((9, 9), np.uint8)
def nothing(x):
pass
# cv2.createTrackbar('threshold', 'image', 0, 255, nothing)
model.load_weights('model.h5')
# prevents openCL usage and unnecessary logging messages
cv2.ocl.setUseOpenCL(False)
# dictionary which assigns each label an emotion (alphabetical order)
emotion_dict = {0: "Angry", 1: "Disgusted", 2: "Fearful", 3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}
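# Main loop: read webcam frames, run gaze tracking, then facial landmarks and emotion prediction until 'q' is pressed.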
while(True):
ret, img = cap.read()
# We send this frame to GazeTracking to analyze it
gaze.refresh(img)
img = gaze.annotated_frame()
text = ""
if gaze.is_blinking() or gaze.is_right() or gaze.is_left():
text = "Pay attention!"
elif gaze.is_center():
text = "Looking @ Screen"
cv2.putText(img, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6, (147, 58, 31), 2)
left_pupil = gaze.pupil_left_coords()
right_pupil = gaze.pupil_right_coords()
#cv2.putText(img, "Left pupil: " + str(left_pupil), (90, 130), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
#cv2.putText(img, "Right pupil: " + str(right_pupil), (90, 165), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
cv2.imshow("Demo", img)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 1)
for rect in rects:
shape = predictor(gray, rect)
shape = shape_to_np(shape)
mask = np.zeros(img.shape[:2], dtype=np.uint8)
mask = eye_on_mask(mask, left)
mask = eye_on_mask(mask, right)
mask = cv2.dilate(mask, kernel, 5)
eyes = cv2.bitwise_and(img, img, mask=mask)
mask = (eyes == [0, 0, 0]).all(axis=2)
eyes[mask] = [255, 255, 255]
mid = (shape[42][0] + shape[39][0]) // 2
eyes_gray = cv2.cvtColor(eyes, cv2.COLOR_BGR2GRAY)
threshold = 50
_, thresh = cv2.threshold(eyes_gray, threshold, 255, cv2.THRESH_BINARY)
thresh = cv2.erode(thresh, None, iterations=2) #1
thresh = cv2.dilate(thresh, None, iterations=4) #2
thresh = cv2.medianBlur(thresh, 3) #3
thresh = cv2.bitwise_not(thresh)
contouring(thresh[:, 0:mid], mid, img)
contouring(thresh[:, mid:], mid, img, True)
for (x, y) in shape[36:48]:
cv2.circle(img, (x, y), 2, (255, 0, 0), -1)
# show the image with the face detections + facial landmarks
facecasc = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = facecasc.detectMultiScale(gray,scaleFactor=1.3, minNeighbors=5)
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y-50), (x+w, y+h+10), (255, 0, 0), 2)
roi_gray = gray[y:y + h, x:x + w]
cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
prediction = model.predict(cropped_img)
maxindex = int(np.argmax(prediction))
cv2.putText(img, emotion_dict[maxindex], (x+20, y-60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
cv2.imshow('Video', cv2.resize(img,(1600,960),interpolation = cv2.INTER_CUBIC))
# cv2.imshow("image", thresh)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
| []
| []
| [
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
cmd/social/main.go | package main
import (
"context"
"flag"
"os"
"os/signal"
"time"
_ "github.com/joho/godotenv/autoload"
"github.com/Zucke/social_prove/internal/db/mongo"
"github.com/Zucke/social_prove/internal/server"
"github.com/Zucke/social_prove/pkg/auth"
"github.com/Zucke/social_prove/pkg/logger"
)
func main() {
var port string
if port = os.Getenv("PORT"); port == "" {
port = "8000"
}
var dbURL string
if dbURL = os.Getenv("DATABASE_URI"); dbURL == "" {
dbURL = "mongodb://127.0.0.1:27017"
}
debug := flag.Bool("debug", false, "Debug mode")
flag.Parse()
log := logger.New("draid", !*debug)
ctx := context.Background()
dbClient, err := mongo.NewClient(ctx, log, dbURL)
if err != nil {
log.Error(err)
os.Exit(1)
}
err = dbClient.Start(ctx)
if err != nil {
log.Error(err)
os.Exit(1)
}
var fa auth.Repository
// firebaseCredentialsPath := os.Getenv("FIREBASE_CREDENTIALS_PATH")
// fa, err := auth.NewFirebaseAuth(context.Background(), firebaseCredentialsPath)
// if err != nil {
// log.Error(err)
// os.Exit(1)
// }
srv, err := server.New(port, *debug, dbClient, log, fa)
if err != nil {
log.Error(err)
os.Exit(1)
}
// Start the server.
go srv.Start()
// Wait for an interrupt.
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
<-c
// Attempt a graceful shutdown.
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
dbClient.Close(ctx)
srv.Close(ctx)
}
| [
"\"PORT\"",
"\"DATABASE_URI\"",
"\"FIREBASE_CREDENTIALS_PATH\""
]
| []
| [
"PORT",
"FIREBASE_CREDENTIALS_PATH",
"DATABASE_URI"
]
| [] | ["PORT", "FIREBASE_CREDENTIALS_PATH", "DATABASE_URI"] | go | 3 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
# This allows easy placement of apps within the interior
# backend_hotornot_api directory.
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(current_path, "backend_hotornot_api"))
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
cmd/root.go | package cmd
import (
"fmt"
"os"
"github.com/mitchellh/go-homedir"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/xetys/hetzner-kube/pkg"
)
var cfgFile string
var debugMode bool
// rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{
Use: "hetzner-kube",
Short: "A CLI tool to provision kubernetes clusters on Hetzner Cloud",
Long: `A tool for creating and managing kubernetes clusters on Hetzner Cloud.
`,
PersistentPreRun: func(cmd *cobra.Command, args []string) {
pkg.RenderProgressBars = false
if debugMode {
fmt.Println("Running in Debug Mode!")
pkg.RenderProgressBars = true
}
AppConf = NewAppConfig(debugMode)
},
}
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
if err := rootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
func init() {
cobra.OnInitialize(initConfig)
rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file to use")
rootCmd.PersistentFlags().BoolVarP(&debugMode, "debug", "d", false, "debug mode")
}
// initConfig reads in config file and ENV variables if set.
func initConfig() {
if cfgFile != "" {
// Use config file from the flag.
viper.SetConfigFile(cfgFile)
} else {
setConfigDirectory()
}
// read in environment variables that match
viper.AutomaticEnv()
// If a config file is found, read it in.
if err := viper.ReadInConfig(); err == nil {
fmt.Println("Using config file:", viper.ConfigFileUsed())
}
}
func setConfigDirectory() {
// Find config dir based on XDG Base Directory Specification
// https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
xdgConfig := os.Getenv("XDG_CONFIG_HOME")
if xdgConfig != "" {
viper.AddConfigPath(xdgConfig)
}
	// Fall back to home directory
home, err := homedir.Dir()
if err != nil {
fmt.Println(err)
}
if err == nil {
viper.AddConfigPath(home)
}
if xdgConfig == "" && err != nil {
fmt.Println("Unable to detect any config location, please specify it with --config flag")
os.Exit(1)
}
// Search config directory with name ".hetzner-kube" (without extension).
viper.SetConfigName(".hetzner-kube")
}
| [
"\"XDG_CONFIG_HOME\""
]
| []
| [
"XDG_CONFIG_HOME"
]
| [] | ["XDG_CONFIG_HOME"] | go | 1 | 0 | |
pkg/cluster/provider.go | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cluster
import (
"os"
"sort"
"sigs.k8s.io/kind/pkg/cluster/constants"
"sigs.k8s.io/kind/pkg/cluster/nodes"
"sigs.k8s.io/kind/pkg/log"
internalcontext "sigs.k8s.io/kind/pkg/cluster/internal/context"
internalcreate "sigs.k8s.io/kind/pkg/cluster/internal/create"
internaldelete "sigs.k8s.io/kind/pkg/cluster/internal/delete"
"sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig"
"sigs.k8s.io/kind/pkg/cluster/internal/providers/docker"
"sigs.k8s.io/kind/pkg/cluster/internal/providers/podman"
internalprovider "sigs.k8s.io/kind/pkg/cluster/internal/providers/provider"
)
// DefaultName is the default cluster name
const DefaultName = constants.DefaultClusterName
// Provider is used to perform cluster operations
type Provider struct {
provider internalprovider.Provider
logger log.Logger
}
// NewProvider returns a new provider based on the supplied options
func NewProvider(options ...ProviderOption) *Provider {
p := &Provider{
logger: log.NoopLogger{},
}
// Ensure we apply the logger options first, while maintaining the order
// otherwise. This way we can trivially init the internal provider with
// the logger.
sort.SliceStable(options, func(i, j int) bool {
_, iIsLogger := options[i].(providerLoggerOption)
_, jIsLogger := options[j].(providerLoggerOption)
return iIsLogger && !jIsLogger
})
for _, o := range options {
o.apply(p)
}
if p.provider == nil {
// check if provider was overridden
// TODO: consider auto-detection once more than 1 provider is stable
providerEnv := os.Getenv("KIND_EXPERIMENTAL_PROVIDER")
if providerEnv == "podman" {
p.provider = podman.NewProvider(p.logger)
p.logger.Warn("enabling experimental podman provider")
} else {
p.provider = docker.NewProvider(p.logger)
}
}
return p
}
// ProviderOption is an option for configuring a provider
type ProviderOption interface {
apply(p *Provider)
}
// providerLoggerOption is a trivial ProviderOption adapter
// we use a type specific to logging options so we can handle them first
type providerLoggerOption func(p *Provider)
func (a providerLoggerOption) apply(p *Provider) {
a(p)
}
// ProviderWithLogger configures the provider to use Logger logger
func ProviderWithLogger(logger log.Logger) ProviderOption {
return providerLoggerOption(func(p *Provider) {
p.logger = logger
})
}
// TODO: remove this, rename internal context to something else
func (p *Provider) ic(name string) *internalcontext.Context {
return internalcontext.NewProviderContext(p.provider, name)
}
// Create provisions and starts a kubernetes-in-docker cluster
func (p *Provider) Create(name string, options ...CreateOption) error {
// apply options
opts := &internalcreate.ClusterOptions{}
for _, o := range options {
if err := o.apply(opts); err != nil {
return err
}
}
return internalcreate.Cluster(p.logger, p.ic(name), opts)
}
// Delete tears down a kubernetes-in-docker cluster
func (p *Provider) Delete(name, explicitKubeconfigPath string) error {
return internaldelete.Cluster(p.logger, p.ic(name), explicitKubeconfigPath)
}
// List returns a list of clusters for which nodes exist
func (p *Provider) List() ([]string, error) {
return p.provider.ListClusters()
}
// KubeConfig returns the KUBECONFIG for the cluster
// If internal is true, this will contain the internal IP etc.
// If internal is false, this will contain the host IP etc.
func (p *Provider) KubeConfig(name string, internal bool) (string, error) {
return kubeconfig.Get(p.ic(name), !internal)
}
// ExportKubeConfig exports the KUBECONFIG for the cluster, merging
// it into the selected file, following the rules from
// https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#config
// where explicitPath is the --kubeconfig value.
func (p *Provider) ExportKubeConfig(name string, explicitPath string) error {
return kubeconfig.Export(p.ic(name), explicitPath)
}
// ListNodes returns the list of container IDs for the "nodes" in the cluster
func (p *Provider) ListNodes(name string) ([]nodes.Node, error) {
return p.ic(name).ListNodes()
}
// ListInternalNodes returns the list of container IDs for the "nodes" in the cluster
// that are not external
func (p *Provider) ListInternalNodes(name string) ([]nodes.Node, error) {
return p.ic(name).ListInternalNodes()
}
// CollectLogs will populate dir with cluster logs and other debug files
func (p *Provider) CollectLogs(name, dir string) error {
return p.ic(name).CollectLogs(dir)
}
| [
"\"KIND_EXPERIMENTAL_PROVIDER\""
]
| []
| [
"KIND_EXPERIMENTAL_PROVIDER"
]
| [] | ["KIND_EXPERIMENTAL_PROVIDER"] | go | 1 | 0 | |
pp.py | # Parallel Python Software: http://www.parallelpython.com
# Copyright (c) 2005-2012, Vitalii Vanovschi
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
Parallel Python Software, Execution Server
http://www.parallelpython.com - updates, documentation, examples and support
forums
"""
import os
import threading
import logging
import inspect
import sys
import types
import time
import atexit
import user
import cPickle as pickle
import pptransport
import ppauto
import ppcommon
copyright = "Copyright (c) 2005-2012 Vitalii Vanovschi. All rights reserved"
version = "1.6.4"
# Reconnect persistent rworkers in seconds.
RECONNECT_WAIT_TIME = 5
# If set to true prints out the exceptions which are expected.
SHOW_EXPECTED_EXCEPTIONS = False
# we need to have set even in Python 2.3
try:
set
except NameError:
from sets import Set as set
_USE_SUBPROCESS = False
try:
import subprocess
_USE_SUBPROCESS = True
except ImportError:
import popen2
class _Task(object):
"""Class describing single task (job)
"""
def __init__(self, server, tid, callback=None,
callbackargs=(), group='default'):
"""Initializes the task"""
self.lock = threading.Lock()
self.lock.acquire()
self.tid = tid
self.server = server
self.callback = callback
self.callbackargs = callbackargs
self.group = group
self.finished = False
self.unpickled = False
def finalize(self, sresult):
"""Finalizes the task.
For internal use only"""
self.sresult = sresult
if self.callback:
self.__unpickle()
self.lock.release()
self.finished = True
def __call__(self, raw_result=False):
"""Retrieves result of the task"""
if not self.finished and self.server._exiting:
raise DestroyedServerError("Server was destroyed before the job completion")
self.wait()
if not self.unpickled and not raw_result:
self.__unpickle()
if raw_result:
return self.sresult
else:
return self.result
def wait(self):
"""Waits for the task"""
if not self.finished:
self.lock.acquire()
self.lock.release()
def __unpickle(self):
"""Unpickles the result of the task"""
self.result, sout = pickle.loads(self.sresult)
self.unpickled = True
if len(sout) > 0:
print sout,
if self.callback:
args = self.callbackargs + (self.result, )
self.callback(*args)
class _Worker(object):
"""Local worker class
"""
command = [sys.executable, "-u", "-m", "ppworker"]
command.append("2>/dev/null")
def __init__(self, restart_on_free, pickle_proto):
"""Initializes local worker"""
self.restart_on_free = restart_on_free
self.pickle_proto = pickle_proto
self.start()
def start(self):
"""Starts local worker"""
if _USE_SUBPROCESS:
proc = subprocess.Popen(self.command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.t = pptransport.CPipeTransport(proc.stdout, proc.stdin)
else:
self.t = pptransport.CPipeTransport(
*popen2.popen3(self.command)[:2])
self.pid = int(self.t.receive())
self.t.send(str(self.pickle_proto))
self.is_free = True
def stop(self):
"""Stops local worker"""
self.is_free = False
self.t.send('EXIT') # can send any string - it will exit
self.t.close()
def restart(self):
"""Restarts local worker"""
self.stop()
self.start()
def free(self):
"""Frees local worker"""
if self.restart_on_free:
self.restart()
else:
self.is_free = True
class _RWorker(pptransport.CSocketTransport):
"""Remote worker class
"""
def __init__(self, host, port, secret, server, message, persistent, socket_timeout):
"""Initializes remote worker"""
self.server = server
self.persistent = persistent
self.host = host
self.port = port
self.secret = secret
self.address = (host, port)
self.id = host + ":" + str(port)
self.server.logger.debug("Creating Rworker id=%s persistent=%s"
% (self.id, persistent))
self.socket_timeout = socket_timeout
self.connect(message)
def __del__(self):
"""Closes connection with remote server"""
self.close()
def connect(self, message=None):
"""Connects to a remote server"""
        while not self.server._exiting:
try:
pptransport.SocketTransport.__init__(self, None, self.socket_timeout)
self._connect(self.host, self.port)
if not self.authenticate(self.secret):
self.server.logger.error("Authentication failed for host=%s, port=%s"
% (self.host, self.port))
return False
if message:
self.send(message)
self.is_free = True
return True
except:
if SHOW_EXPECTED_EXCEPTIONS:
self.server.logger.debug("Exception in connect method "
"(possibly expected)", exc_info=True)
if not self.persistent:
self.server.logger.debug("Deleting from queue Rworker %s"
% (self.id, ))
return False
self.server.logger.info("Failed to reconnect with " \
"(host=%s, port=%i), will try again in %i s"
% (self.host, self.port, RECONNECT_WAIT_TIME))
time.sleep(RECONNECT_WAIT_TIME)
class _Statistics(object):
"""Class to hold execution statisitcs for a single node
"""
def __init__(self, ncpus, rworker=None):
"""Initializes statistics for a node"""
self.ncpus = ncpus
self.time = 0.0
self.njobs = 0
self.rworker = rworker
class Template(object):
"""Template class
"""
def __init__(self, job_server, func, depfuncs=(), modules=(),
callback=None, callbackargs=(), group='default', globals=None):
"""Creates Template instance
        job_server - pp server for submitting jobs
func - function to be executed
depfuncs - tuple with functions which might be called from 'func'
modules - tuple with module names to import
callback - callback function which will be called with argument
list equal to callbackargs+(result,)
as soon as calculation is done
callbackargs - additional arguments for callback function
group - job group, is used when wait(group) is called to wait for
jobs in a given group to finish
globals - dictionary from which all modules, functions and classes
will be imported, for instance: globals=globals()"""
self.job_server = job_server
self.func = func
self.depfuncs = depfuncs
self.modules = modules
self.callback = callback
self.callbackargs = callbackargs
self.group = group
self.globals = globals
def submit(self, *args):
"""Submits function with *arg arguments to the execution queue
"""
return self.job_server.submit(self.func, args, self.depfuncs,
self.modules, self.callback, self.callbackargs,
self.group, self.globals)
class Server(object):
"""Parallel Python SMP execution server class
"""
default_port = 60000
default_secret = "epo20pdosl;dksldkmm"
def __init__(self, ncpus="autodetect", ppservers=(), secret=None,
restart=False, proto=2, socket_timeout=3600):
"""Creates Server instance
ncpus - the number of worker processes to start on the local
computer, if parameter is omitted it will be set to
the number of processors in the system
ppservers - list of active parallel python execution servers
to connect with
secret - passphrase for network connections, if omitted a default
passphrase will be used. It's highly recommended to use a
custom passphrase for all network connections.
restart - whether to restart worker process after each task completion
proto - protocol number for pickle module
socket_timeout - socket timeout in seconds which is also the maximum
time a remote job could be executed. Increase this value
if you have long running jobs or decrease if connectivity
to remote ppservers is often lost.
        With ncpus = 1 all tasks are executed sequentially
For the best performance either use the default "autodetect" value
or set ncpus to the total number of processors in the system
"""
if not isinstance(ppservers, tuple):
raise TypeError("ppservers argument must be a tuple")
self.logger = logging.getLogger('pp')
self.logger.info("Creating server instance (pp-" + version+")")
self.logger.info("Running on Python %s %s", sys.version.split(" ")[0],
sys.platform)
self.__tid = 0
self.__active_tasks = 0
self.__active_tasks_lock = threading.Lock()
self.__queue = []
self.__queue_lock = threading.Lock()
self.__workers = []
self.__rworkers = []
self.__rworkers_reserved = []
self.__sourcesHM = {}
self.__sfuncHM = {}
self.__waittasks = []
self.__waittasks_lock = threading.Lock()
self._exiting = False
self.__accurate_stats = True
self.autopp_list = {}
self.__active_rworkers_list_lock = threading.Lock()
self.__restart_on_free = restart
self.__pickle_proto = proto
self.__connect_locks = {}
# add local directory and sys.path to PYTHONPATH
pythondirs = [os.getcwd()] + sys.path
if "PYTHONPATH" in os.environ and os.environ["PYTHONPATH"]:
pythondirs += os.environ["PYTHONPATH"].split(os.pathsep)
os.environ["PYTHONPATH"] = os.pathsep.join(set(pythondirs))
atexit.register(self.destroy)
self.__stats = {"local": _Statistics(0)}
self.set_ncpus(ncpus)
self.ppservers = []
self.auto_ppservers = []
self.socket_timeout = socket_timeout
for ppserver in ppservers:
ppserver = ppserver.split(":")
host = ppserver[0]
if len(ppserver)>1:
port = int(ppserver[1])
else:
port = Server.default_port
if host.find("*") == -1:
self.ppservers.append((host, port))
else:
if host == "*":
host = "*.*.*.*"
interface = host.replace("*", "0")
broadcast = host.replace("*", "255")
self.auto_ppservers.append(((interface, port),
(broadcast, port)))
self.__stats_lock = threading.Lock()
if secret is not None:
if not isinstance(secret, types.StringType):
raise TypeError("secret must be of a string type")
self.secret = str(secret)
elif hasattr(user, "pp_secret"):
secret = getattr(user, "pp_secret")
if not isinstance(secret, types.StringType):
raise TypeError("secret must be of a string type")
self.secret = str(secret)
else:
self.secret = Server.default_secret
self.__connect()
self.__creation_time = time.time()
self.logger.info("pp local server started with %d workers"
% (self.__ncpus, ))
def submit(self, func, args=(), depfuncs=(), modules=(),
callback=None, callbackargs=(), group='default', globals=None):
"""Submits function to the execution queue
func - function to be executed
args - tuple with arguments of the 'func'
depfuncs - tuple with functions which might be called from 'func'
modules - tuple with module names to import
callback - callback function which will be called with argument
list equal to callbackargs+(result,)
as soon as calculation is done
callbackargs - additional arguments for callback function
group - job group, is used when wait(group) is called to wait for
jobs in a given group to finish
globals - dictionary from which all modules, functions and classes
will be imported, for instance: globals=globals()
"""
# perform some checks for frequent mistakes
if self._exiting:
raise DestroyedServerError("Cannot submit jobs: server"\
" instance has been destroyed")
if not isinstance(args, tuple):
raise TypeError("args argument must be a tuple")
if not isinstance(depfuncs, tuple):
raise TypeError("depfuncs argument must be a tuple")
if not isinstance(modules, tuple):
raise TypeError("modules argument must be a tuple")
if not isinstance(callbackargs, tuple):
raise TypeError("callbackargs argument must be a tuple")
if globals is not None and not isinstance(globals, dict):
raise TypeError("globals argument must be a dictionary")
for module in modules:
if not isinstance(module, types.StringType):
raise TypeError("modules argument must be a list of strings")
tid = self.__gentid()
if globals:
modules += tuple(self.__find_modules("", globals))
modules = tuple(set(modules))
self.logger.debug("Task %i will autoimport next modules: %s" %
(tid, str(modules)))
for object1 in globals.values():
if isinstance(object1, types.FunctionType) \
or isinstance(object1, types.ClassType):
depfuncs += (object1, )
task = _Task(self, tid, callback, callbackargs, group)
self.__waittasks_lock.acquire()
self.__waittasks.append(task)
self.__waittasks_lock.release()
# if the function is a method of a class add self to the arguments list
if isinstance(func, types.MethodType) and func.im_self is not None:
args = (func.im_self, ) + args
        # if there is an instance of a user defined class in the arguments add
        # the whole class to dependencies
for arg in args:
# Checks for both classic or new class instances
if isinstance(arg, types.InstanceType) \
or str(type(arg))[:6] == "<class":
# do not include source for imported modules
if ppcommon.is_not_imported(arg, modules):
depfuncs += tuple(ppcommon.get_class_hierarchy(arg.__class__))
# if there is a function in the arguments add this
        # function to dependencies
for arg in args:
if isinstance(arg, types.FunctionType):
depfuncs += (arg, )
sfunc = self.__dumpsfunc((func, ) + depfuncs, modules)
sargs = pickle.dumps(args, self.__pickle_proto)
self.__queue_lock.acquire()
self.__queue.append((task, sfunc, sargs))
self.__queue_lock.release()
self.logger.debug("Task %i submited, function='%s'" %
(tid, func.func_name))
self.__scheduler()
return task
def restart_local_workers(self):
"""
Restart local worker processes as soon as all jobs are finished.
"""
self.wait()
for worker in self.__workers:
if not worker.restart_on_free:
worker.restart()
def wait(self, group=None):
"""Waits for all jobs in a given group to finish.
If group is omitted waits for all jobs to finish
"""
while True:
self.__waittasks_lock.acquire()
for task in self.__waittasks:
if not group or task.group == group:
self.__waittasks_lock.release()
task.wait()
break
else:
self.__waittasks_lock.release()
break
def get_ncpus(self):
"""Returns the number of local worker processes (ppworkers)"""
return self.__ncpus
def set_ncpus(self, ncpus="autodetect"):
"""Sets the number of local worker processes (ppworkers)
        ncpus - the number of worker processes, if parameter is omitted
it will be set to the number of processors in the system"""
if ncpus == "autodetect":
ncpus = self.__detect_ncpus()
if not isinstance(ncpus, int):
raise TypeError("ncpus must have 'int' type")
if ncpus < 0:
raise ValueError("ncpus must be an integer > 0")
if ncpus > len(self.__workers):
self.__workers.extend([_Worker(self.__restart_on_free,
self.__pickle_proto) for x in\
range(ncpus - len(self.__workers))])
self.__stats["local"].ncpus = ncpus
self.__ncpus = ncpus
def get_active_nodes(self):
"""Returns active nodes as a dictionary
[keys - nodes, values - ncpus]"""
active_nodes = {}
for node, stat in self.__stats.items():
if node == "local" or node in self.autopp_list \
and self.autopp_list[node]:
active_nodes[node] = stat.ncpus
return active_nodes
def get_stats(self):
"""Returns job execution statistics as a dictionary"""
for node, stat in self.__stats.items():
if stat.rworker:
try:
stat.rworker.send("TIME")
stat.time = float(stat.rworker.receive())
except:
self.__accurate_stats = False
stat.time = 0.0
return self.__stats
def print_stats(self):
"""Prints job execution statistics. Useful for benchmarking on
clusters"""
print "Job execution statistics:"
walltime = time.time() - self.__creation_time
statistics = self.get_stats().items()
totaljobs = 0.0
for ppserver, stat in statistics:
totaljobs += stat.njobs
print " job count | % of all jobs | job time sum | " \
"time per job | job server"
for ppserver, stat in statistics:
if stat.njobs:
print " %6i | %6.2f | %8.4f | %11.6f | %s" \
% (stat.njobs, 100.0*stat.njobs/totaljobs, stat.time,
stat.time/stat.njobs, ppserver, )
print "Time elapsed since server creation", walltime
print self.__active_tasks, "active tasks,", self.get_ncpus(), "cores"
if not self.__accurate_stats:
print "WARNING: statistics provided above is not accurate" \
" due to job rescheduling"
print
# all methods below are for internal use only
def insert(self, sfunc, sargs, task=None):
"""Inserts function into the execution queue. It's intended for
internal use only (ppserver.py).
"""
if not task:
tid = self.__gentid()
task = _Task(self, tid)
self.__queue_lock.acquire()
self.__queue.append((task, sfunc, sargs))
self.__queue_lock.release()
self.logger.debug("Task %i inserted" % (task.tid, ))
self.__scheduler()
return task
def connect1(self, host, port, persistent=True):
"""Conects to a remote ppserver specified by host and port"""
hostid = host+":"+str(port)
lock = self.__connect_locks.setdefault(hostid, threading.Lock())
lock.acquire()
try:
if hostid in self.autopp_list:
return
rworker = _RWorker(host, port, self.secret, self, "STAT", persistent, self.socket_timeout)
ncpus = int(rworker.receive())
self.__stats[hostid] = _Statistics(ncpus, rworker)
for x in range(ncpus):
rworker = _RWorker(host, port, self.secret, self, "EXEC", persistent, self.socket_timeout)
self.__update_active_rworkers(rworker.id, 1)
# append is atomic - no need to lock self.__rworkers
self.__rworkers.append(rworker)
#creating reserved rworkers
for x in range(ncpus):
rworker = _RWorker(host, port, self.secret, self, "EXEC", persistent, self.socket_timeout)
self.__update_active_rworkers(rworker.id, 1)
self.__rworkers_reserved.append(rworker)
self.logger.debug("Connected to ppserver (host=%s, port=%i) \
with %i workers" % (host, port, ncpus))
self.__scheduler()
except:
if SHOW_EXPECTED_EXCEPTIONS:
self.logger.debug("Exception in connect1 method (possibly expected)", exc_info=True)
finally:
lock.release()
def __connect(self):
"""Connects to all remote ppservers"""
for ppserver in self.ppservers:
ppcommon.start_thread("connect1", self.connect1, ppserver)
self.discover = ppauto.Discover(self, True)
for ppserver in self.auto_ppservers:
ppcommon.start_thread("discover.run", self.discover.run, ppserver)
def __detect_ncpus(self):
"""Detects the number of effective CPUs in the system"""
#for Linux, Unix and MacOS
if hasattr(os, "sysconf"):
if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
#Linux and Unix
ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
if isinstance(ncpus, int) and ncpus > 0:
return ncpus
else:
#MacOS X
return int(os.popen2("sysctl -n hw.ncpu")[1].read())
#for Windows
if "NUMBER_OF_PROCESSORS" in os.environ:
ncpus = int(os.environ["NUMBER_OF_PROCESSORS"])
if ncpus > 0:
return ncpus
#return the default value
return 1
def __dumpsfunc(self, funcs, modules):
"""Serializes functions and modules"""
hashs = hash(funcs + modules)
if hashs not in self.__sfuncHM:
sources = [self.__get_source(func) for func in funcs]
self.__sfuncHM[hashs] = pickle.dumps(
(funcs[0].func_name, sources, modules),
self.__pickle_proto)
return self.__sfuncHM[hashs]
def __find_modules(self, prefix, dict):
"""recursively finds all the modules in dict"""
modules = []
for name, object in dict.items():
if isinstance(object, types.ModuleType) \
and name not in ("__builtins__", "pp"):
if object.__name__ == prefix+name or prefix == "":
modules.append(object.__name__)
modules.extend(self.__find_modules(
object.__name__+".", object.__dict__))
return modules
def __scheduler(self):
"""Schedules jobs for execution"""
self.__queue_lock.acquire()
while self.__queue:
if self.__active_tasks < self.__ncpus:
#TODO: select a job number on the basis of heuristic
task = self.__queue.pop(0)
for worker in self.__workers:
if worker.is_free:
worker.is_free = False
break
else:
self.logger.error("There are no free workers left")
raise RuntimeError("Error: No free workers")
self.__add_to_active_tasks(1)
try:
self.__stats["local"].njobs += 1
ppcommon.start_thread("run_local", self._run_local, task+(worker, ))
except:
pass
else:
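                # no free local CPU slots - try to dispatch the job to a free remote worker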
for rworker in self.__rworkers:
if rworker.is_free:
rworker.is_free = False
task = self.__queue.pop(0)
self.__stats[rworker.id].njobs += 1
ppcommon.start_thread("run_remote", self._run_remote, task+(rworker, ))
break
else:
if len(self.__queue) > self.__ncpus:
for rworker in self.__rworkers_reserved:
if rworker.is_free:
rworker.is_free = False
task = self.__queue.pop(0)
self.__stats[rworker.id].njobs += 1
ppcommon.start_thread("run_remote", self._run_remote, task+(rworker, ))
break
else:
break
else:
break
self.__queue_lock.release()
def __get_source(self, func):
"""Fetches source of the function"""
hashf = hash(func)
if hashf not in self.__sourcesHM:
#get lines of the source and adjust indent
sourcelines = inspect.getsourcelines(func)[0]
#remove indentation from the first line
sourcelines[0] = sourcelines[0].lstrip()
self.__sourcesHM[hashf] = "".join(sourcelines)
return self.__sourcesHM[hashf]
def _run_local(self, job, sfunc, sargs, worker):
"""Runs a job locally"""
if self._exiting:
return
self.logger.info("Task %i started", job.tid)
start_time = time.time()
try:
worker.t.csend(sfunc)
worker.t.send(sargs)
sresult = worker.t.receive()
job.finalize(sresult)
except:
if self._exiting:
return
if SHOW_EXPECTED_EXCEPTIONS:
self.logger.debug("Exception in _run_local (possibly expected)", exc_info=True)
# remove the job from the waiting list
if self.__waittasks:
self.__waittasks_lock.acquire()
self.__waittasks.remove(job)
self.__waittasks_lock.release()
worker.free()
self.__add_to_active_tasks(-1)
if not self._exiting:
self.__stat_add_time("local", time.time()-start_time)
self.logger.debug("Task %i ended", job.tid)
self.__scheduler()
def _run_remote(self, job, sfunc, sargs, rworker):
"""Runs a job remotelly"""
self.logger.debug("Task (remote) %i started", job.tid)
try:
rworker.csend(sfunc)
rworker.send(sargs)
sresult = rworker.receive()
rworker.is_free = True
job.finalize(sresult)
except:
self.logger.debug("Task %i failed due to broken network " \
"connection - rescheduling", job.tid)
self.insert(sfunc, sargs, job)
self.__scheduler()
self.__update_active_rworkers(rworker.id, -1)
if rworker.connect("EXEC"):
self.__update_active_rworkers(rworker.id, 1)
self.__scheduler()
return
# remove the job from the waiting list
if self.__waittasks:
self.__waittasks_lock.acquire()
self.__waittasks.remove(job)
self.__waittasks_lock.release()
self.logger.debug("Task (remote) %i ended", job.tid)
self.__scheduler()
def __add_to_active_tasks(self, num):
"""Updates the number of active tasks"""
self.__active_tasks_lock.acquire()
self.__active_tasks += num
self.__active_tasks_lock.release()
def __stat_add_time(self, node, time_add):
"""Updates total runtime on the node"""
self.__stats_lock.acquire()
self.__stats[node].time += time_add
self.__stats_lock.release()
def __stat_add_job(self, node):
"""Increments job count on the node"""
self.__stats_lock.acquire()
self.__stats[node].njobs += 1
self.__stats_lock.release()
def __update_active_rworkers(self, id, count):
"""Updates list of active rworkers"""
self.__active_rworkers_list_lock.acquire()
if id not in self.autopp_list:
self.autopp_list[id] = 0
self.autopp_list[id] += count
self.__active_rworkers_list_lock.release()
def __gentid(self):
"""Generates a unique job ID number"""
self.__tid += 1
return self.__tid - 1
def __del__(self):
self._exiting = True
def destroy(self):
"""Kills ppworkers and closes open files"""
self._exiting = True
self.__queue_lock.acquire()
self.__queue = []
self.__queue_lock.release()
for worker in self.__workers:
try:
worker.t.close()
if sys.platform.startswith("win"):
os.popen('TASKKILL /PID '+str(worker.pid)+' /F')
else:
os.kill(worker.pid, 9)
os.waitpid(worker.pid, 0)
except:
pass
class DestroyedServerError(RuntimeError):
pass
# Parallel Python Software: http://www.parallelpython.com
| []
| []
| [
"NUMBER_OF_PROCESSORS",
"PYTHONPATH"
]
| [] | ["NUMBER_OF_PROCESSORS", "PYTHONPATH"] | python | 2 | 0 | |
examples/simple_crud/fiber_v2/main.go | package main
import (
"examples/database"
"github.com/rinatusmanov/gorestfull/drivers/fiber_v2_driver"
"github.com/rinatusmanov/gorestfull/maker"
"gorm.io/driver/postgres"
"gorm.io/gorm"
"os"
"github.com/gofiber/fiber/v2"
)
func main() {
app := fiber.New()
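	// Wrap the fiber app in the driver that the generic CRUD maker understands.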
driver := fiber_v2_driver.NewDriver(app)
dsn := os.Getenv("dsn")
dbGlobal, errDbGlobal := gorm.Open(postgres.Open(dsn), &gorm.Config{
PrepareStmt: true,
})
if errDbGlobal != nil {
panic(errDbGlobal)
}
dbGlobal.
AutoMigrate(&database.Log{})
result, errCrud := maker.Maker(driver, dbGlobal)
if errCrud == nil {
result.Crud(database.Log{})
}
panic(app.Listen(":3000"))
}
| [
"\"dsn\""
]
| []
| [
"dsn"
]
| [] | ["dsn"] | go | 1 | 0 | |
test/functional/buildAndPackage/src/net/adoptopenjdk/test/FeatureTests.java | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.adoptopenjdk.test;
import org.testng.annotations.Test;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Logger;
import static net.adoptopenjdk.test.JdkPlatform.Architecture;
import static net.adoptopenjdk.test.JdkPlatform.OperatingSystem;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertTrue;
/**
* Tests the availability of various features like garbage collectors, flight recorder, that need to be enabled via
* command line flags.
*/
@Test(groups = {"level.extended"})
public class FeatureTests {
private static final Logger LOGGER = Logger.getLogger(FeatureTests.class.getName());
private final JdkVersion jdkVersion = new JdkVersion();
private final JdkPlatform jdkPlatform = new JdkPlatform();
/**
* Tests whether Shenandoah GC is available.
* <p/>
* Shenandoah GC was enabled by default with JDK 15 (JEP 379) and backported to 11.0.9.
*
* @see <a href="https://openjdk.java.net/jeps/379">JEP 379: Shenandoah: A Low-Pause-Time Garbage
* Collector (Production)</a>
* @see <a href="https://bugs.openjdk.java.net/browse/JDK-8250784">JDK-8250784 (Backport)</a>
* @see <a href="https://wiki.openjdk.java.net/display/shenandoah/Main#Main-SupportOverview">Shenandoah Support
* Overview</a>
*/
@Test
public void testShenandoahAvailable() {
String testJdkHome = System.getenv("TEST_JDK_HOME");
if (testJdkHome == null) {
throw new AssertionError("TEST_JDK_HOME is not set");
}
boolean shouldBePresent = false;
if ((jdkVersion.isNewerOrEqual(15) || jdkVersion.isNewerOrEqualSameFeature(11, 0, 9))) {
if (jdkPlatform.runsOn(OperatingSystem.LINUX, Architecture.AARCH64)
|| jdkPlatform.runsOn(OperatingSystem.LINUX, Architecture.X86)
|| jdkPlatform.runsOn(OperatingSystem.LINUX, Architecture.X64)
|| jdkPlatform.runsOn(OperatingSystem.MACOS, Architecture.X64)
|| jdkPlatform.runsOn(OperatingSystem.MACOS, Architecture.AARCH64)
|| jdkPlatform.runsOn(OperatingSystem.WINDOWS, Architecture.X64)
|| jdkPlatform.runsOn(OperatingSystem.WINDOWS, Architecture.AARCH64)
) {
shouldBePresent = true;
}
}
LOGGER.info(String.format("Detected %s on %s, expect Shenandoah to be present: %s",
jdkVersion, jdkPlatform, shouldBePresent));
List<String> command = new ArrayList<>();
command.add(String.format("%s/bin/java", testJdkHome));
command.add("-XX:+UseShenandoahGC");
command.add("-version");
try {
ProcessBuilder processBuilder = new ProcessBuilder(command);
processBuilder.inheritIO();
int retCode = processBuilder.start().waitFor();
if (shouldBePresent) {
assertEquals(retCode, 0, "Expected Shenandoah to be present but it is absent.");
} else {
assertTrue(retCode > 0, "Expected Shenandoah to be absent but it is present.");
}
} catch (InterruptedException | IOException e) {
throw new RuntimeException("Failed to launch JVM", e);
}
}
/**
* Tests whether Z Garbage Collector is available.
* <p/>
* Z Garbage Collector was enabled by default with JDK 15 (JEP 377).
*
* @see <a href="https://openjdk.java.net/jeps/377">JEP 377: ZGC: A Scalable Low-Latency Garbage Collector
* (Production)</a>
*/
@Test
public void testZGCAvailable() {
String testJdkHome = System.getenv("TEST_JDK_HOME");
if (testJdkHome == null) {
throw new AssertionError("TEST_JDK_HOME is not set");
}
boolean shouldBePresent = false;
if (jdkVersion.isNewerOrEqual(15)) {
if (jdkPlatform.runsOn(OperatingSystem.LINUX, Architecture.AARCH64)
|| jdkPlatform.runsOn(OperatingSystem.LINUX, Architecture.X64)
|| jdkPlatform.runsOn(OperatingSystem.MACOS, Architecture.X64)
|| jdkPlatform.runsOn(OperatingSystem.WINDOWS, Architecture.X64)
) {
shouldBePresent = true;
}
}
LOGGER.info(String.format("Detected %s on %s, expect ZGC to be present: %s",
jdkVersion, jdkPlatform, shouldBePresent));
List<String> command = new ArrayList<>();
command.add(String.format("%s/bin/java", testJdkHome));
command.add("-XX:+UseZGC");
command.add("-version");
try {
ProcessBuilder processBuilder = new ProcessBuilder(command);
processBuilder.inheritIO();
int retCode = processBuilder.start().waitFor();
if (shouldBePresent) {
assertEquals(retCode, 0, "Expected ZGC to be present but it is absent.");
} else {
assertTrue(retCode > 0, "Expected ZGC to be absent but it is present.");
}
} catch (InterruptedException | IOException e) {
throw new RuntimeException("Failed to launch JVM", e);
}
}
/**
* Tests whether JDK Flight Recorder is available.
* <p/>
* JDK Flight recorder was added to JDK 11 (JEP 328) and backported to JDK 8u262.
*
* @see <a href="https://openjdk.java.net/jeps/328">JEP 328: Flight Recorder</a>
* @see <a href="https://bugs.openjdk.java.net/browse/JDK-8223147>JDK-8223147 (backport to 8u262)</a>
*/
@Test
public void testJFRAvailable() {
String testJdkHome = System.getenv("TEST_JDK_HOME");
if (testJdkHome == null) {
throw new AssertionError("TEST_JDK_HOME is not set");
}
boolean shouldBePresent = false;
if (jdkVersion.isNewerOrEqual(11) || jdkVersion.isNewerOrEqualSameFeature(8, 0, 262)) {
shouldBePresent = true;
}
LOGGER.info(String.format("Detected %s on %s, expect JFR to be present: %s",
jdkVersion, jdkPlatform, shouldBePresent));
List<String> command = new ArrayList<>();
command.add(String.format("%s/bin/java", testJdkHome));
command.add("-XX:StartFlightRecording");
command.add("-version");
try {
ProcessBuilder processBuilder = new ProcessBuilder(command);
processBuilder.inheritIO();
int retCode = processBuilder.start().waitFor();
if (shouldBePresent) {
assertEquals(retCode, 0, "Expected JFR to be present but it is absent.");
} else {
assertTrue(retCode > 0, "Expected JFR to be absent but it is present.");
}
} catch (InterruptedException | IOException e) {
throw new RuntimeException("Failed to launch JVM", e);
}
}
} | [
"\"TEST_JDK_HOME\"",
"\"TEST_JDK_HOME\"",
"\"TEST_JDK_HOME\""
]
| []
| [
"TEST_JDK_HOME"
]
| [] | ["TEST_JDK_HOME"] | java | 1 | 0 | |
python/runtime/optimize/optflow_submit.py | # Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
import uuid
import oss2
import requests
import six
from runtime.pai.oss import get_bucket
__all__ = [
'run_optimize_on_optflow',
]
OPTFLOW_HTTP_HEADERS = {
'content-type': 'application/json',
'accept': 'application/json',
}
def query_optflow_job_status(url, record_id, user_number, token):
url = "{}?userNumber={}&recordId={}&token={}".format(
url, user_number, record_id, token)
response = requests.get(url, headers=OPTFLOW_HTTP_HEADERS)
response.raise_for_status()
response_json = response.json()
if not response_json['success']:
raise ValueError('cannot get status of job {}'.format(record_id))
return response_json['data']['status'].lower()
def query_optflow_job_log(url, record_id, user_number, token, start_line_num):
url = "{}?userNumber={}&recordId={}&token={}".format(
url, user_number, record_id, token)
response = requests.get(url, headers=OPTFLOW_HTTP_HEADERS, stream=True)
response.raise_for_status()
response_json = response.json()
if not response_json['success']:
raise ValueError('cannot get log of job {}'.format(record_id))
logs = response_json['data']['logs']
end_line_num = len(logs)
# NOTE(sneaxiy): ascii(log) is necessary because the character inside
# log may be out of the range of ASCII characters.
# The slice [1:-1] is used to remove the quotes. e.g.:
# original string "abc" -> ascii("abc") outputs "'abc'"
# -> the slice [1:-1] outputs "abc"
logs = [ascii(log)[1:-1] for log in logs[start_line_num:]]
return logs, end_line_num
def print_job_log_till_finish(status_url, log_url, record_id, user_number,
token):
def call_func_with_retry(func, times):
for _ in six.moves.range(times - 1):
try:
return func()
except:
pass
return func()
status = None
line_num = 0
while True:
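        # poll the job status and fetch any new log lines, retrying transient failures up to 3 times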
query_status = lambda: query_optflow_job_status(
status_url, record_id, user_number, token)
query_log = lambda: query_optflow_job_log(log_url, record_id,
user_number, token, line_num)
status = call_func_with_retry(query_status, 3)
logs, line_num = call_func_with_retry(query_log, 3)
for log in logs:
print(log)
# status may be 'success', 'failed', 'running', 'prepare'
if status in ['success', 'failed']:
break
time.sleep(2) # sleep for some times
return status == 'success'
def submit_optflow_job(train_table, result_table, fsl_file_content, solver,
user_number):
project_name = train_table.split(".")[0]
snapshot_id = os.getenv("SQLFLOW_OPTFLOW_SNAPSHOT_ID")
if not snapshot_id:
raise ValueError("SQLFLOW_OPTFLOW_SNAPSHOT_ID must be set")
token = os.getenv("SQLFLOW_OPTFLOW_TOKEN")
if not token:
raise ValueError("SQLFLOW_OPTFLOW_TOKEN must be set")
submit_job_url = os.getenv("SQLFLOW_OPTFLOW_SUBMIT_JOB_URL")
if not submit_job_url:
raise ValueError("SQLFLOW_OPTFLOW_SUBMIT_JOB_URL must be set")
query_job_status_url = os.getenv("SQLFLOW_OPTFLOW_QUERY_JOB_STATUS_URL")
if not query_job_status_url:
raise ValueError("SQLFLOW_OPTFLOW_QUERY_JOB_STATUS_URL must be set")
query_job_log_url = os.getenv("SQLFLOW_OPTFLOW_QUERY_JOB_LOG_URL")
if not query_job_log_url:
raise ValueError("SQLFLOW_OPTFLOW_QUERY_JOB_LOG_URL must be set")
bucket_name = "sqlflow-optflow-models"
bucket = get_bucket(bucket_name)
try:
bucket_info = bucket.get_bucket_info()
except oss2.exceptions.NoSuchBucket:
# Create bucket if not exists
bucket.create_bucket()
bucket_info = bucket.get_bucket_info()
fsl_file_id = '{}.fsl'.format(uuid.uuid4())
bucket.put_object(fsl_file_id, fsl_file_content)
should_delete_object = True
try:
bucket.put_object_acl(fsl_file_id, oss2.BUCKET_ACL_PUBLIC_READ)
fsl_url = "http://{}.{}/{}".format(bucket_name,
bucket_info.extranet_endpoint,
fsl_file_id)
input_params = {
"input_table": train_table,
"output_table": result_table,
"fsl_path": fsl_url,
"solver_name": solver,
}
json_data = {
"userNumber": user_number,
"projectName": project_name,
"snapshotId": snapshot_id,
"token": token,
"inputParams": input_params,
}
response = requests.post(submit_job_url,
json=json_data,
headers=OPTFLOW_HTTP_HEADERS)
response.raise_for_status()
response_json = response.json()
if not response_json['success']:
raise ValueError("Job submission fails")
print('Job submission succeeds')
record_id = response_json['data']['recordId']
try:
success = print_job_log_till_finish(query_job_status_url,
query_job_log_url, record_id,
user_number, token)
if success:
print("Job succeeds. Save solved result in {}.".format(
result_table))
else:
print("Job fails.")
except:
# FIXME(sneaxiy): we should not delete object if there is any
# network error when querying job status and logs. But when
# should we clean the object?
should_delete_object = False
six.reraise(*sys.exc_info())
finally:
if should_delete_object:
bucket.delete_object(fsl_file_id)
def run_optimize_on_optflow(train_table, variables, variable_type,
result_value_name, objective_expression, direction,
constraint_expressions, solver, result_table,
user_number):
if direction.lower() == "maximize":
direction = "max"
elif direction.lower() == "minimize":
direction = "min"
else:
raise ValueError("direction must be maximize or minimize")
fsl_file_content = '''
variables: {}
var_type: {}
objective: {}
{}
constraints:
{}
'''.format(",".join(variables), variable_type, direction, objective_expression,
"\n".join(constraint_expressions))
submit_optflow_job(train_table=train_table,
result_table=result_table,
fsl_file_content=fsl_file_content,
solver=solver,
user_number=user_number)
| []
| []
| [
"SQLFLOW_OPTFLOW_QUERY_JOB_STATUS_URL",
"SQLFLOW_OPTFLOW_QUERY_JOB_LOG_URL",
"SQLFLOW_OPTFLOW_TOKEN",
"SQLFLOW_OPTFLOW_SNAPSHOT_ID",
"SQLFLOW_OPTFLOW_SUBMIT_JOB_URL"
]
| [] | ["SQLFLOW_OPTFLOW_QUERY_JOB_STATUS_URL", "SQLFLOW_OPTFLOW_QUERY_JOB_LOG_URL", "SQLFLOW_OPTFLOW_TOKEN", "SQLFLOW_OPTFLOW_SNAPSHOT_ID", "SQLFLOW_OPTFLOW_SUBMIT_JOB_URL"] | python | 5 | 0 | |
test/e2e/storage/vsphere_volume_node_poweroff.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"fmt"
"os"
"path/filepath"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vmware/govmomi/find"
"golang.org/x/net/context"
vimtypes "github.com/vmware/govmomi/vim25/types"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
)
/*
Test to verify volume status after node power off:
1. Verify the pod got provisioned on a different node with volume attached to it
2. Verify the volume is detached from the powered off node
*/
var _ = SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", func() {
f := framework.NewDefaultFramework("node-poweroff")
var (
client clientset.Interface
namespace string
vsp *vsphere.VSphere
workingDir string
err error
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
client = f.ClientSet
namespace = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
Expect(len(nodeList.Items) > 1).To(BeTrue(), "At least 2 nodes are required for this test")
vsp, err = getVSphere(client)
Expect(err).NotTo(HaveOccurred())
workingDir = os.Getenv("VSPHERE_WORKING_DIR")
Expect(workingDir).NotTo(BeEmpty())
})
/*
Steps:
1. Create a StorageClass
2. Create a PVC with the StorageClass
3. Create a Deployment with 1 replica, using the PVC
4. Verify the pod got provisioned on a node
5. Verify the volume is attached to the node
6. Power off the node where pod got provisioned
7. Verify the pod got provisioned on a different node
8. Verify the volume is attached to the new node
9. Verify the volume is detached from the old node
10. Delete the Deployment and wait for the volume to be detached
11. Delete the PVC
12. Delete the StorageClass
*/
It("verify volume status after node power off", func() {
By("Creating a Storage Class")
storageClassSpec := getVSphereStorageClassSpec("test-sc", nil)
storageclass, err := client.StorageV1().StorageClasses().Create(storageClassSpec)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class")
pvclaimSpec := getVSphereClaimSpecWithStorageClassAnnotation(namespace, "1Gi", storageclass)
pvclaim, err := framework.CreatePVC(client, namespace, pvclaimSpec)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create PVC with err: %v", err))
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
By("Waiting for PVC to be in bound phase")
pvclaims := []*v1.PersistentVolumeClaim{pvclaim}
pvs, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to wait until PVC phase set to bound: %v", err))
volumePath := pvs[0].Spec.VsphereVolume.VolumePath
By("Creating a Deployment")
deployment, err := framework.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, namespace, pvclaims, "")
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create deployment with err: %v", err))
defer client.ExtensionsV1beta1().Deployments(namespace).Delete(deployment.Name, &metav1.DeleteOptions{})
By("Get pod from the deployment")
podList, err := framework.GetPodsForDeployment(client, deployment)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to get pods from deployment with err: %v", err))
Expect(podList.Items).NotTo(BeEmpty())
pod := podList.Items[0]
node1 := types.NodeName(pod.Spec.NodeName)
By(fmt.Sprintf("Verify disk is attached to the node: %v", node1))
isAttached, err := verifyVSphereDiskAttached(client, vsp, volumePath, node1)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), "Disk is not attached to the node")
By(fmt.Sprintf("Power off the node: %v", node1))
govMoMiClient, err := vsphere.GetgovmomiClient(nil)
Expect(err).NotTo(HaveOccurred())
f := find.NewFinder(govMoMiClient.Client, true)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
vmPath := filepath.Join(workingDir, string(node1))
vm, err := f.VirtualMachine(ctx, vmPath)
Expect(err).NotTo(HaveOccurred())
_, err = vm.PowerOff(ctx)
Expect(err).NotTo(HaveOccurred())
defer vm.PowerOn(ctx)
err = vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOff)
Expect(err).NotTo(HaveOccurred(), "Unable to power off the node")
// Waiting for the pod to be failed over to a different node
node2, err := waitForPodToFailover(client, deployment, node1)
Expect(err).NotTo(HaveOccurred(), "Pod did not fail over to a different node")
By(fmt.Sprintf("Waiting for disk to be attached to the new node: %v", node2))
err = waitForVSphereDiskToAttach(client, vsp, volumePath, node2)
Expect(err).NotTo(HaveOccurred(), "Disk is not attached to the node")
By(fmt.Sprintf("Waiting for disk to be detached from the previous node: %v", node1))
err = waitForVSphereDiskToDetach(client, vsp, volumePath, node1)
Expect(err).NotTo(HaveOccurred(), "Disk is not detached from the node")
By(fmt.Sprintf("Power on the previous node: %v", node1))
_, err = vm.PowerOn(ctx)
Expect(err).NotTo(HaveOccurred(), "Failed to power on the node")
err = vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOn)
Expect(err).NotTo(HaveOccurred(), "Unable to power on the node")
})
})
// Wait until the pod failed over to a different node, or time out after 3 minutes
func waitForPodToFailover(client clientset.Interface, deployment *extensions.Deployment, oldNode types.NodeName) (types.NodeName, error) {
var (
err error
newNode types.NodeName
timeout = 3 * time.Minute
pollTime = 10 * time.Second
)
err = wait.Poll(pollTime, timeout, func() (bool, error) {
newNode, err = getNodeForDeployment(client, deployment)
if err != nil {
return true, err
}
if newNode != oldNode {
framework.Logf("The pod has been failed over from %q to %q", oldNode, newNode)
return true, nil
}
framework.Logf("Waiting for pod to be failed over from %q", oldNode)
return false, nil
})
if err != nil {
if err == wait.ErrWaitTimeout {
framework.Logf("Time out after waiting for %v", timeout)
}
framework.Logf("Pod did not fail over from %q with error: %v", oldNode, err)
return "", err
}
return getNodeForDeployment(client, deployment)
}
func getNodeForDeployment(client clientset.Interface, deployment *extensions.Deployment) (types.NodeName, error) {
podList, err := framework.GetPodsForDeployment(client, deployment)
if err != nil {
return "", err
}
return types.NodeName(podList.Items[0].Spec.NodeName), nil
}
| [
"\"VSPHERE_WORKING_DIR\""
]
| []
| [
"VSPHERE_WORKING_DIR"
]
| [] | ["VSPHERE_WORKING_DIR"] | go | 1 | 0 | |
examples/v2/incidents/UpdateIncident_3369341440.go | // Add commander to an incident returns "OK" response
package main
import (
"context"
"encoding/json"
"fmt"
"os"
datadog "github.com/DataDog/datadog-api-client-go/api/v2/datadog"
)
func main() {
// there is a valid "incident" in the system
IncidentDataID := os.Getenv("INCIDENT_DATA_ID")
// there is a valid "user" in the system
UserDataID := os.Getenv("USER_DATA_ID")
body := datadog.IncidentUpdateRequest{
Data: datadog.IncidentUpdateData{
Id: IncidentDataID,
Type: datadog.INCIDENTTYPE_INCIDENTS,
Relationships: &datadog.IncidentUpdateRelationships{
CommanderUser: &datadog.NullableRelationshipToUser{
Data: *datadog.NewNullableNullableRelationshipToUserData(&datadog.NullableRelationshipToUserData{
Id: UserDataID,
Type: datadog.USERSTYPE_USERS,
}),
},
},
},
}
ctx := datadog.NewDefaultContext(context.Background())
configuration := datadog.NewConfiguration()
configuration.SetUnstableOperationEnabled("UpdateIncident", true)
apiClient := datadog.NewAPIClient(configuration)
resp, r, err := apiClient.IncidentsApi.UpdateIncident(ctx, IncidentDataID, body, *datadog.NewUpdateIncidentOptionalParameters())
if err != nil {
fmt.Fprintf(os.Stderr, "Error when calling `IncidentsApi.UpdateIncident`: %v\n", err)
fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", r)
}
responseContent, _ := json.MarshalIndent(resp, "", " ")
fmt.Fprintf(os.Stdout, "Response from `IncidentsApi.UpdateIncident`:\n%s\n", responseContent)
}
| [
"\"INCIDENT_DATA_ID\"",
"\"USER_DATA_ID\""
]
| []
| [
"USER_DATA_ID",
"INCIDENT_DATA_ID"
]
| [] | ["USER_DATA_ID", "INCIDENT_DATA_ID"] | go | 2 | 0 | |
mail_notifications/celery.py | import os
from celery import Celery
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mail_notifications.settings')
app = Celery('mail_notifications')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
namecheap/provider.go | package namecheap
import (
"fmt"
"log"
"os"
"strings"
"time"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
var (
errTooManyRetries = fmt.Errorf("exceeded max retry limit")
)
// These are the "Auto" TTL settings in Namecheap
const (
ncDefaultTTL int = 1799
ncDefaultMXPref int = 10
ncDefaultTimeout time.Duration = 30
)
// Provider returns a terraform.ResourceProvider.
func Provider() *schema.Provider {
return &schema.Provider{
Schema: map[string]*schema.Schema{
"username": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("NAMECHEAP_USERNAME", nil),
Description: "A registered username for namecheap",
},
"api_user": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("NAMECHEAP_API_USER", nil),
Description: "A registered apiuser for namecheap",
},
"token": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("NAMECHEAP_TOKEN", nil),
Description: "The token key for API operations.",
},
"ip": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("NAMECHEAP_IP", nil),
Description: "IP addess of the machine running terraform",
},
"use_sandbox": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("NAMECHEAP_USE_SANDBOX", false),
Description: "If true, use the namecheap sandbox",
},
},
ResourcesMap: map[string]*schema.Resource{
"namecheap_record": resourceNameCheapRecord(),
"namecheap_ns": resourceNameCheapNS(),
},
ConfigureFunc: providerConfigure,
}
}
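// Illustrative Terraform configuration (a sketch only; attribute names follow the
// schema above and every value here is a placeholder):
//
//	provider "namecheap" {
//	  username    = "your-namecheap-username"
//	  api_user    = "your-namecheap-api-user"
//	  token       = "your-api-token"
//	  ip          = "203.0.113.10"
//	  use_sandbox = false
//	}
//
// Each attribute may instead be supplied through the corresponding NAMECHEAP_*
// environment variable declared in the DefaultFunc entries above.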
func providerConfigure(d *schema.ResourceData) (interface{}, error) {
log.Printf("[ROB] NAMECHEAP_USE_SANDBOX: %s; use_sandbox: %v", os.Getenv("NAMECHEAP_USE_SANDBOX"), d.Get("use_sandbox"))
config := Config{
username: d.Get("username").(string),
apiUser: d.Get("api_user").(string),
token: d.Get("token").(string),
ip: d.Get("ip").(string),
useSandbox: d.Get("use_sandbox").(bool),
}
return config.Client()
}
// retryAPICall attempts a specific callback several times with an increasing pause between attempts.
// The callback should be responsible for modifying state and cleaning up any resources.
func retryAPICall(f func() error) error {
attempts, max := 0, 5
for {
attempts++
if attempts > max {
log.Printf("[ERROR] API Retry Limit Reached.")
return errTooManyRetries
}
if err := f(); err != nil {
log.Printf("[INFO] Err: %v", err.Error())
if strings.Contains(err.Error(), "expected element type <ApiResponse> but have <html>") {
log.Printf("[WARN] Bad Namecheap API response received, backing off for %d seconds...", attempts)
time.Sleep(time.Duration(attempts) * time.Second)
continue // retry
}
return fmt.Errorf("Failed to create namecheap Record: %s", err)
}
return nil
}
}
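// Hypothetical usage sketch (the client method below is a placeholder, not part of
// this file): wrap a flaky Namecheap call so transient HTML error responses are
// retried with a growing backoff before giving up.
//
//	err := retryAPICall(func() error {
//	    _, err := client.AddRecord(domain, &record) // placeholder API call
//	    return err
//	})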
| [
"\"NAMECHEAP_USE_SANDBOX\""
]
| []
| [
"NAMECHEAP_USE_SANDBOX"
]
| [] | ["NAMECHEAP_USE_SANDBOX"] | go | 1 | 0 | |
pkg/kotsadm/objects/affinity_objects.go | package kotsadm
import (
corev1 "k8s.io/api/core/v1"
)
func defaultKotsNodeAffinity() *corev1.NodeAffinity {
return &corev1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
NodeSelectorTerms: []corev1.NodeSelectorTerm{
{
MatchExpressions: []corev1.NodeSelectorRequirement{
{
Key: "kubernetes.io/os",
Operator: corev1.NodeSelectorOpIn,
Values: []string{
"linux",
},
},
{
Key: "kubernetes.io/arch",
Operator: corev1.NodeSelectorOpNotIn,
Values: []string{
"arm64",
},
},
},
},
},
},
}
}
| []
| []
| []
| [] | [] | go | null | null | null |
app.py | import os
import subprocess
import tempfile
import re
from flask import Flask, request
from google.cloud import storage
app = Flask(__name__)
def download_blob(gcs_uri, destination_file_name):
"""Downloads a blob from the bucket."""
bucket_name = gcs_uri.strip().replace("gs://", "").split("/")[0]
client = storage.Client()
bucket = client.get_bucket(bucket_name)
blob = bucket.blob(gcs_uri.replace("gs://" + bucket_name + "/", ""))
with open(destination_file_name, "wb") as f:
client.download_blob_to_file(blob, f)
def upload_blob(gcs_uri, source_file_name):
"""Uploads a file to the bucket."""
bucket_name = gcs_uri.replace("gs://", "").split("/")[0]
client = storage.Client()
bucket = client.get_bucket(bucket_name)
blob = bucket.blob(gcs_uri.replace("gs://" + bucket_name + "/", ""))
blob.upload_from_filename(source_file_name)
@app.route('/compile', methods=['POST'])
def compile():
with tempfile.TemporaryDirectory() as tdir:
gcs_src_uri = request.form['gcs_src_uri']
gcs_dst_uri = request.form['gcs_dst_uri']
src_file_paths = []
for gcs_uri in gcs_src_uri.split(","):
filename = os.path.basename(gcs_uri)
src_file_path = os.path.join(tdir, filename)
download_blob(gcs_uri, src_file_path)
src_file_paths.append(src_file_path)
cmd_list = ["edgetpu_compiler"] + src_file_paths + ["-o", tdir]
proc = subprocess.run(cmd_list,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ret = proc.stdout.decode("utf8")
m = re.search(r"Output model: .*", ret)
if m:
dst_file_path = m.group(0).replace("Output model: ", "")
upload_blob(gcs_dst_uri, dst_file_path)
return ret
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0',
port=int(os.environ.get('PORT', 8080)))
| []
| []
| [
"PORT"
]
| [] | ["PORT"] | python | 1 | 0 | |
run.py | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np
from buildModel import define_model,pretrained_embedding_layer
from utils import read_glove_vecs,sentences_to_indices,label_to_emoji
maxLen = 10
word_to_index, index_to_word, word_to_vec_map = read_glove_vecs('glove.6B.50d.txt')
model = define_model((maxLen,), word_to_vec_map, word_to_index)
model.load_weights('emojify.h5')
def emojify(sentences):
sentences = sentences.split('.')
out = []
for s in sentences:
if len(s)!=0:
s_arr = np.array([s])
s_indices = sentences_to_indices(s_arr, word_to_index, maxLen)
out.append(s+' ' + label_to_emoji(np.argmax(model.predict(s_indices))))
out = '.'.join(out) + '.'
return out
sentences = input("Please Enter your sentences to Emojify:\n ")
print("===" * 10)
print("Your Emojified Sentence:\n",emojify(sentences))
| []
| []
| [
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
edit_videos.py | import os, json, csv, glob
from datetime import datetime
from datacite import DataCiteMDSClient,schema40
from caltechdata_api import caltechdata_edit, get_metadata
filename = "dois.csv"
# Get access token from TIND sed as environment variable with source token.bash
token = os.environ["TINDTOK"]
production = True
today = datetime.today().date().isoformat()
with open(filename, encoding="utf-8-sig") as csvfile:
filename_to_id = {}
reader = csv.DictReader(csvfile)
for row in reader:
doi = row['DOI']
idv = doi.split('D1.')[1]
fname = row['movie']
fpath = ('cellatlas-videos/'+fname)
metadata = get_metadata(idv,validate=False)
for date in metadata['dates']:
if date['dateType'] == 'Updated':
date['date'] = today
new_descr = []
for description in metadata['descriptions']:
if description['descriptionType'] == 'SeriesInformation':
description['description'] = 'Atlas of Bacterial and Archaeal Cell Structure'
if not description['description'].startswith("<br>Cite this record as:"):
new_descr.append(description)
metadata['descriptions'] = new_descr
new_id = []
for identifier in metadata['relatedIdentifiers']:
if identifier['relatedIdentifier'] != None:
new_id.append(identifier)
metadata['relatedIdentifiers'] = new_id
response = caltechdata_edit(token, idv, metadata, {fpath}, {'mp4'}, production)
print(response)
| []
| []
| [
"TINDTOK"
]
| [] | ["TINDTOK"] | python | 1 | 0 | |
Kai/crab/NANOv7_Fri13/2018/ttt/crab_cfg_2018_tttt.py | import os
from WMCore.Configuration import Configuration
from CRABClient.UserUtilities import config, getUsernameFromCRIC
config = Configuration()
config.section_("General")
config.General.requestName = '2018_tttt'
config.General.transferOutputs = True
config.General.transferLogs = True
config.section_("JobType")
config.JobType.allowUndistributedCMSSW = True
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'PSet.py'
config.JobType.maxMemoryMB = 2000
config.JobType.maxJobRuntimeMin = 1315
config.JobType.numCores = 1
config.JobType.scriptExe = 'crab_script_2018_tttt.sh'
config.JobType.inputFiles = ['crab_script_2018_tttt.py',
os.path.join(os.environ['CMSSW_BASE'],'src/PhysicsTools/NanoAODTools/scripts/haddnano.py'),
]
config.JobType.outputFiles = ['hist.root']
config.JobType.sendPythonFolder = True
config.section_("Data")
config.Data.inputDataset = '/TTTT_TuneCP5_13TeV-amcatnlo-pythia8/RunIIAutumn18NanoAODv7-Nano02Apr2020_102X_upgrade2018_realistic_v21_ext2-v1/NANOAODSIM'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
if config.Data.splitting == 'FileBased':
config.Data.unitsPerJob = 1
# config.Data.totalUnits = $TOTAL_UNITS
# config.Data.userInputFiles = []
config.Data.outLFNDirBase = '/store/user/{user}/Fri13'.format(user=getUsernameFromCRIC())
config.Data.publication = True
config.Data.outputDatasetTag = 'Fri13'
config.section_("Site")
config.Site.storageSite = 'T2_CH_CERN'
| []
| []
| [
"CMSSW_BASE"
]
| [] | ["CMSSW_BASE"] | python | 1 | 0 | |
calorie_project/asgi.py | """
ASGI config for calorie_project project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'calorie_project.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
bot/settings_files/_global.py | import os
import settings_files.rules_contents
PREFIX = "+"
NO_SSL = os.getenv("NO_SSL", False)
NODB = os.getenv("NODB", False)
BOT_TOKEN = str(os.getenv("BOT_TOKEN", False))
BOT_ID = int(os.getenv("BOT_ID", False))
DATABASE_URL = str(os.getenv("DATABASE_URL", False))
GUILD_ID = int(os.getenv("GUILD_ID", False))
STARBOARD_ID = int(os.getenv("STARBOARD_ID", False))
GINNY_TRANSPARENT_URL = str(os.getenv("GINNY_TRANSPARENT_URL", False))
GINNY_WHITE_URL = str(os.getenv("GINNY_WHITE_URL", False))
DB_TMER_REMINDER = 1
DB_TMER_GIVEAWAY = 2
DB_TMER_REPOST = 3
ERROR_CHANNEL_NAME = "maintenance-channel"
ERROR_CHANNEL_ID = 376031149371162635
NO_RESPONSE_CHANNELS = [
"mod-chat",
"rules",
"giveaways",
"alliance-mod-chat",
"starboard",
"events",
"event-submission",
]
GIVEAWAY_CHANNEL_NAME = "giveaways"
LOGS_CHANNEL_NAME = "mod-log"
INVITE_LINK = "https://discord.gg/x7nsqEj"
MOD_ROLES = ["Moderator", "Guides of the Void"]
REEPOSTER_NAME = "REE-poster"
REEPOSTER_EMOJI = "FEELSREEE"
SOFT_RED = 0xCD6D6D
STARBOARD_COLOUR = 0xFFAC33
DARK_BLUE = 0x00008B
DARK_ORANGE = 0xFF5E13
PALE_GREEN = 0xBCF5BC
PALE_YELLOW = 0xFDFF96
GREY = 0x6A6866
PALE_BLUE = 0xADD8E6
LIGHT_BLUE = 0x1F75FE
VIE_PURPLE = 0xA569BD
EMPTY = "\u200b"
TADA = "🎉"
FEL_ID = 235055132843180032
DER_ID = 295553857054834690
JORM_ID = 168350377824092160
BOT_SPAM_ID = 748506600880210452
| []
| []
| [
"NO_SSL",
"BOT_ID",
"DATABASE_URL",
"GINNY_TRANSPARENT_URL",
"BOT_TOKEN",
"STARBOARD_ID",
"GUILD_ID",
"NODB",
"GINNY_WHITE_URL"
]
| [] | ["NO_SSL", "BOT_ID", "DATABASE_URL", "GINNY_TRANSPARENT_URL", "BOT_TOKEN", "STARBOARD_ID", "GUILD_ID", "NODB", "GINNY_WHITE_URL"] | python | 9 | 0 | |
vendor/git.lukeshu.com/go/libsystemd/sd_daemon/notify_linux.go | // Incorporates: git://github.com/docker/docker.git 18c7c67308bd4a24a41028e63c2603bb74eac85e pkg/systemd/sd_notify.go
// Incorporates: git://github.com/coreos/go-systemd.git a606a1e936df81b70d85448221c7b1c6d8a74ef1 daemon/sdnotify.go
//
// Copyright 2013, 2015 Docker, Inc.
// Copyright 2014 CoreOS, Inc.
// Copyright 2015-2019 Luke Shumaker
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
package sd_daemon
import (
"bytes"
"net"
"os"
"golang.org/x/sys/unix"
)
func (msg Notification) send(unsetEnv bool) error {
if unsetEnv {
defer func() { _ = os.Unsetenv("NOTIFY_SOCKET") }()
}
socketAddr := &net.UnixAddr{
Name: os.Getenv("NOTIFY_SOCKET"),
Net: "unixgram",
}
if socketAddr.Name == "" {
return ErrDisabled
}
conn, err := socketUnixgram(socketAddr.Name)
if err != nil {
return err
}
defer func() { _ = conn.Close() }()
var cmsgs [][]byte
if len(msg.Files) > 0 {
fds := make([]int, len(msg.Files))
for i := range msg.Files {
fds[i] = int(msg.Files[i].Fd())
}
cmsg := unix.UnixRights(fds...)
cmsgs = append(cmsgs, cmsg)
}
havePid := msg.PID > 0 && msg.PID != os.Getpid()
if havePid {
cmsg := unix.UnixCredentials(&unix.Ucred{
Pid: int32(msg.PID),
Uid: uint32(os.Getuid()),
Gid: uint32(os.Getgid()),
})
cmsgs = append(cmsgs, cmsg)
}
// If the 2nd argument is empty, this is equivalent to
//
// conn, _ := net.DialUnix(socketAddr.Net, nil, socketAddr)
// conn.Write([]byte(msg.State))
_, _, err = conn.WriteMsgUnix([]byte(msg.State), bytes.Join(cmsgs, nil), socketAddr)
if err != nil && havePid {
// Maybe it failed because we don't have privileges to
// spoof our pid; retry without spoofing the pid.
//
// I'm not too happy that we do this silently without
// notifying the caller, but that's what
// sd_pid_notify_with_fds does.
cmsgs = cmsgs[:len(cmsgs)-1]
_, _, err = conn.WriteMsgUnix([]byte(msg.State), bytes.Join(cmsgs, nil), socketAddr)
}
return err
}
// socketUnixgram wraps socket(2), but doesn't bind(2) or connect(2)
// the socket to anything. This is an ugly hack because none of the
// functions in "net" actually allow you to get a AF_UNIX socket not
// bound/connected to anything.
//
// At some point you begin to question if it is worth it to keep up
// the high-level interface of "net", and messing around with FileConn
// and UnixConn. Maybe we just drop to using unix.Socket and
// unix.SendmsgN directly.
//
// See: net/sock_cloexec.go:sysSocket()
func socketUnixgram(name string) (*net.UnixConn, error) {
// Don't bother with SOCK_NONBLOCK, net.FileConn() will call
// syscall.SetNonblock().
//
// BUG(lukeshu): On Linux, Notification.Send() depends on
// SOCK_CLOEXEC in Linux 2.6.27 (2008-10-09), which is
// slightly newer than Go itself depends on, 2.6.23
// (2007-10-09).
fd, err := unix.Socket(unix.AF_UNIX, unix.SOCK_DGRAM|unix.SOCK_CLOEXEC, 0)
if err != nil {
return nil, os.NewSyscallError("socket", err)
}
defer unix.Close(fd)
conn, err := net.FileConn(os.NewFile(uintptr(fd), name))
if err != nil {
return nil, err
}
unixConn := conn.(*net.UnixConn)
return unixConn, nil
}
| [
"\"NOTIFY_SOCKET\""
]
| []
| [
"NOTIFY_SOCKET"
]
| [] | ["NOTIFY_SOCKET"] | go | 1 | 0 | |
src/metrics.go | package main
import (
"bufio"
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"regexp"
"strconv"
"strings"
"time"
"github.com/jeremywohl/flatten"
"github.com/newrelic/infra-integrations-sdk/data/metric"
"github.com/newrelic/infra-integrations-sdk/log"
)
var metricsStandardDefinition = map[string][]interface{}{
"software.edition": {"edition", metric.ATTRIBUTE},
"software.version": {"version", metric.ATTRIBUTE},
"net.connectionsActive": {"active", metric.GAUGE},
"net.connectionsAcceptedPerSecond": {"accepted", metric.RATE},
"net.connectionsDroppedPerSecond": {connectionsDropped, metric.RATE},
"net.connectionsReading": {"reading", metric.GAUGE},
"net.connectionsWaiting": {"waiting", metric.GAUGE},
"net.connectionsWriting": {"writing", metric.GAUGE},
"net.requestsPerSecond": {"requests", metric.RATE},
}
var metricsPlusDefinition = map[string][]interface{}{
"software.edition": {"edition", metric.ATTRIBUTE},
"software.version": {"version", metric.ATTRIBUTE},
"net.connectionsActive": {"connections.active", metric.GAUGE},
"net.connectionsIdle": {"connections.idle", metric.GAUGE},
"net.connectionsAcceptedPerSecond": {"connections.accepted", metric.RATE},
"net.connectionsDroppedPerSecond": {"connections.dropped", metric.RATE},
"net.requestsPerSecond": {"requests.total", metric.RATE},
"processes.respawned": {"processes.respawned", metric.DELTA},
"ssl.handshakes": {"ssl.handshakes", metric.DELTA},
"ssl.failedHandshakes": {"ssl.handshakes_failed", metric.DELTA},
"ssl.sessionReuses": {"ssl.session_reuses", metric.DELTA},
}
var metricsPlusAPIDefinition = map[string][]interface{}{
"software.version": {"nginx.version", metric.ATTRIBUTE},
"connections.active": {"net.connectionsActive", metric.GAUGE},
"connections.idle": {"net.connectionsIdle", metric.GAUGE},
"connections.accepted": {"net.connectionsAcceptedPerSecond", metric.RATE},
"connections.dropped": {"net.connectionsDroppedPerSecond", metric.RATE},
"processes.respawned": {"processes.respawned", metric.DELTA},
"ssl.handshakes": {"ssl.handshakes", metric.DELTA},
"ssl.handshakes_failed": {"ssl.failedHandshakes", metric.DELTA},
"ssl.session_reuses": {"ssl.sessionReuses", metric.DELTA},
"http.requests.total": {"net.requestsPerSecond", metric.RATE},
"http.requests.current": {"net.requests", metric.GAUGE},
}
// expressions contains the structure of the input data and defines the attributes we want to store
var nginxStatusExpressions = []*regexp.Regexp{
regexp.MustCompile(`Active connections:\s+(?P<active>\d+)`),
nil,
regexp.MustCompile(`\s*(?P<accepted>\d+)\s+(?P<handled>\d+)\s+(?P<requests>\d+)`),
regexp.MustCompile(`Reading: (?P<reading>\d+)\s+Writing: (?P<writing>\d+)\s+Waiting: (?P<waiting>\d+)`),
}
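// For reference, the expressions above target the typical stub_status page layout
// (the second line carries only column headers, hence the nil entry):
//
//	Active connections: 291
//	server accepts handled requests
//	 16630948 16630948 31070465
//	Reading: 6 Writing: 179 Waiting: 106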
func connectionsDropped(metrics map[string]interface{}) (int, bool) {
accepts, ok1 := metrics["accepted"].(int)
handled, ok2 := metrics["handled"].(int)
if ok1 && ok2 {
return accepts - handled, true
}
return 0, false
}
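// Example: accepted=1000 and handled=997 yields 3 dropped connections; if either
// counter is missing from the parsed metrics, the derived value is skipped.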
// getStandardMetrics reads an NGINX (open source edition) status page and transforms its
// contents into a map that can be processed by the NR agent.
// It returns a map of metrics with all the keys and values extracted from the
// status endpoint.
func getStandardMetrics(reader *bufio.Reader) (map[string]interface{}, error) {
metrics := make(map[string]interface{})
for lineNo, re := range nginxStatusExpressions {
line, err := reader.ReadString('\n')
if err == io.EOF {
return metrics, nil
}
if re == nil {
continue
}
match := re.FindStringSubmatch(line)
if match == nil {
return nil, fmt.Errorf("Line %d of status doesn't match", lineNo)
}
for i, name := range re.SubexpNames() {
if i != 0 {
value, err := strconv.Atoi(match[i])
if err != nil {
log.Warn("Can't cast value '%s'", match[i])
continue
}
metrics[name] = value
}
}
}
metrics["version"] = ""
metrics["edition"] = "open source"
return metrics, nil
}
// getPlusMetrics reads an NGINX (Plus edition) status message, gets some
// metrics and transforms the contents into a map that can be processed by NR
// agent.
// It returns a map of metrics keys -> values.
func getPlusMetrics(reader *bufio.Reader) (map[string]interface{}, error) {
jsonMetrics := make(map[string]interface{})
metrics := make(map[string]interface{})
dec := json.NewDecoder(reader)
err := dec.Decode(&jsonMetrics)
if err != nil {
return nil, err
}
roots := []string{"connections", "requests", "ssl", "processes"}
for _, rootKey := range roots {
rootNode, ok := jsonMetrics[rootKey].(map[string]interface{})
if !ok {
log.Warn("Can't assert type for %s", rootNode)
continue
}
for key, value := range rootNode {
metrics[fmt.Sprintf("%s.%s", rootKey, key)] = int(value.(float64))
}
}
metrics["version"] = jsonMetrics["nginx_version"]
metrics["edition"] = "plus"
return metrics, nil
}
func populateMetrics(sample *metric.Set, metrics map[string]interface{}, metricsDefinition map[string][]interface{}) error {
for metricName, metricInfo := range metricsDefinition {
rawSource := metricInfo[0]
metricType := metricInfo[1].(metric.SourceType)
var rawMetric interface{}
var ok bool
switch source := rawSource.(type) {
case string:
rawMetric, ok = metrics[source]
case func(map[string]interface{}) (int, bool):
rawMetric, ok = source(metrics)
default:
log.Warn("Invalid raw source metric for %s", metricName)
continue
}
if !ok {
log.Warn("Can't find raw metrics in results for %s", metricName)
continue
}
err := sample.SetMetric(metricName, rawMetric, metricType)
if err != nil {
log.Warn("Error setting value: %s", err)
continue
}
}
return nil
}
func getMetricsData(sample *metric.Set) error {
switch args.StatusModule {
case httpStubStatus:
resp, err := getStatus("")
if err != nil {
return err
}
defer resp.Body.Close()
metricsDefinition := metricsStandardDefinition
rawMetrics, err := getStandardMetrics(bufio.NewReader(resp.Body))
if err != nil {
return err
}
rawVersion := strings.Replace(resp.Header.Get("Server"), "nginx/", "", -1)
rawMetrics["version"] = rawVersion
return populateMetrics(sample, rawMetrics, metricsDefinition)
case httpStatus:
resp, err := getStatus("")
if err != nil {
return err
}
defer resp.Body.Close()
metricsDefinition := metricsPlusDefinition
rawMetrics, err := getPlusMetrics(bufio.NewReader(resp.Body))
if err != nil {
return err
}
return populateMetrics(sample, rawMetrics, metricsDefinition)
case httpAPIStatus:
return pollHttpAPIStatusEndpoints(sample)
default:
return getDiscoveredMetricsData(sample)
}
}
func pollHttpAPIStatusEndpoints(sample *metric.Set) error {
paths := []string{"/nginx", "/processes", "/connections", "/http/requests", "/ssl"}
for _, p := range paths {
resp, err := getStatus(p)
if err != nil {
log.Warn("Request to endpoint failed: %s", err)
continue
}
defer func() {
if err := resp.Body.Close(); err != nil {
log.Warn("Unable to close response body: %s", err)
}
}()
getHTTPAPIMetrics(p, sample, bufio.NewReader(resp.Body))
}
return nil
}
func getHTTPAPIMetrics(path string, sample *metric.Set, reader *bufio.Reader) {
jsonMetrics := make(map[string]interface{})
dec := json.NewDecoder(reader)
err := dec.Decode(&jsonMetrics)
if err != nil {
return
}
if jsonMetrics == nil || len(jsonMetrics) <= 0 {
return
}
flat, err := flatten.Flatten(jsonMetrics, "", flatten.DotStyle)
if err != nil {
log.Error("Error flattening json: %+v", err)
return
}
for k, v := range flat {
key := pathToPrefix(path) + k
realKey, typ := getAttributeType(key, v)
if err := sample.SetMetric(realKey, v, typ); err != nil {
log.Error("Unable to set metric: %s", err)
}
}
if err := sample.SetMetric("software.edition", "plus", metric.ATTRIBUTE); err != nil {
log.Error("Unable to set metric: %s", err)
}
}
var notJustDots = regexp.MustCompile(`[^.]`)
func pathToPrefix(path string) (prefix string) {
prefix = strings.TrimPrefix(path, "/")
prefix = strings.Replace(prefix, "/", ".", -1)
if !strings.HasSuffix(prefix, ".") {
prefix = prefix + "."
}
if prefix == "." {
prefix = ""
}
if !notJustDots.MatchString(prefix) {
prefix = ""
}
return prefix
}
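// Illustrative mappings produced by pathToPrefix:
//
//	"/nginx" -> "nginx."
//	"/http/requests" -> "http.requests."
//	"/" -> ""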
func getAttributeType(key string, v interface{}) (string, metric.SourceType) {
if md, ok := metricsPlusAPIDefinition[key]; ok {
return md[0].(string), md[1].(metric.SourceType)
}
// if nothing else matches infer type from the value
switch v.(type) {
case string:
return key, metric.ATTRIBUTE
default:
return key, metric.GAUGE
}
}
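// For example, the flattened key "ssl.handshakes" resolves through
// metricsPlusAPIDefinition to a DELTA metric of the same name, while an unmapped
// string value (say, a hypothetical "nginx.build") falls back to ATTRIBUTE and an
// unmapped numeric value to GAUGE.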
func httpClient() *http.Client {
netClient := http.Client{
Timeout: time.Duration(args.ConnectionTimeout) * time.Second,
}
if !args.ValidateCerts {
netClient.Transport = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
}
return &netClient
}
func getStatus(path string) (resp *http.Response, err error) {
netClient := httpClient()
req, _ := http.NewRequest("GET", args.StatusURL+path, nil)
host := os.Getenv("HOST_HEADER")
if host != "" {
//log.Warn("Adding Host header: %s", host)
// https://stackoverflow.com/a/41034588
// Pay attention that in http.Request header "Host" can not be set via Set/Add method
//req.Header.Add("Host", host)
req.Host = host
}
resp, err = netClient.Do(req)
if err != nil {
return
}
if resp.StatusCode != http.StatusOK {
return resp, fmt.Errorf("failed to get stats from %s. Server returned code %d (%s). Expecting 200", args.StatusURL+path, resp.StatusCode, resp.Status)
}
return
}
// For backwards compatibility, the integration tries to discover whether the metrics are standard or nginx plus based
// on their format
func getDiscoveredMetricsData(sample *metric.Set) error {
netClient := httpClient()
resp, err := netClient.Get(args.StatusURL)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("failed to get stats from nginx. Server returned code %d (%s). Expecting 200",
resp.StatusCode, resp.Status)
}
defer resp.Body.Close()
var rawMetrics map[string]interface{}
var metricsDefinition map[string][]interface{}
if resp.Header.Get("content-type") == "application/json" {
bodyBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
if strings.Contains(string(bodyBytes), nginxPlusApiRootNginxEndpoint) {
return pollHttpAPIStatusEndpoints(sample)
}
metricsDefinition = metricsPlusDefinition
rawMetrics, err = getPlusMetrics(bufio.NewReader(bytes.NewBuffer(bodyBytes)))
if err != nil {
return err
}
} else {
metricsDefinition = metricsStandardDefinition
rawMetrics, err = getStandardMetrics(bufio.NewReader(resp.Body))
if err != nil {
return err
}
rawVersion := strings.Replace(resp.Header.Get("Server"), "nginx/", "", -1)
rawMetrics["version"] = rawVersion
}
return populateMetrics(sample, rawMetrics, metricsDefinition)
}
| [
"\"HOST_HEADER\""
]
| []
| [
"HOST_HEADER"
]
| [] | ["HOST_HEADER"] | go | 1 | 0 | |
emtract/model_inference.py | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from emtract.model import Model, ModelType
import pandas as pd
class ModelInference:
MODEL_BASE_PATH = 'build/models/'
DATA_BASE_PATH = './emtract/data/'
def __init__(self, model_type):
if model_type == 'twitter':
self.model = Model(ModelType.TWITTER)
else:
self.model = Model(ModelType.STOCK_TWITS)
def inference(self, text):
return self.model.predict([text])
def file_inference(self, file_name, output):
df = pd.read_csv(file_name, header=None)
predictions = self.model.predict(df.iloc[:, 0].values)
predictions.to_csv(output, index=False)
| []
| []
| [
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
agent/command/exec.go | package command
import (
"context"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/evergreen-ci/evergreen"
"github.com/evergreen-ci/evergreen/agent/internal"
"github.com/evergreen-ci/evergreen/agent/internal/client"
agentutil "github.com/evergreen-ci/evergreen/agent/util"
"github.com/evergreen-ci/evergreen/util"
"github.com/google/shlex"
"github.com/mitchellh/mapstructure"
"github.com/mongodb/grip"
"github.com/mongodb/grip/level"
"github.com/mongodb/grip/message"
"github.com/mongodb/jasper"
"github.com/mongodb/jasper/options"
"github.com/pkg/errors"
)
type subprocessExec struct {
Binary string `mapstructure:"binary"`
Args []string `mapstructure:"args"`
Env map[string]string `mapstructure:"env"`
Command string `mapstructure:"command"`
Path []string `mapstructure:"add_to_path"`
// Add defined expansions to the environment of the process
// that's launched.
AddExpansionsToEnv bool `mapstructure:"add_expansions_to_env"`
// IncludeExpansionsInEnv allows users to specify a number of
// expansions that will be included in the environment, if
// they are defined. It is not an error to specify expansions
// that are not defined in include_expansions_in_env.
IncludeExpansionsInEnv []string `mapstructure:"include_expansions_in_env"`
// Background, if set to true, prevents shell code/output from
// waiting for the script to complete and immediately returns
// to the caller
Background bool `mapstructure:"background"`
// Silent, if set to true, prevents shell code/output from being
// logged to the agent's task logs. This can be used to avoid
// exposing sensitive expansion parameters and keys.
Silent bool `mapstructure:"silent"`
// SystemLog if set will write the shell command's output to the system logs, instead of the
// task logs. This can be used to collect diagnostic data in the background of a running task.
SystemLog bool `mapstructure:"system_log"`
// WorkingDir is the working directory to start the shell in.
WorkingDir string `mapstructure:"working_dir"`
// IgnoreStandardOutput and IgnoreStandardError allow users to
// elect to ignore either standard out and/or standard output.
IgnoreStandardOutput bool `mapstructure:"ignore_standard_out"`
IgnoreStandardError bool `mapstructure:"ignore_standard_error"`
// RedirectStandardErrorToOutput allows you to capture
// standard error in the same stream as standard output. This
// improves the synchronization of these streams.
RedirectStandardErrorToOutput bool `mapstructure:"redirect_standard_error_to_output"`
// ContinueOnError determines whether or not a failed return code
// should cause the task to be marked as failed. Setting this to true
// allows following commands to execute even if this shell command fails.
ContinueOnError bool `mapstructure:"continue_on_err"`
// KeepEmptyArgs will allow empty arguments in commands if set to true
// note that non-blank whitespace arguments are never stripped
KeepEmptyArgs bool `mapstructure:"keep_empty_args"`
base
}
func subprocessExecFactory() Command { return &subprocessExec{} }
func (c *subprocessExec) Name() string { return "subprocess.exec" }
func (c *subprocessExec) ParseParams(params map[string]interface{}) error {
err := mapstructure.Decode(params, c)
if err != nil {
return errors.Wrapf(err, "error decoding %s params", c.Name())
}
if c.Command != "" {
if c.Binary != "" || len(c.Args) > 0 {
return errors.New("must specify command as either arguments or a command string but not both")
}
args, err := shlex.Split(c.Command)
if err != nil {
return errors.Wrapf(err, "problem parsing %s command", c.Name())
}
if len(args) == 0 {
return errors.Errorf("no arguments for command %s", c.Name())
}
c.Binary = args[0]
if len(args) > 1 {
c.Args = args[1:]
}
}
if c.Silent {
c.IgnoreStandardError = true
c.IgnoreStandardOutput = true
}
if c.IgnoreStandardOutput && c.RedirectStandardErrorToOutput {
return errors.New("cannot ignore standard out, and redirect standard error to it")
}
if c.Env == nil {
c.Env = make(map[string]string)
}
return nil
}
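// Minimal project-YAML sketch of how these params are typically supplied (field
// names mirror the mapstructure tags above; values are placeholders):
//
//	- command: subprocess.exec
//	  params:
//	    binary: "make"
//	    args: ["test"]
//	    working_dir: "src"
//	    add_expansions_to_env: true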
func (c *subprocessExec) doExpansions(exp *util.Expansions) error {
var err error
catcher := grip.NewBasicCatcher()
c.WorkingDir, err = exp.ExpandString(c.WorkingDir)
catcher.Add(err)
c.Binary, err = exp.ExpandString(c.Binary)
catcher.Add(err)
for idx := range c.Args {
c.Args[idx], err = exp.ExpandString(c.Args[idx])
catcher.Add(err)
}
for k, v := range c.Env {
c.Env[k], err = exp.ExpandString(v)
catcher.Add(err)
}
if len(c.Path) > 0 {
path := make([]string, len(c.Path), len(c.Path)+1)
for idx := range c.Path {
path[idx], err = exp.ExpandString(c.Path[idx])
catcher.Add(err)
}
path = append(path, os.Getenv("PATH"))
c.Env["PATH"] = strings.Join(path, string(filepath.ListSeparator))
}
return errors.Wrap(catcher.Resolve(), "problem expanding strings")
}
type modifyEnvOptions struct {
taskID string
workingDir string
tmpDir string
expansions util.Expansions
includeExpansionsInEnv []string
addExpansionsToEnv bool
}
func defaultAndApplyExpansionsToEnv(env map[string]string, opts modifyEnvOptions) map[string]string {
if env == nil {
env = map[string]string{}
}
expansions := opts.expansions.Map()
if opts.addExpansionsToEnv {
for k, v := range expansions {
if k == evergreen.GlobalGitHubTokenExpansion {
// users should not be able to use the global GitHub token expansion
// as it can result in breaching Evergreen's GitHub API limit
continue
}
env[k] = v
}
}
for _, expName := range opts.includeExpansionsInEnv {
if val, ok := expansions[expName]; ok && expName != evergreen.GlobalGitHubTokenExpansion {
env[expName] = val
}
}
env[agentutil.MarkerTaskID] = opts.taskID
env[agentutil.MarkerAgentPID] = strconv.Itoa(os.Getpid())
addTempDirs(env, opts.tmpDir)
if _, ok := env["GOCACHE"]; !ok {
env["GOCACHE"] = filepath.Join(opts.workingDir, ".gocache")
}
if _, ok := env["CI"]; !ok {
env["CI"] = "true"
}
return env
}
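// Sketch of a hypothetical result: with expansions {"branch": "main"} and
// includeExpansionsInEnv=["branch"], the returned map contains "branch"="main",
// the task ID and agent PID markers, TMP/TMPDIR/TEMP pointing at the task temp
// dir, and GOCACHE and CI defaults unless the caller already set them.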
func (c *subprocessExec) getProc(ctx context.Context, taskID string, logger client.LoggerProducer) *jasper.Command {
cmd := c.JasperManager().CreateCommand(ctx).Add(append([]string{c.Binary}, c.Args...)).
Background(c.Background).Environment(c.Env).Directory(c.WorkingDir).
SuppressStandardError(c.IgnoreStandardError).SuppressStandardOutput(c.IgnoreStandardOutput).RedirectErrorToOutput(c.RedirectStandardErrorToOutput).
ProcConstructor(func(lctx context.Context, opts *options.Create) (jasper.Process, error) {
var cancel context.CancelFunc
var ictx context.Context
if c.Background {
ictx, cancel = context.WithCancel(context.Background())
} else {
ictx = lctx
}
proc, err := c.JasperManager().CreateProcess(ictx, opts)
if err != nil {
if cancel != nil {
cancel()
}
return proc, errors.WithStack(err)
}
if cancel != nil {
grip.Warning(message.WrapError(proc.RegisterTrigger(lctx, func(info jasper.ProcessInfo) {
cancel()
}), "problem registering cancellation for process"))
}
pid := proc.Info(ctx).PID
agentutil.TrackProcess(taskID, pid, logger.System())
if c.Background {
logger.Execution().Debugf("running command in the background [pid=%d]", pid)
} else {
logger.Execution().Infof("started process with pid '%d'", pid)
}
return proc, nil
})
if !c.IgnoreStandardOutput {
if c.SystemLog {
cmd.SetOutputSender(level.Info, logger.System().GetSender())
} else {
cmd.SetOutputSender(level.Info, logger.Task().GetSender())
}
}
if !c.IgnoreStandardError {
if c.SystemLog {
cmd.SetErrorSender(level.Error, logger.System().GetSender())
} else {
cmd.SetErrorSender(level.Error, logger.Task().GetSender())
}
}
return cmd
}
func addTempDirs(env map[string]string, dir string) {
for _, key := range []string{"TMP", "TMPDIR", "TEMP"} {
if _, ok := env[key]; ok {
continue
}
env[key] = dir
}
}
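// Example: addTempDirs(env, "/data/tmp") sets TMP, TMPDIR and TEMP to "/data/tmp"
// unless the caller already provided values for them.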
func (c *subprocessExec) Execute(ctx context.Context, comm client.Communicator, logger client.LoggerProducer, conf *internal.TaskConfig) error {
var err error
if err = c.doExpansions(conf.Expansions); err != nil {
logger.Execution().Error("problem expanding command values")
return errors.WithStack(err)
}
logger.Execution().WarningWhen(
filepath.IsAbs(c.WorkingDir) && !strings.HasPrefix(c.WorkingDir, conf.WorkDir),
message.Fields{
"message": "the working directory is an absolute path without the required prefix",
"path": c.WorkingDir,
"required_prefix": conf.WorkDir,
})
c.WorkingDir, err = conf.GetWorkingDirectory(c.WorkingDir)
if err != nil {
logger.Execution().Warning(err.Error())
return errors.WithStack(err)
}
taskTmpDir, err := conf.GetWorkingDirectory("tmp")
if err != nil {
logger.Execution().Notice(err.Error())
}
var exp util.Expansions
if conf.Expansions != nil {
exp = *conf.Expansions
}
c.Env = defaultAndApplyExpansionsToEnv(c.Env, modifyEnvOptions{
taskID: conf.Task.Id,
workingDir: c.WorkingDir,
tmpDir: taskTmpDir,
expansions: exp,
includeExpansionsInEnv: c.IncludeExpansionsInEnv,
addExpansionsToEnv: c.AddExpansionsToEnv,
})
if !c.KeepEmptyArgs {
for i := len(c.Args) - 1; i >= 0; i-- {
if c.Args[i] == "" {
c.Args = append(c.Args[:i], c.Args[i+1:]...)
}
}
}
logger.Execution().Debug(message.Fields{
"working_directory": c.WorkingDir,
"background": c.Background,
"binary": c.Binary,
})
err = errors.WithStack(c.runCommand(ctx, conf.Task.Id, c.getProc(ctx, conf.Task.Id, logger), logger))
if ctx.Err() != nil {
logger.System().Debug("dumping running processes")
logger.System().Debug(message.CollectAllProcesses())
logger.Execution().Notice(err)
return errors.Errorf("%s aborted", c.Name())
}
return err
}
func (c *subprocessExec) runCommand(ctx context.Context, taskID string, cmd *jasper.Command, logger client.LoggerProducer) error {
if c.Silent {
logger.Execution().Info("executing command in silent mode")
}
err := cmd.Run(ctx)
if c.ContinueOnError {
logger.Execution().Notice(message.WrapError(err, message.Fields{
"task": taskID,
"binary": c.Binary,
"background": c.Background,
"silent": c.Silent,
"continue": c.ContinueOnError,
}))
return nil
}
return err
}
| [
"\"PATH\""
]
| []
| [
"PATH"
]
| [] | ["PATH"] | go | 1 | 0 | |
core/commands/config.go | package commands
import (
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"strings"
"github.com/TRON-US/go-btfs/core/commands/cmdenv"
"github.com/TRON-US/go-btfs/repo"
"github.com/TRON-US/go-btfs/repo/fsrepo"
cmds "github.com/TRON-US/go-btfs-cmds"
config "github.com/TRON-US/go-btfs-config"
"github.com/elgris/jsondiff"
)
// ConfigUpdateOutput is config profile apply command's output
type ConfigUpdateOutput struct {
OldCfg map[string]interface{}
NewCfg map[string]interface{}
}
type ConfigField struct {
Key string
Value interface{}
}
const (
configBoolOptionName = "bool"
configJSONOptionName = "json"
configDryRunOptionName = "dry-run"
)
var ConfigCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Get and set btfs config values.",
ShortDescription: `
'btfs config' controls configuration variables. It works like 'git config'.
The configuration values are stored in a config file inside your btfs
repository.`,
LongDescription: `
'btfs config' controls configuration variables. It works
much like 'git config'. The configuration values are stored in a config
file inside your BTFS repository.
Examples:
Get the value of the 'Datastore.Path' key:
$ btfs config Datastore.Path
Set the value of the 'Datastore.Path' key:
$ btfs config Datastore.Path ~/.btfs/datastore
`,
},
Subcommands: map[string]*cmds.Command{
"show": configShowCmd,
"edit": configEditCmd,
"replace": configReplaceCmd,
"profile": configProfileCmd,
"optin": optInCmd,
"optout": optOutCmd,
},
Arguments: []cmds.Argument{
cmds.StringArg("key", true, false, "The key of the config entry (e.g. \"Addresses.API\")."),
cmds.StringArg("value", false, false, "The value to set the config entry to."),
},
Options: []cmds.Option{
cmds.BoolOption(configBoolOptionName, "Set a boolean value."),
cmds.BoolOption(configJSONOptionName, "Parse stringified JSON."),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
args := req.Arguments
key := args[0]
var output *ConfigField
// This is a temporary fix until we move the private key out of the config file
switch strings.ToLower(key) {
case "identity", "identity.privkey", "identity.mnemonic", "identity.peerid":
return fmt.Errorf("cannot show or change %s through API", key)
default:
}
cfgRoot, err := cmdenv.GetConfigRoot(env)
if err != nil {
return err
}
r, err := fsrepo.Open(cfgRoot)
if err != nil {
return err
}
defer r.Close()
if len(args) == 2 {
value := args[1]
if parseJSON, _ := req.Options[configJSONOptionName].(bool); parseJSON {
var jsonVal interface{}
if err := json.Unmarshal([]byte(value), &jsonVal); err != nil {
err = fmt.Errorf("failed to unmarshal json. %s", err)
return err
}
output, err = setConfig(r, key, jsonVal)
} else if isbool, _ := req.Options[configBoolOptionName].(bool); isbool {
output, err = setConfig(r, key, value == "true")
} else {
output, err = setConfig(r, key, value)
}
if err != nil {
return err
}
if f, err := getConfig(r, "UI.Wallet.Initialized"); err == nil {
if f.Value.(bool) == true {
err := r.SetConfigKey("Identity.Mnemonic", "")
if err != nil {
return err
}
}
}
} else {
output, err = getConfig(r, key)
}
if err != nil {
return err
}
return cmds.EmitOnce(res, output)
},
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *ConfigField) error {
if len(req.Arguments) == 2 {
return nil
}
buf, err := config.HumanOutput(out.Value)
if err != nil {
return err
}
buf = append(buf, byte('\n'))
_, err = w.Write(buf)
return err
}),
},
Type: ConfigField{},
}
var configShowCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Output config file contents.",
ShortDescription: `
NOTE: For security reasons, this command will omit your private key. If you would like to make a full backup of your config (private key included), you must copy the config file from your repo.
`,
},
Type: map[string]interface{}{},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
cfgRoot, err := cmdenv.GetConfigRoot(env)
if err != nil {
return err
}
fname, err := config.Filename(cfgRoot)
if err != nil {
return err
}
data, err := ioutil.ReadFile(fname)
if err != nil {
return err
}
var cfg map[string]interface{}
err = json.Unmarshal(data, &cfg)
if err != nil {
return err
}
for _, k := range []string{config.PrivKeyTag, config.MnemonicTag} {
err = scrubValue(cfg, []string{config.IdentityTag, k})
if err != nil {
return err
}
}
return cmds.EmitOnce(res, &cfg)
},
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *map[string]interface{}) error {
buf, err := config.HumanOutput(out)
if err != nil {
return err
}
buf = append(buf, byte('\n'))
_, err = w.Write(buf)
return err
}),
},
}
func scrubValue(m map[string]interface{}, key []string) error {
find := func(m map[string]interface{}, k string) (string, interface{}, bool) {
lckey := strings.ToLower(k)
for mkey, val := range m {
lcmkey := strings.ToLower(mkey)
if lckey == lcmkey {
return mkey, val, true
}
}
return "", nil, false
}
cur := m
for _, k := range key[:len(key)-1] {
foundk, val, ok := find(cur, k)
if !ok {
return errors.New("failed to find specified key")
}
if foundk != k {
// case mismatch, calling this an error
return fmt.Errorf("case mismatch in config, expected %q but got %q", k, foundk)
}
mval, mok := val.(map[string]interface{})
if !mok {
return fmt.Errorf("%s was not a map", foundk)
}
cur = mval
}
todel, _, ok := find(cur, key[len(key)-1])
if ok {
delete(cur, todel)
}
return nil
}
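// Illustrative call: scrubValue(cfg, []string{"Identity", "PrivKey"}) walks the map
// case-insensitively and deletes the PrivKey entry under Identity; only a missing
// or case-mismatched intermediate key is reported as an error.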
var configEditCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Open the config file for editing in $EDITOR.",
ShortDescription: `
To use 'btfs config edit', you must have the $EDITOR environment
variable set to your preferred text editor.
`,
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
cfgRoot, err := cmdenv.GetConfigRoot(env)
if err != nil {
return err
}
filename, err := config.Filename(cfgRoot)
if err != nil {
return err
}
return editConfig(filename)
},
}
var configReplaceCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Replace the config with <file>.",
ShortDescription: `
Make sure to back up the config file first if necessary, as this operation
can't be undone.
`,
},
Arguments: []cmds.Argument{
cmds.FileArg("file", true, false, "The file to use as the new config."),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
cfgRoot, err := cmdenv.GetConfigRoot(env)
if err != nil {
return err
}
r, err := fsrepo.Open(cfgRoot)
if err != nil {
return err
}
defer r.Close()
file, err := cmdenv.GetFileArg(req.Files.Entries())
if err != nil {
return err
}
defer file.Close()
return replaceConfig(r, file)
},
}
var configProfileCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Apply profiles to config.",
ShortDescription: fmt.Sprintf(`
Available profiles:
%s
`, buildProfileHelp()),
},
Subcommands: map[string]*cmds.Command{
"apply": configProfileApplyCmd,
},
}
var configProfileApplyCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Apply profile to config.",
},
Options: []cmds.Option{
cmds.BoolOption(configDryRunOptionName, "print difference between the current config and the config that would be generated"),
},
Arguments: []cmds.Argument{
cmds.StringArg("profile", true, false, "The profile to apply to the config."),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
profile, ok := config.Profiles[req.Arguments[0]]
if !ok {
return fmt.Errorf("%s is not a profile", req.Arguments[0])
}
dryRun, _ := req.Options[configDryRunOptionName].(bool)
cfgRoot, err := cmdenv.GetConfigRoot(env)
if err != nil {
return err
}
oldCfg, newCfg, err := transformConfig(cfgRoot, req.Arguments[0], profile.Transform, dryRun)
if err != nil {
return err
}
oldCfgMap, err := scrubPrivKey(oldCfg)
if err != nil {
return err
}
newCfgMap, err := scrubPrivKey(newCfg)
if err != nil {
return err
}
return cmds.EmitOnce(res, &ConfigUpdateOutput{
OldCfg: oldCfgMap,
NewCfg: newCfgMap,
})
},
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *ConfigUpdateOutput) error {
diff := jsondiff.Compare(out.OldCfg, out.NewCfg)
buf := jsondiff.Format(diff)
_, err := w.Write(buf)
return err
}),
},
Type: ConfigUpdateOutput{},
}
var optInCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Opt-in enables analytic data collection (default).",
ShortDescription: `To change the setting (to opt-out), execute 'btfs config optout'.`,
LongDescription: `
'btfs config optin' controls configuration variable 'Experimental.Analytics'.
By setting the configuration value to 'true', you agree to the collection of the following data:
1. A random, generated BTFS Node ID
2. Aggregate Node Uptime
3. BTFS version; e.g. 0.1.0
4. OS Type
5. CPU Architecture Type
6. Node GPS location (longitude, latitude)
`,
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
n, err := cmdenv.GetNode(env)
if err != nil {
return err
}
config, err := n.Repo.Config()
if err != nil {
return err
}
config.Experimental.Analytics = true
var output *ConfigField
cfgRoot, err := cmdenv.GetConfigRoot(env)
if err != nil {
return err
}
r, err := fsrepo.Open(cfgRoot)
if err != nil {
return err
}
output, err = setConfig(r, "Experimental.Analytics", true)
if err != nil {
return err
}
return cmds.EmitOnce(res, output)
},
}
var optOutCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Opt-out disables collection of the analytics data (enabled by default).",
ShortDescription: `In order to opt out of the collection of analytics data, execute 'btfs config optout'.`,
LongDescription: `
'btfs config optout' controls configuration variable 'Experimental.Analytics'.
By setting the configuration value to 'false', you disable the collection of the following analytics data:
1. A random, generated BTFS Node ID
2. Aggregate Node Uptime
3. BTFS version; e.g. 0.1.0
4. OS Type
5. CPU Architecture Type
6. Node GPS location (longitude, latitude)
`,
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
n, err := cmdenv.GetNode(env)
if err != nil {
return err
}
config, err := n.Repo.Config()
if err != nil {
return err
}
config.Experimental.Analytics = false
var output *ConfigField
cfgRoot, err := cmdenv.GetConfigRoot(env)
if err != nil {
return err
}
r, err := fsrepo.Open(cfgRoot)
if err != nil {
return err
}
output, err = setConfig(r, "Experimental.Analytics", false)
if err != nil {
return err
}
return cmds.EmitOnce(res, output)
},
}
func buildProfileHelp() string {
var out string
for name, profile := range config.Profiles {
dlines := strings.Split(profile.Description, "\n")
for i := range dlines {
dlines[i] = " " + dlines[i]
}
out = out + fmt.Sprintf(" '%s':\n%s\n", name, strings.Join(dlines, "\n"))
}
return out
}
// scrubPrivKey scrubs private key for security reasons.
func scrubPrivKey(cfg *config.Config) (map[string]interface{}, error) {
cfgMap, err := config.ToMap(cfg)
if err != nil {
return nil, err
}
err = scrubValue(cfgMap, []string{config.IdentityTag, config.PrivKeyTag})
if err != nil {
return nil, err
}
return cfgMap, nil
}
// transformConfig returns both the old and the new config instead of the difference between them,
// so that the apply command can provide a stable API.
// If dryRun is true, repo's config should not be updated and persisted
// to storage. Otherwise, repo's config should be updated and persisted
// to storage.
func transformConfig(configRoot string, configName string, transformer config.Transformer, dryRun bool) (*config.Config, *config.Config, error) {
r, err := fsrepo.Open(configRoot)
if err != nil {
return nil, nil, err
}
defer r.Close()
oldCfg, err := r.Config()
if err != nil {
return nil, nil, err
}
// make a copy to avoid updating repo's config unintentionally
newCfg, err := oldCfg.Clone()
if err != nil {
return nil, nil, err
}
err = transformer(newCfg)
if err != nil {
return nil, nil, err
}
if !dryRun {
_, err = r.BackupConfig("pre-" + configName + "-")
if err != nil {
return nil, nil, err
}
err = r.SetConfig(newCfg)
if err != nil {
return nil, nil, err
}
}
return oldCfg, newCfg, nil
}
func getConfig(r repo.Repo, key string) (*ConfigField, error) {
value, err := r.GetConfigKey(key)
if err != nil {
return nil, fmt.Errorf("failed to get config value: %q", err)
}
return &ConfigField{
Key: key,
Value: value,
}, nil
}
func setConfig(r repo.Repo, key string, value interface{}) (*ConfigField, error) {
err := r.SetConfigKey(key, value)
if err != nil {
return nil, fmt.Errorf("failed to set config value: %s (maybe use --json?)", err)
}
return getConfig(r, key)
}
func editConfig(filename string) error {
editor := os.Getenv("EDITOR")
if editor == "" {
return errors.New("ENV variable $EDITOR not set")
}
cmd := exec.Command("sh", "-c", editor+" "+filename)
cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
return cmd.Run()
}
func replaceConfig(r repo.Repo, file io.Reader) error {
var cfg config.Config
if err := json.NewDecoder(file).Decode(&cfg); err != nil {
return errors.New("failed to decode file as config")
}
if len(cfg.Identity.PrivKey) != 0 {
return errors.New("setting private key with API is not supported")
}
keyF, err := getConfig(r, config.PrivKeySelector)
if err != nil {
return errors.New("failed to get PrivKey")
}
pkstr, ok := keyF.Value.(string)
if !ok {
return errors.New("private key in config was not a string")
}
cfg.Identity.PrivKey = pkstr
return r.SetConfig(&cfg)
}
| [
"\"EDITOR\""
]
| []
| [
"EDITOR"
]
| [] | ["EDITOR"] | go | 1 | 0 | |
internal/repl/commands.go | package replpkg
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"strings"
"text/tabwriter"
"time"
"go/ast"
"go/build"
"go/importer"
"go/types"
"golang.org/x/tools/go/ast/astutil"
)
type command struct {
name string
action func(*Session, string) (string, error)
complete func(*Session, string) []string
arg string
document string
}
// TODO
// - :edit
// - :undo
// - :reset
// - :type
var commands []command
func init() {
commands = []command{
{
name: "import",
action: actionImport,
complete: completeImport,
arg: "<package>",
document: "import a package",
},
{
name: "print",
action: actionPrint,
document: "print current source",
},
{
name: "write",
action: actionWrite,
complete: nil, // TODO implement
arg: "[<file>]",
document: "write out current source",
},
{
name: "help",
action: actionHelp,
document: "show this help",
},
{
name: "quit",
action: actionQuit,
document: "quit the session",
},
{
name: "containerize",
action: actionContainerize,
document: "containerize go binary",
},
}
}
func actionImport(s *Session, arg string) (string, error) {
if arg == "" {
return "", fmt.Errorf("arg required")
}
path := strings.Trim(arg, `"`)
// check if the package specified by path is importable
_, err := importer.Default().Import(path)
if err != nil {
return "", err
}
astutil.AddImport(s.Fset, s.File, path)
return "", nil
}
var gorootSrc = filepath.Join(filepath.Clean(runtime.GOROOT()), "src")
func completeImport(s *Session, prefix string) []string {
result := []string{}
seen := map[string]bool{}
d, fn := path.Split(prefix)
for _, srcDir := range build.Default.SrcDirs() {
dir := filepath.Join(srcDir, d)
if fi, err := os.Stat(dir); err != nil || !fi.IsDir() {
if err != nil && !os.IsNotExist(err) {
errorf("Stat %s: %s", dir, err.Error())
}
continue
}
entries, err := ioutil.ReadDir(dir)
if err != nil {
errorf("ReadDir %s: %s", dir, err.Error())
continue
}
for _, fi := range entries {
if !fi.IsDir() {
continue
}
name := fi.Name()
if strings.HasPrefix(name, ".") || strings.HasPrefix(name, "_") || name == "testdata" {
continue
}
if strings.HasPrefix(name, fn) {
r := path.Join(d, name)
if srcDir != gorootSrc {
// append "/" if this directory is not a repository
// e.g. does not have VCS directory such as .git or .hg
// TODO: do not append "/" to subdirectories of repos
var isRepo bool
for _, vcsDir := range []string{".git", ".hg", ".svn", ".bzr"} {
_, err := os.Stat(filepath.Join(srcDir, filepath.FromSlash(r), vcsDir))
if err == nil {
isRepo = true
break
}
}
if !isRepo {
r = r + "/"
}
}
if !seen[r] {
result = append(result, r)
seen[r] = true
}
}
}
}
return result
}
func actionPrint(s *Session, _ string) (string, error) {
source, err := s.source(true)
if err == nil {
fmt.Println(source)
}
return source, err
}
func actionWrite(s *Session, filename string) (string, error) {
source, err := s.source(false)
if err != nil {
return "", err
}
if filename == "" {
filename = fmt.Sprintf("gore_session_%s.go", time.Now().Format("20060102_150405"))
}
err = ioutil.WriteFile(filename, []byte(source), 0644)
if err != nil {
return "", err
}
	infof("Source written to %s", filename)
return "", nil
}
func actionDoc(s *Session, in string) (string, error) {
s.clearQuickFix()
s.storeMainBody()
defer s.restoreMainBody()
expr, err := s.evalExpr(in)
if err != nil {
return "", err
}
s.TypeInfo = types.Info{
Types: make(map[ast.Expr]types.TypeAndValue),
Uses: make(map[*ast.Ident]types.Object),
Defs: make(map[*ast.Ident]types.Object),
Scopes: make(map[ast.Node]*types.Scope),
}
_, err = s.Types.Check("_tmp", s.Fset, []*ast.File{s.File}, &s.TypeInfo)
if err != nil {
debugf("typecheck error (ignored): %s", err)
}
// :doc patterns:
// - "json" -> "encoding/json" (package name)
// - "json.Encoder" -> "encoding/json", "Encoder" (package member)
	// - "json.NewEncoder(nil).Encode" -> "encoding/json", "Encode" (package type member)
var docObj types.Object
if sel, ok := expr.(*ast.SelectorExpr); ok {
// package member, package type member
docObj = s.TypeInfo.ObjectOf(sel.Sel)
} else if t := s.TypeInfo.TypeOf(expr); t != nil && t != types.Typ[types.Invalid] {
for {
if pt, ok := t.(*types.Pointer); ok {
t = pt.Elem()
} else {
break
}
}
switch t := t.(type) {
case *types.Named:
docObj = t.Obj()
case *types.Basic:
// builtin types
docObj = types.Universe.Lookup(t.Name())
}
} else if ident, ok := expr.(*ast.Ident); ok {
// package name
mainScope := s.TypeInfo.Scopes[s.mainFunc().Type]
_, docObj = mainScope.LookupParent(ident.Name, ident.NamePos)
}
if docObj == nil {
return "", fmt.Errorf("cannot determine the document location")
}
debugf("doc :: obj=%#v", docObj)
var pkgPath, objName string
if pkgName, ok := docObj.(*types.PkgName); ok {
pkgPath = pkgName.Imported().Path()
} else {
if pkg := docObj.Pkg(); pkg != nil {
pkgPath = pkg.Path()
} else {
pkgPath = "builtin"
}
objName = docObj.Name()
}
debugf("doc :: %q %q", pkgPath, objName)
args := []string{pkgPath}
if objName != "" {
args = append(args, objName)
}
godoc := exec.Command("godoc", args...)
godoc.Stderr = os.Stderr
// TODO just use PAGER?
if pagerCmd := os.Getenv("GORE_PAGER"); pagerCmd != "" {
r, err := godoc.StdoutPipe()
if err != nil {
return "", err
}
pager := exec.Command(pagerCmd)
pager.Stdin = r
pager.Stdout = os.Stdout
pager.Stderr = os.Stderr
err = pager.Start()
if err != nil {
return "", err
}
err = godoc.Run()
if err != nil {
return "", err
}
return "", pager.Wait()
}
godoc.Stdout = os.Stdout
return "", godoc.Run()
}
func actionHelp(s *Session, _ string) (string, error) {
w := tabwriter.NewWriter(os.Stdout, 0, 8, 4, ' ', 0)
for _, command := range commands {
cmd := ":" + command.name
if command.arg != "" {
cmd = cmd + " " + command.arg
}
w.Write([]byte(" " + cmd + "\t" + command.document + "\n"))
}
w.Flush()
return "", nil
}
func actionQuit(s *Session, _ string) (string, error) {
return "", ErrQuit
}
func actionContainerize(s *Session, _ string) (string, error) {
// get the source code
source, err := s.source(true)
if err != nil {
return "", err
}
// create tmp directory in GOPATH
gopath := os.Getenv("GOPATH")
os.Mkdir(gopath+"/src/tmpcontainerize", 0777)
d := []byte(source)
err = ioutil.WriteFile(gopath+"/src/tmpcontainerize/containerize.go", d, 0777)
if err != nil {
return "", err
}
cmd := exec.Command("go", "install", "tmpcontainerize")
err = cmd.Run()
if err != nil {
panic(err)
}
// dockerize
dockerfile := `
FROM scratch
ADD tmpcontainerize /tmpcontainerize
CMD ["/tmpcontainerize"]
`
d = []byte(dockerfile)
	err = ioutil.WriteFile(gopath+"/bin/Dockerfile", d, 0777)
	if err != nil {
		return "", err
	}
	out, err := exec.Command("uuidgen").Output()
	if err != nil {
		return "", err
	}
	// uuidgen output ends with a trailing newline; strip it
	containerid := string(out)
	containerid = containerid[0 : len(containerid)-1]
fmt.Println("Dockerizing")
cmd = exec.Command("docker", "build", "-t", containerid, gopath+"/bin/")
err = cmd.Run()
if err != nil {
fmt.Println(err)
panic(err)
}
fmt.Println("removing src")
// now that we have the binary, remove the tmp src
err = os.RemoveAll(gopath + "/src/tmpcontainerize/")
if err != nil {
return "", err
}
	fmt.Println("removing Dockerfile")
err = os.Remove(gopath + "/bin/Dockerfile")
if err != nil {
return "", err
}
return "", nil
}
| [
"\"GORE_PAGER\"",
"\"GOPATH\""
]
| []
| [
"GOPATH",
"GORE_PAGER"
]
| [] | ["GOPATH", "GORE_PAGER"] | go | 2 | 0 | |
config.go | package main
import (
"context"
"encoding/base64"
"github.com/BHunter2889/da-fish-alexa/alexa"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/kms"
"github.com/aws/aws-xray-sdk-go/xray"
"log"
"os"
"sync"
"gopkg.in/tomb.v2"
)
type BugCasterConfig struct {
AlexaApiUrl string
AlexaLocEndpoint string
GeoKey string
GeoUrl string
FishRatingUrl string
t tomb.Tomb
}
type KMSDecryptTomb struct {
ctx context.Context
s string
Ch chan string
t tomb.Tomb
}
// Defining as constants rather than reading from config file until resource monitoring is set up
// AlexaApiBaseUrl = "https://api.amazonalexa.com" --- US Endpoint. Will grab this from the incoming Request payload.
// %s - reserved for DeviceId
const AlexaLocEndpoint = "/v1/devices/%s/settings/address/countryAndPostalCode"
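// A hypothetical request URL built from the endpoint above; the device ID is
// a placeholder and the base URL is taken from the incoming request payload
// at runtime:
//
//	locURL := alexaBaseURL + fmt.Sprintf(AlexaLocEndpoint, deviceID)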
var (
KMS *kms.KMS
sess = session.Must(session.NewSession())
wg sync.WaitGroup
chanFR <-chan string
chanGK <-chan string
tombFR *KMSDecryptTomb
tombGK *KMSDecryptTomb
t tomb.Tomb
)
type AlexaRequestHandler func(context.Context, alexa.Request) (alexa.Response, error)
// Wrap The Handler so that we can use context to do some config BEFORE proceeding with handler.
func ContextConfigWrapper(h AlexaRequestHandler) AlexaRequestHandler {
return func(ctx context.Context, request alexa.Request) (response alexa.Response, err error) {
log.Print(request)
// Put up a Border Wall (which they can very easily get around)
if request.Body.Locale != "en-US" && request.Body.Locale != "en-CA" {
return alexa.NewUnsupportedLocationResponse(), nil
}
// If this is a Launch Request, we don't need Config at all, so kick it back out before it causes problems
if request.Body.Type == "LaunchRequest" {
return HandleLaunchRequest(request), nil
}
defer func() {
if r := recover(); r != nil {
log.Print("CONTEXT WRAPPER PANIC")
log.Print(err)
log.Print(r)
response = alexa.NewDefaultErrorResponse()
}
}()
log.Print(ctx)
NewBugCasterConfig(ctx)
response, err = h(ctx, request)
if err != nil {
log.Print(err)
panic(err.Error())
}
log.Print(response)
return response, nil
}
}
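// A minimal wiring sketch (hypothetical; the real Lambda entry point lives in
// another file and is not shown here). IntentDispatcher stands for any
// AlexaRequestHandler:
//
//	func main() {
//		lambda.Start(ContextConfigWrapper(IntentDispatcher))
//	}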
// We want this in a channel
// Logging For Demo Purposes
func (kdt *KMSDecryptTomb) decrypt() error {
defer wg.Done()
log.Print("New Decrypt...")
decodedBytes, err := base64.StdEncoding.DecodeString(kdt.s)
if err != nil {
log.Print(err)
// Conditional Exists here solely for Demoing Context Cancellation.
if err.Error() == request.CanceledErrorCode {
close(kdt.Ch)
log.Print("Context closed while in Decrypt, closed channel.")
return err
} else {
close(kdt.Ch)
return err
}
}
input := &kms.DecryptInput{
CiphertextBlob: decodedBytes,
}
log.Print("Calling KMS Decryption Service...")
response, err := KMS.DecryptWithContext(kdt.ctx, input)
// Conditional Exists here solely for Demoing Context Cancellation.
if err != nil && err.Error() == request.CanceledErrorCode {
close(kdt.Ch)
log.Print("Context closed while in Decrypt, closed channel.")
return err
} else if err != nil {
close(kdt.Ch)
return err
}
	log.Print("Finished a KMS decryption goroutine.")
// Listen for either successful decryption or a Context Cancellation related event.
// Plaintext is a byte array, so convert to string
select {
case kdt.Ch <- string(response.Plaintext[:]) :
log.Print("KMS Response Channel select")
log.Print(string(response.Plaintext[:]))
return nil
case <-kdt.t.Dying():
log.Print("KMS Tomb Dying... ")
close(kdt.Ch)
return nil
}
}
func (kdt *KMSDecryptTomb) Stop() error {
kdt.t.Kill(nil)
return kdt.t.Wait()
}
func NewKMS() *kms.KMS {
log.Print("Init KMS Config")
c := kms.New(sess)
xray.AWS(c.Client)
return c
}
func NewBugCasterConfig(ctx context.Context) {
log.Print("NewBugCasterConfig")
KMS = NewKMS()
cfg = new(BugCasterConfig)
cfg.LoadConfig(ctx)
}
func NewKMSDecryptTomb(ctx context.Context, s string) *KMSDecryptTomb {
kdt := &KMSDecryptTomb{
ctx: ctx,
s: s,
Ch: make(chan string),
}
kdt.t.Go(kdt.decrypt)
return kdt
}
func KMSDecrytiponWaiter() {
log.Print("Waiting on KMS Decryption...")
cfg.FishRatingUrl = <-tombFR.Ch
log.Printf("FRU: %s", cfg.FishRatingUrl)
cfg.GeoKey = <-tombGK.Ch
log.Printf("GK: %s", cfg.GeoKey)
wg.Wait()
log.Print("Done Waiting On KMS Decryption.")
}
func init() {
log.Print("Init Xray in Config")
err := xray.Configure(xray.Config{
LogLevel: "info",
})
log.Print(err)
}
func (cfg *BugCasterConfig) LoadConfig(ctx context.Context) {
log.Print("Begin LoadConfig")
wg.Add(2)
cfg.AlexaLocEndpoint = AlexaLocEndpoint
tombFR = NewKMSDecryptTomb(ctx, os.Getenv("FISH_RATING_SERVICE_URL"))
tombGK = NewKMSDecryptTomb(ctx, os.Getenv("GEO_KEY"))
cfg.GeoUrl = os.Getenv("GEO_SERVICE_URL")
}
| [
"\"FISH_RATING_SERVICE_URL\"",
"\"GEO_KEY\"",
"\"GEO_SERVICE_URL\""
]
| []
| [
"GEO_SERVICE_URL",
"GEO_KEY",
"FISH_RATING_SERVICE_URL"
]
| [] | ["GEO_SERVICE_URL", "GEO_KEY", "FISH_RATING_SERVICE_URL"] | go | 3 | 0 | |
main.go | package main
import (
"io/ioutil"
"log"
"os"
"github.com/goapt/dotenv"
"github.com/urfave/cli"
"github.com/fifsky/drone-wechat-work/wechat"
)
func main() {
log.Println("Start notify")
if _, err := os.Stat("/run/drone/env"); err == nil {
_ = dotenv.Overload("/run/drone/env")
str, _ := ioutil.ReadFile("/run/drone/env")
log.Println(string(str))
}
app := cli.NewApp()
	app.Name = "WeChat Work robot plugin"
	app.Usage = "WeChat Work robot plugin"
app.Action = run
app.Version = "1.0.0"
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "url",
Usage: "The wechat work robot url",
EnvVar: "PLUGIN_URL",
},
cli.StringFlag{
Name: "msgtype",
Usage: "The type of message, either text, markdown",
Value: "text",
EnvVar: "PLUGIN_MSGTYPE",
},
cli.StringFlag{
Name: "touser",
Usage: "The users to send the message to, @all for all users",
Value: "@all",
EnvVar: "PLUGIN_TOUSER",
},
cli.StringFlag{
Name: "content",
Usage: "message content",
EnvVar: "PLUGIN_CONTENT",
},
// template
cli.StringFlag{
Name: "repo.owner",
Usage: "repository owner",
EnvVar: "DRONE_REPO_OWNER",
},
cli.StringFlag{
Name: "repo.name",
Usage: "repository name",
EnvVar: "DRONE_REPO_NAME",
},
cli.StringFlag{
Name: "commit.sha",
Usage: "git commit sha",
EnvVar: "DRONE_COMMIT_SHA",
},
cli.StringFlag{
Name: "commit.ref",
Value: "refs/heads/master",
Usage: "git commit ref",
EnvVar: "DRONE_COMMIT_REF",
},
cli.StringFlag{
Name: "commit.branch",
Value: "master",
Usage: "git commit branch",
EnvVar: "DRONE_COMMIT_BRANCH",
},
cli.StringFlag{
Name: "commit.author",
Usage: "git author name",
EnvVar: "DRONE_COMMIT_AUTHOR",
},
cli.StringFlag{
Name: "commit.message",
Usage: "commit message",
EnvVar: "DRONE_COMMIT_MESSAGE",
},
cli.StringFlag{
Name: "failed.steps",
Usage: "failed steps",
EnvVar: "DRONE_FAILED_STEPS",
},
cli.StringFlag{
Name: "build.event",
Value: "push",
Usage: "build event",
EnvVar: "DRONE_BUILD_EVENT",
},
cli.IntFlag{
Name: "build.number",
Usage: "build number",
EnvVar: "DRONE_BUILD_NUMBER",
},
cli.StringFlag{
Name: "build.status",
Usage: "build status",
Value: "success",
EnvVar: "DRONE_BUILD_STATUS",
},
cli.StringFlag{
Name: "build.link",
Usage: "build link",
EnvVar: "DRONE_BUILD_LINK",
},
cli.Int64Flag{
Name: "build.started",
Usage: "build started",
EnvVar: "DRONE_BUILD_STARTED",
},
cli.Int64Flag{
Name: "build.created",
Usage: "build created",
EnvVar: "DRONE_BUILD_CREATED",
},
cli.StringFlag{
Name: "build.tag",
Usage: "build tag",
EnvVar: "DRONE_TAG",
},
}
if err := app.Run(os.Args); err != nil {
log.Fatal(err)
}
}
func run(c *cli.Context) error {
robot := wechat.WeChat{
Build: wechat.Build{
Owner: c.String("repo.owner"),
Name: c.String("repo.name"),
Tag: c.String("build.tag"),
Number: c.Int("build.number"),
Event: c.String("build.event"),
Status: c.String("build.status"),
Commit: c.String("commit.sha"),
Ref: c.String("commit.ref"),
Branch: c.String("commit.branch"),
Author: c.String("commit.author"),
Message: c.String("commit.message"),
Link: c.String("build.link"),
Started: c.Int64("build.started"),
Created: c.Int64("build.created"),
},
Url: c.String("url"),
MsgType: c.String("msgtype"),
ToUser: c.String("touser"),
Content: c.String("content"),
}
err := robot.Send()
if err != nil {
log.Println("notify fail", err)
} else {
log.Println("notify success, DRONE_BUILD_STATUS:", os.Getenv("DRONE_BUILD_STATUS"))
}
return err
}
| [
"\"DRONE_BUILD_STATUS\""
]
| []
| [
"DRONE_BUILD_STATUS"
]
| [] | ["DRONE_BUILD_STATUS"] | go | 1 | 0 | |
src/depslib/deps.go | /*---------------------------------------------------------------------------------------------
* Copyright (c) Peter Bjorklund. All rights reserved.
* Licensed under the MIT License. See LICENSE in the project root for license information.
*--------------------------------------------------------------------------------------------*/
package depslib
import (
"fmt"
"io"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"github.com/blang/semver"
)
type Mode uint8
const (
Wget Mode = iota
Clone
Symlink
)
func HackRemoveCShortName(shortname string) string {
if strings.HasSuffix(shortname, "-c") {
return shortname[:len(shortname)-2]
}
return shortname
}
func symlinkRepo(rootPath string, depsPath string, repoName string) error {
shortName := RepoNameToShortName(repoName)
packageDir := path.Join(rootPath, shortName+"/")
targetNameInDeps := path.Join(depsPath, shortName)
_, statErr := os.Stat(targetNameInDeps)
if statErr == nil {
return fmt.Errorf("there is already something at target '%v', can not create link", targetNameInDeps)
}
fmt.Printf("symlink '%v' to '%v'\n", packageDir, targetNameInDeps)
makeErr := MakeSymlink(packageDir, targetNameInDeps)
if makeErr != nil {
return makeErr
}
return nil
//return symlinkSrcInclude(packageDir, depsPath, shortName)
}
func wgetRepo(rootPath string, depsPath string, repoName string) error {
downloadURLString := fmt.Sprintf("https://%vgithub.com/%v/archive/master.zip", gitRepoPrefix(), repoName)
downloadURL, parseErr := url.Parse(downloadURLString)
if parseErr != nil {
return parseErr
}
contentReader, downloadErr := HTTPGet(downloadURL)
if downloadErr != nil {
return downloadErr
}
targetFile, createErr := os.Create("temp.zip")
if createErr != nil {
return createErr
}
_, copyErr := io.Copy(targetFile, contentReader)
if copyErr != nil {
return copyErr
}
contentReader.(io.Closer).Close()
targetFile.Close()
shortName := RepoNameToShortName(repoName)
targetDirectory := path.Join(depsPath, shortName)
zipPrefix := fmt.Sprintf("%v-master/", shortName)
unzipErr := unzipFile("temp.zip", targetDirectory, zipPrefix)
if unzipErr != nil {
return unzipErr
}
return nil
}
func gitClone(depsPath string, repoName string, shortName string) error {
downloadURLString := fmt.Sprintf("https://%vgithub.com/%v.git", gitRepoPrefix(), repoName)
downloadURL, parseErr := url.Parse(downloadURLString)
if parseErr != nil {
return parseErr
}
fmt.Printf("git clone from '%v' to %v\n", downloadURL, shortName)
cmd := exec.Command("git", "clone", downloadURL.String(), shortName)
cmd.Dir = depsPath
	return cmd.Run()
}
func gitPull(targetDirectory string, repoName string) error {
fmt.Printf("git pull %v %v\n", repoName, targetDirectory)
cmd := exec.Command("git", "pull")
cmd.Dir = targetDirectory
	return cmd.Run()
}
func gitRepoPrefix() string {
token := os.Getenv("GITHUB_TOKEN")
if token == "" {
return ""
}
fmt.Printf("found secret GITHUB_TOKEN\n")
return fmt.Sprintf("%v@", token)
}
func directoryExists(directory string) bool {
stat, checkDirectoryErr := os.Lstat(directory)
return checkDirectoryErr == nil && stat.IsDir()
}
func cloneOrPullRepo(targetDirectory string, depsPath string, repoName string, shortName string) error {
checkDirectory := path.Join(targetDirectory, ".git")
if directoryExists(checkDirectory) {
return gitPull(targetDirectory, repoName)
}
return gitClone(depsPath, repoName, shortName)
}
func copyDependency(rootPath string, depsPath string, repoName string, mode Mode) error {
shortName := RepoNameToShortName(repoName)
targetDirectory := path.Join(depsPath, shortName)
fmt.Printf("copy from '%v' to '%v'\n", shortName, targetDirectory)
if mode != Symlink {
os.MkdirAll(targetDirectory, 0755)
}
switch mode {
case Symlink:
return symlinkRepo(rootPath, depsPath, repoName)
case Clone:
return cloneOrPullRepo(targetDirectory, depsPath, repoName, shortName)
case Wget:
return wgetRepo(rootPath, depsPath, repoName)
default:
return fmt.Errorf("unknown mode")
}
}
func establishPackageAndReadConfig(rootPath string, depsPath string, packageName string, mode Mode) (*Config, error) {
copyErr := copyDependency(rootPath, depsPath, packageName, mode)
if copyErr != nil {
return nil, copyErr
}
directoryName := RepoNameToShortName(packageName)
packageDirectory := path.Join(depsPath, directoryName)
conf, confErr := ReadConfigFromDirectory(packageDirectory)
if confErr != nil {
return nil, confErr
}
if conf.Name != packageName {
return nil, fmt.Errorf("name mismatch %v vs %v", conf.Name, packageName)
}
return conf, confErr
}
type DependencyNode struct {
name string
version semver.Version
artifactType ArtifactType
dependencies []*DependencyNode
development []*DependencyNode
dependingOnThis []*DependencyNode
}
func (n *DependencyNode) Name() string {
return n.name
}
func (n *DependencyNode) ArtifactType() ArtifactType {
return n.artifactType
}
func (n *DependencyNode) ShortName() string {
return RepoNameToShortName(n.name)
}
func (n *DependencyNode) Dependencies() []*DependencyNode {
return n.dependencies
}
func (n *DependencyNode) AddDependingOnThis(node *DependencyNode) {
n.dependingOnThis = append(n.dependingOnThis, node)
}
func (n *DependencyNode) AddDependency(node *DependencyNode) {
n.dependencies = append(n.dependencies, node)
node.AddDependingOnThis(n)
}
func (n *DependencyNode) AddDevelopment(node *DependencyNode) {
n.development = append(n.development, node)
}
func (n *DependencyNode) String() string {
return fmt.Sprintf("node %v %v", n.name, n.version)
}
func (n *DependencyNode) Print(indent int) {
indentString := strings.Repeat("..", indent)
fmt.Printf("%s %v\n", indentString, n)
for _, depNode := range n.dependencies {
depNode.Print(indent + 1)
}
}
type DependencyInfo struct {
RootPath string
PackageRootPath string
RootNodes []*DependencyNode
RootNode *DependencyNode
}
type Cache struct {
nodes map[string]*DependencyNode
}
func NewCache() *Cache {
return &Cache{nodes: make(map[string]*DependencyNode)}
}
func (c *Cache) FindNode(name string) *DependencyNode {
return c.nodes[name]
}
func (c *Cache) AddNode(name string, node *DependencyNode) {
c.nodes[name] = node
}
func handleNode(rootPath string, depsPath string, node *DependencyNode, cache *Cache, depName string, mode Mode) (*DependencyNode, error) {
foundNode := cache.FindNode(depName)
if foundNode == nil {
depConf, confErr := establishPackageAndReadConfig(rootPath, depsPath, depName, mode)
if confErr != nil {
return nil, confErr
}
var convertErr error
foundNode, convertErr = convertFromConfigNode(rootPath, depsPath, depConf, cache, mode)
if convertErr != nil {
return nil, convertErr
}
}
return foundNode, nil
}
type ArtifactType uint
const (
Library ArtifactType = iota
ConsoleApplication
Application
Inherit
)
func ToArtifactType(v string) ArtifactType {
if v == "lib" {
return Library
}
if v == "console" {
return ConsoleApplication
}
if v == "executable" {
return Application
}
return Library
}
func convertFromConfigNode(rootPath string, depsPath string, conf *Config, cache *Cache, mode Mode) (*DependencyNode, error) {
artifactType := ToArtifactType(conf.ArtifactType)
node := &DependencyNode{name: conf.Name, version: semver.MustParse(conf.Version), artifactType: artifactType}
cache.AddNode(conf.Name, node)
for _, dep := range conf.Dependencies {
foundNode, handleErr := handleNode(rootPath, depsPath, node, cache, dep.Name, mode)
if handleErr != nil {
return nil, handleErr
}
node.AddDependency(foundNode)
}
const useDevelopmentDependencies = true
if useDevelopmentDependencies {
for _, dep := range conf.Development {
_, handleErr := handleNode(rootPath, depsPath, node, cache, dep.Name, mode)
if handleErr != nil {
return nil, handleErr
}
//node.AddDevelopment(foundNode)
}
}
return node, nil
}
func calculateTotalDependencies(rootPath string, depsPath string, conf *Config, mode Mode) (*Cache, *DependencyNode, error) {
cache := NewCache()
rootNode, rootNodeErr := convertFromConfigNode(rootPath, depsPath, conf, cache, mode)
return cache, rootNode, rootNodeErr
}
func SetupDependencies(filename string, mode Mode, forceClean bool) (*DependencyInfo, error) {
conf, confErr := ReadConfigFromFilename(filename)
if confErr != nil {
return nil, confErr
}
packageRootPath := path.Dir(filename)
rootPath := path.Dir(packageRootPath)
depsPath := filepath.Join(packageRootPath, "deps/")
if mode != Clone || forceClean {
if err := BackupDeps(depsPath); err != nil {
return nil, err
}
}
os.Mkdir(depsPath, 0755)
cache, rootNode, rootNodeErr := calculateTotalDependencies(rootPath, depsPath, conf, mode)
if rootNodeErr != nil {
return nil, rootNodeErr
}
var rootNodes []*DependencyNode
for _, node := range cache.nodes {
if node.name == rootNode.name {
continue
}
rootNodes = append(rootNodes, node)
}
//rootNode.Print(0)
info := &DependencyInfo{RootPath: rootPath, PackageRootPath: packageRootPath, RootNode: rootNode, RootNodes: rootNodes}
return info, nil
}
| [
"\"GITHUB_TOKEN\""
]
| []
| [
"GITHUB_TOKEN"
]
| [] | ["GITHUB_TOKEN"] | go | 1 | 0 | |
bin/get-github-release.go | //go:build ignore
// +build ignore
// Get the latest release from a github project
//
// If GITHUB_USER and GITHUB_TOKEN are set then these will be used to
// authenticate the request which is useful to avoid rate limits.
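//
// Usage sketch (the flags are defined below; the project and pattern shown
// here are examples only):
//
//	go run get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar\.gz'
//	go run get-github-release.go -install <user/project> <name regexp>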
package main
import (
"archive/tar"
"compress/bzip2"
"compress/gzip"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"time"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/net/html"
"golang.org/x/sys/unix"
)
var (
// Flags
install = flag.Bool("install", false, "Install the downloaded package using sudo dpkg -i.")
extract = flag.String("extract", "", "Extract the named executable from the .tar.gz and install into bindir.")
bindir = flag.String("bindir", defaultBinDir(), "Directory to install files downloaded with -extract.")
useAPI = flag.Bool("use-api", false, "Use the API for finding the release instead of scraping the page.")
// Globals
matchProject = regexp.MustCompile(`^([\w-]+)/([\w-]+)$`)
osAliases = map[string][]string{
"darwin": {"macos", "osx"},
}
archAliases = map[string][]string{
"amd64": {"x86_64"},
}
)
// A github release
//
// Made by pasting the JSON into https://mholt.github.io/json-to-go/
type Release struct {
URL string `json:"url"`
AssetsURL string `json:"assets_url"`
UploadURL string `json:"upload_url"`
HTMLURL string `json:"html_url"`
ID int `json:"id"`
TagName string `json:"tag_name"`
TargetCommitish string `json:"target_commitish"`
Name string `json:"name"`
Draft bool `json:"draft"`
Author struct {
Login string `json:"login"`
ID int `json:"id"`
AvatarURL string `json:"avatar_url"`
GravatarID string `json:"gravatar_id"`
URL string `json:"url"`
HTMLURL string `json:"html_url"`
FollowersURL string `json:"followers_url"`
FollowingURL string `json:"following_url"`
GistsURL string `json:"gists_url"`
StarredURL string `json:"starred_url"`
SubscriptionsURL string `json:"subscriptions_url"`
OrganizationsURL string `json:"organizations_url"`
ReposURL string `json:"repos_url"`
EventsURL string `json:"events_url"`
ReceivedEventsURL string `json:"received_events_url"`
Type string `json:"type"`
SiteAdmin bool `json:"site_admin"`
} `json:"author"`
Prerelease bool `json:"prerelease"`
CreatedAt time.Time `json:"created_at"`
PublishedAt time.Time `json:"published_at"`
Assets []struct {
URL string `json:"url"`
ID int `json:"id"`
Name string `json:"name"`
Label string `json:"label"`
Uploader struct {
Login string `json:"login"`
ID int `json:"id"`
AvatarURL string `json:"avatar_url"`
GravatarID string `json:"gravatar_id"`
URL string `json:"url"`
HTMLURL string `json:"html_url"`
FollowersURL string `json:"followers_url"`
FollowingURL string `json:"following_url"`
GistsURL string `json:"gists_url"`
StarredURL string `json:"starred_url"`
SubscriptionsURL string `json:"subscriptions_url"`
OrganizationsURL string `json:"organizations_url"`
ReposURL string `json:"repos_url"`
EventsURL string `json:"events_url"`
ReceivedEventsURL string `json:"received_events_url"`
Type string `json:"type"`
SiteAdmin bool `json:"site_admin"`
} `json:"uploader"`
ContentType string `json:"content_type"`
State string `json:"state"`
Size int `json:"size"`
DownloadCount int `json:"download_count"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
BrowserDownloadURL string `json:"browser_download_url"`
} `json:"assets"`
TarballURL string `json:"tarball_url"`
ZipballURL string `json:"zipball_url"`
Body string `json:"body"`
}
// checks if a path has write access
func writable(path string) bool {
return unix.Access(path, unix.W_OK) == nil
}
// Directory to install releases in by default
//
// Find writable directories on $PATH. Use $GOPATH/bin if that is on
// the path and writable or use the first writable directory which is
// in $HOME or failing that the first writable directory.
//
// Returns "" if none of the above were found
func defaultBinDir() string {
home := os.Getenv("HOME")
var (
bin string
homeBin string
goHomeBin string
gopath = os.Getenv("GOPATH")
)
for _, dir := range strings.Split(os.Getenv("PATH"), ":") {
if writable(dir) {
if strings.HasPrefix(dir, home) {
				// remember the first writable directory under $HOME
				if homeBin == "" {
homeBin = dir
}
if gopath != "" && strings.HasPrefix(dir, gopath) && goHomeBin == "" {
goHomeBin = dir
}
}
if bin == "" {
bin = dir
}
}
}
if goHomeBin != "" {
return goHomeBin
}
if homeBin != "" {
return homeBin
}
return bin
}
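// Illustrative behaviour of defaultBinDir (paths are examples only): with
// HOME=/home/me, GOPATH=/home/me/go and PATH=/usr/local/bin:/home/me/bin:/home/me/go/bin,
// it returns /home/me/go/bin when that directory is writable, otherwise
// /home/me/bin, otherwise the first writable entry on PATH.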
// read the body or an error message
func readBody(in io.Reader) string {
data, err := ioutil.ReadAll(in)
if err != nil {
return fmt.Sprintf("Error reading body: %v", err.Error())
}
return string(data)
}
// Get an asset URL and name
func getAsset(project string, matchName *regexp.Regexp) (string, string) {
url := "https://api.github.com/repos/" + project + "/releases/latest"
log.Printf("Fetching asset info for %q from %q", project, url)
user, pass := os.Getenv("GITHUB_USER"), os.Getenv("GITHUB_TOKEN")
req, err := http.NewRequest("GET", url, nil)
if err != nil {
log.Fatalf("Failed to make http request %q: %v", url, err)
}
if user != "" && pass != "" {
log.Printf("Fetching using GITHUB_USER and GITHUB_TOKEN")
req.SetBasicAuth(user, pass)
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
log.Fatalf("Failed to fetch release info %q: %v", url, err)
}
if resp.StatusCode != http.StatusOK {
log.Printf("Error: %s", readBody(resp.Body))
log.Fatalf("Bad status %d when fetching %q release info: %s", resp.StatusCode, url, resp.Status)
}
var release Release
err = json.NewDecoder(resp.Body).Decode(&release)
if err != nil {
log.Fatalf("Failed to decode release info: %v", err)
}
err = resp.Body.Close()
if err != nil {
log.Fatalf("Failed to close body: %v", err)
}
for _, asset := range release.Assets {
//log.Printf("Finding %s", asset.Name)
if matchName.MatchString(asset.Name) && isOurOsArch(asset.Name) {
return asset.BrowserDownloadURL, asset.Name
}
}
log.Fatalf("Didn't find asset in info")
return "", ""
}
// Get an asset URL and name by scraping the downloads page
//
// This doesn't use the API so isn't rate limited when not using GITHUB login details
func getAssetFromReleasesPage(project string, matchName *regexp.Regexp) (assetURL string, assetName string) {
baseURL := "https://github.com/" + project + "/releases"
log.Printf("Fetching asset info for %q from %q", project, baseURL)
base, err := url.Parse(baseURL)
if err != nil {
log.Fatalf("URL Parse failed: %v", err)
}
resp, err := http.Get(baseURL)
if err != nil {
log.Fatalf("Failed to fetch release info %q: %v", baseURL, err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
log.Printf("Error: %s", readBody(resp.Body))
log.Fatalf("Bad status %d when fetching %q release info: %s", resp.StatusCode, baseURL, resp.Status)
}
doc, err := html.Parse(resp.Body)
if err != nil {
log.Fatalf("Failed to parse web page: %v", err)
}
var walk func(*html.Node)
walk = func(n *html.Node) {
if n.Type == html.ElementNode && n.Data == "a" {
for _, a := range n.Attr {
if a.Key == "href" {
if name := path.Base(a.Val); matchName.MatchString(name) && isOurOsArch(name) {
if u, err := rest.URLJoin(base, a.Val); err == nil {
if assetName == "" {
assetName = name
assetURL = u.String()
}
}
}
break
}
}
}
for c := n.FirstChild; c != nil; c = c.NextSibling {
walk(c)
}
}
walk(doc)
if assetName == "" || assetURL == "" {
log.Fatalf("Didn't find URL in page")
}
return assetURL, assetName
}
// isOurOsArch returns true if s contains our OS and our Arch
func isOurOsArch(s string) bool {
s = strings.ToLower(s)
check := func(base string, aliases map[string][]string) bool {
names := []string{base}
names = append(names, aliases[base]...)
for _, name := range names {
if strings.Contains(s, name) {
return true
}
}
return false
}
return check(runtime.GOARCH, archAliases) && check(runtime.GOOS, osAliases)
}
// get a file for download
func getFile(url, fileName string) {
log.Printf("Downloading %q from %q", fileName, url)
out, err := os.Create(fileName)
if err != nil {
log.Fatalf("Failed to open %q: %v", fileName, err)
}
resp, err := http.Get(url)
if err != nil {
log.Fatalf("Failed to fetch asset %q: %v", url, err)
}
if resp.StatusCode != http.StatusOK {
log.Printf("Error: %s", readBody(resp.Body))
log.Fatalf("Bad status %d when fetching %q asset: %s", resp.StatusCode, url, resp.Status)
}
n, err := io.Copy(out, resp.Body)
if err != nil {
log.Fatalf("Error while downloading: %v", err)
}
err = resp.Body.Close()
if err != nil {
log.Fatalf("Failed to close body: %v", err)
}
err = out.Close()
if err != nil {
log.Fatalf("Failed to close output file: %v", err)
}
log.Printf("Downloaded %q (%d bytes)", fileName, n)
}
// run a shell command
func run(args ...string) {
cmd := exec.Command(args[0], args[1:]...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
if err != nil {
log.Fatalf("Failed to run %v: %v", args, err)
}
}
// Untars fileName from srcFile
func untar(srcFile, fileName, extractDir string) {
f, err := os.Open(srcFile)
if err != nil {
log.Fatalf("Couldn't open tar: %v", err)
}
defer func() {
err := f.Close()
if err != nil {
log.Fatalf("Couldn't close tar: %v", err)
}
}()
var in io.Reader = f
srcExt := filepath.Ext(srcFile)
if srcExt == ".gz" || srcExt == ".tgz" {
gzf, err := gzip.NewReader(f)
if err != nil {
log.Fatalf("Couldn't open gzip: %v", err)
}
in = gzf
} else if srcExt == ".bz2" {
in = bzip2.NewReader(f)
}
tarReader := tar.NewReader(in)
for {
header, err := tarReader.Next()
if err == io.EOF {
break
}
if err != nil {
log.Fatalf("Trouble reading tar file: %v", err)
}
name := header.Name
switch header.Typeflag {
case tar.TypeReg:
baseName := filepath.Base(name)
if baseName == fileName {
outPath := filepath.Join(extractDir, fileName)
out, err := os.OpenFile(outPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0777)
if err != nil {
log.Fatalf("Couldn't open output file: %v", err)
}
n, err := io.Copy(out, tarReader)
if err != nil {
log.Fatalf("Couldn't write output file: %v", err)
}
if err = out.Close(); err != nil {
log.Fatalf("Couldn't close output: %v", err)
}
log.Printf("Wrote %s (%d bytes) as %q", fileName, n, outPath)
}
}
}
}
func main() {
flag.Parse()
args := flag.Args()
if len(args) != 2 {
log.Fatalf("Syntax: %s <user/project> <name reg exp>", os.Args[0])
}
project, nameRe := args[0], args[1]
if !matchProject.MatchString(project) {
log.Fatalf("Project %q must be in form user/project", project)
}
matchName, err := regexp.Compile(nameRe)
if err != nil {
log.Fatalf("Invalid regexp for name %q: %v", nameRe, err)
}
var assetURL, assetName string
if *useAPI {
assetURL, assetName = getAsset(project, matchName)
} else {
assetURL, assetName = getAssetFromReleasesPage(project, matchName)
}
fileName := filepath.Join(os.TempDir(), assetName)
getFile(assetURL, fileName)
if *install {
log.Printf("Installing %s", fileName)
run("sudo", "dpkg", "--force-bad-version", "-i", fileName)
log.Printf("Installed %s", fileName)
} else if *extract != "" {
if *bindir == "" {
log.Fatalf("Need to set -bindir")
}
log.Printf("Unpacking %s from %s and installing into %s", *extract, fileName, *bindir)
untar(fileName, *extract, *bindir+"/")
}
}
| [
"\"HOME\"",
"\"GOPATH\"",
"\"PATH\"",
"\"GITHUB_USER\"",
"\"GITHUB_TOKEN\""
]
| []
| [
"GITHUB_USER",
"GOPATH",
"HOME",
"PATH",
"GITHUB_TOKEN"
]
| [] | ["GITHUB_USER", "GOPATH", "HOME", "PATH", "GITHUB_TOKEN"] | go | 5 | 0 | |
watch.go | // Package watcher watches all file changes via fsnotify package and sends
// update events to builder
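//
// A minimal usage sketch (assuming a *Params value that supplies the "watch"
// and "watch-vendor" keys):
//
//	w := MustRegisterWatcher(params)
//	defer w.Close()
//	go w.Watch()
//	for range w.Wait() {
//		// a watched file changed: trigger a rebuild
//	}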
package watcher
import (
"errors"
"fmt"
"log"
"os"
"path/filepath"
"strconv"
"strings"
"time"
fsnotify "gopkg.in/fsnotify.v1"
)
// ErrPathNotSet is returned when the GOPATH environment variable is not set.
var ErrPathNotSet = errors.New("gopath not set")
var watchedFileExt = []string{".go", ".tmpl", ".tpl", ".html"}
var watchDelta = 1000 * time.Millisecond
// Watcher watches the file change events from fsnotify and
// sends update messages. It is also used as a fsnotify.Watcher wrapper
type Watcher struct {
rootdir string
watcher *fsnotify.Watcher
watchVendor bool
// when a file gets changed a message is sent to the update channel
update chan struct{}
}
// MustRegisterWatcher creates a new Watcher and starts listening to
// given folders
func MustRegisterWatcher(params *Params) *Watcher {
watchVendorStr := params.Get("watch-vendor")
var watchVendor bool
var err error
if watchVendorStr != "" {
watchVendor, err = strconv.ParseBool(watchVendorStr)
if err != nil {
log.Printf("Wrong watch-vendor value: %s (default=false)\n", watchVendorStr)
}
}
w := &Watcher{
update: make(chan struct{}),
rootdir: params.Get("watch"),
watchVendor: watchVendor,
}
w.watcher, err = fsnotify.NewWatcher()
if err != nil {
log.Fatalf("Could not register watcher: %s", err)
}
// add folders that will be watched
w.watchFolders()
return w
}
// Watch listens file updates, and sends signal to
// update channel when .go and .tmpl files are updated
func (w *Watcher) Watch() {
eventSent := false
for {
select {
case event := <-w.watcher.Events:
// discard chmod events
if event.Op&fsnotify.Chmod != fsnotify.Chmod {
// test files do not need a rebuild
if isTestFile(event.Name) {
continue
}
if !isWatchedFileType(event.Name) {
continue
}
if eventSent {
continue
}
eventSent = true
// prevent consequent builds
go func() {
w.update <- struct{}{}
time.Sleep(watchDelta)
eventSent = false
}()
}
case err := <-w.watcher.Errors:
if err != nil {
log.Fatalf("Watcher error: %s", err)
}
return
}
}
}
func isTestFile(fileName string) bool {
return strings.HasSuffix(filepath.Base(fileName), "_test.go")
}
func isWatchedFileType(fileName string) bool {
ext := filepath.Ext(fileName)
return existIn(ext, watchedFileExt)
}
// Wait waits for the latest messages
func (w *Watcher) Wait() <-chan struct{} {
return w.update
}
// Close closes the fsnotify watcher channel
func (w *Watcher) Close() {
w.watcher.Close()
close(w.update)
}
// watchFolders recursively adds folders that will be watched for changes,
// starting from the working directory
func (w *Watcher) watchFolders() {
wd, err := w.prepareRootDir()
if err != nil {
log.Fatalf("Could not get root working directory: %s", err)
}
filepath.Walk(wd, func(path string, info os.FileInfo, err error) error {
// skip files
if info == nil {
log.Fatalf("wrong watcher package: %s", path)
}
if !info.IsDir() {
return nil
}
if !w.watchVendor {
// skip vendor directory
vendor := fmt.Sprintf("%s/vendor", wd)
if strings.HasPrefix(path, vendor) {
return filepath.SkipDir
}
}
// skip hidden folders
if len(path) > 1 && strings.HasPrefix(filepath.Base(path), ".") {
return filepath.SkipDir
}
w.addFolder(path)
return err
})
}
// addFolder adds given folder name to the watched folders, and starts
// watching it for further changes
func (w *Watcher) addFolder(name string) {
if err := w.watcher.Add(name); err != nil {
log.Fatalf("Could not watch folder: %s", err)
}
}
// prepareRootDir prepares working directory depending on root directory
func (w *Watcher) prepareRootDir() (string, error) {
if w.rootdir == "" {
return os.Getwd()
}
path := os.Getenv("GOPATH")
if path == "" {
return "", ErrPathNotSet
}
root := fmt.Sprintf("%s/src/%s", path, w.rootdir)
return root, nil
}
| [
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
docs/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CommonMark-py documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 4 18:11:52 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'CommonMark-py'
copyright = '2014-2018, Roland Shoemaker, Bibek Kafle, Nik Nyby'
author = 'Roland Shoemaker, Bibek Kafle, Nik Nyby'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9.0'
# The full version, including alpha/beta/rc tags.
release = '0.9.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
]
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'description': 'A Python CommonMark library',
'github_user': 'rtfd',
'github_repo': 'CommonMark-py',
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'CommonMark-pydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'CommonMark-py.tex', 'CommonMark-py Documentation',
'Roland Shoemaker, Bibek Kafle', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'commonmark-py', 'CommonMark-py Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'CommonMark-py', 'CommonMark-py Documentation',
author, 'CommonMark-py', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| []
| []
| [
"READTHEDOCS"
]
| [] | ["READTHEDOCS"] | python | 1 | 0 | |
pkg/daemon/util/cmdreporter_test.go | /*
Copyright 2019 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"os"
"os/exec"
"strconv"
"strings"
"testing"
"github.com/rook/rook/pkg/operator/k8sutil"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
)
func TestCommandMarshallingUnmarshalling(t *testing.T) {
type args struct {
cmd []string
args []string
}
tests := []struct {
name string
args args
}{
{name: "one command only", args: args{cmd: []string{"one"}, args: []string{}}},
{name: "no command or args", args: args{
cmd: []string{}, args: []string{}}},
{name: "no command w/ args", args: args{
cmd: []string{}, args: []string{"arg1", "arg2"}}},
{name: "one command w/ args", args: args{
cmd: []string{"one"}, args: []string{"arg1", "arg2"}}},
{name: "multi command only", args: args{
cmd: []string{"one", "two", "three"}, args: []string{}}},
{name: "multi command and arg", args: args{
cmd: []string{"one", "two", "three"}, args: []string{"arg1", "arg2", "--", "arg3"}}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
flagArg, err1 := CommandToCmdReporterFlagArgument(tt.args.cmd, tt.args.args)
if err1 != nil {
t.Errorf("CommandToFlagArgument() error = %+v, wanted no err", err1)
}
cmd, args, err2 := CmdReporterFlagArgumentToCommand(flagArg)
if err2 != nil {
t.Errorf("FlagArgumentToCommand() error = %+v, wanted no err", err2)
}
if err1 != nil || err2 != nil {
return
}
assert.Equal(t, tt.args.cmd, cmd)
assert.Equal(t, tt.args.args, args)
})
}
}
func TestNew(t *testing.T) {
client := fake.NewSimpleClientset
type fields struct {
clientset kubernetes.Interface
cmd []string
args []string
configMapName string
namespace string
}
tests := []struct {
name string
fields fields
wantErr bool
}{
{"all is well", fields{client(), []string{"cmd"}, []string{"args"}, "myConfigMap", "myNamespace"}, false},
{"no k8s client", fields{nil, []string{"cmd"}, []string{"args"}, "myConfigMap", "myNamespace"}, true},
{"no command", fields{client(), []string{}, []string{"args"}, "myConfigMap", "myNamespace"}, true},
{"empty command", fields{client(), []string{""}, []string{"args"}, "myConfigMap", "myNamespace"}, true},
{"three commands", fields{client(), []string{"one", "two", "three"}, []string{"args"}, "myConfigMap", "myNamespace"}, false},
{"no args", fields{client(), []string{"cmd"}, []string{}, "myConfigMap", "myNamespace"}, false},
{"empty arg", fields{client(), []string{"cmd"}, []string{""}, "myConfigMap", "myNamespace"}, false}, // an empty arg can still be valid
{"three args", fields{client(), []string{"cmd"}, []string{"arg1", "arg2", "arg3"}, "myConfigMap", "myNamespace"}, false},
{"no configmap name", fields{client(), []string{"cmd"}, []string{"args"}, "", "myNamespace"}, true},
{"no namespace", fields{client(), []string{"cmd"}, []string{"args"}, "myConfigMap", ""}, true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
r, err := NewCmdReporter(
tt.fields.clientset,
tt.fields.cmd,
tt.fields.args,
tt.fields.configMapName,
tt.fields.namespace,
)
if (err != nil) != tt.wantErr {
				t.Errorf("NewCmdReporter() error = %v, wantErr %v", err, tt.wantErr)
}
if err == nil {
assert.NotNil(t, r)
assert.Equal(t, tt.fields.clientset, r.clientset)
assert.Equal(t, tt.fields.cmd, r.cmd)
assert.Equal(t, tt.fields.args, r.args)
assert.Equal(t, tt.fields.configMapName, r.configMapName)
assert.Equal(t, tt.fields.namespace, r.namespace)
}
})
}
}
func TestRunner_Run(t *testing.T) {
origExecCommand := execCommand
execCommand = mockExecCommand
defer func() { execCommand = origExecCommand }()
newClient := fake.NewSimpleClientset
verifyConfigMap := func(client kubernetes.Interface, stdout, stderr, retval, cmName, namespace string) {
cm, err := client.CoreV1().ConfigMaps(namespace).Get(cmName, metav1.GetOptions{})
fmt.Println("configmap:", cm)
assert.NoError(t, err)
assert.Equal(t, stdout, cm.Data[CmdReporterConfigMapStdoutKey])
assert.Equal(t, stderr, cm.Data[CmdReporterConfigMapStderrKey])
assert.Equal(t, retval, cm.Data[CmdReporterConfigMapRetcodeKey])
}
verifyCommand := func(cmd, args []string, command string) {
err := os.Setenv("GO_HELPER_PROCESS_PRINT_COMMAND", "1")
assert.NoError(t, err)
defer func() { os.Unsetenv("GO_HELPER_PROCESS_PRINT_COMMAND") }()
k8s := newClient()
r, err := NewCmdReporter(k8s, cmd, args, "command-configmap", "command-namespace")
assert.NoError(t, err)
assert.NoError(t, r.Run())
verifyConfigMap(k8s, command, "", "0", "command-configmap", "command-namespace")
}
verifyCommand([]string{"grep"}, []string{"-e", ".*time"}, "grep -e .*time")
verifyCommand([]string{"ceph-volume", "inventory"}, []string{"--format=json-pretty"}, "ceph-volume inventory --format=json-pretty")
verifyCommand([]string{"ceph-volume", "lvm", "list"}, []string{}, "ceph-volume lvm list")
verifyOutputs := func(stdout, stderr, retcode string) {
err := os.Setenv("GO_HELPER_PROCESS_STDOUT", stdout)
assert.NoError(t, err)
defer func() { os.Unsetenv("GO_HELPER_PROCESS_STDOUT") }()
err = os.Setenv("GO_HELPER_PROCESS_STDERR", stderr)
assert.NoError(t, err)
defer func() { os.Unsetenv("GO_HELPER_PROCESS_STDERR") }()
err = os.Setenv("GO_HELPER_PROCESS_RETCODE", retcode)
assert.NoError(t, err)
defer func() { os.Unsetenv("GO_HELPER_PROCESS_RETCODE") }()
k8s := newClient()
r, err := NewCmdReporter(k8s, []string{"standin-cmd"}, []string{"--some", "arg"}, "outputs-configmap", "outputs-namespace")
assert.NoError(t, err)
assert.NoError(t, r.Run())
verifyConfigMap(k8s, stdout, stderr, retcode, "outputs-configmap", "outputs-namespace")
}
verifyOutputs("", "", "0")
verifyOutputs("", "", "11")
verifyOutputs("this is my stdout", "", "0")
verifyOutputs("", "this is my stderr", "23")
verifyOutputs("this is ...", "... mixed outputs", "23")
// Verify that cmd-reporter won't overwrite a preexisting configmap with a different app name
k8s := newClient()
cm := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "preexisting-configmap",
Namespace: "preexisting-namespace",
Labels: map[string]string{
k8sutil.AppAttr: "some-other-application",
},
},
Data: map[string]string{},
}
_, err := k8s.CoreV1().ConfigMaps("preexisting-namespace").Create(cm)
assert.NoError(t, err)
r, err := NewCmdReporter(k8s, []string{"some-command"}, []string{"some", "args"}, "preexisting-configmap", "preexisting-namespace")
assert.NoError(t, err)
assert.Error(t, r.Run())
cm, err = k8s.CoreV1().ConfigMaps("preexisting-namespace").Get("preexisting-configmap", metav1.GetOptions{})
assert.NoError(t, err)
assert.NotContains(t, cm.Data, CmdReporterConfigMapStdoutKey)
assert.NotContains(t, cm.Data, CmdReporterConfigMapStderrKey)
assert.NotContains(t, cm.Data, CmdReporterConfigMapRetcodeKey)
// Verify that cmd-reporter WILL overwrite a preexisting configmap with cmd-reporter's app name
k8s = newClient()
cm = &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "preexisting-configmap",
Namespace: "preexisting-namespace",
Labels: map[string]string{
k8sutil.AppAttr: CmdReporterAppName,
},
},
Data: map[string]string{},
}
_, err = k8s.CoreV1().ConfigMaps("preexisting-namespace").Create(cm)
assert.NoError(t, err)
r, err = NewCmdReporter(k8s, []string{"some-command"}, []string{"some", "args"}, "preexisting-configmap", "preexisting-namespace")
assert.NoError(t, err)
assert.NoError(t, r.Run())
verifyConfigMap(k8s, "", "", "0", "preexisting-configmap", "preexisting-namespace")
}
// Inspired by: https://github.com/golang/go/blob/master/src/os/exec/exec_test.go
func mockExecCommand(command string, args ...string) *exec.Cmd {
cs := []string{"-test.run=TestCmdReporterHelperProcess", "--", command}
cs = append(cs, args...)
cmd := exec.Command(os.Args[0], cs...) //nolint:gosec //Rook controls the input to the exec arguments
// the existing environment will contain variables which define the desired return from the
// fake command which will be run.
cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1")
return cmd
}
// TestHelperProcess isn't a real test. It's used as a helper process
// for TestParameterRun.
// Inspired by: https://github.com/golang/go/blob/master/src/os/exec/exec_test.go
func TestCmdReporterHelperProcess(*testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
args := os.Args
for len(args) > 0 {
if args[0] == "--" {
args = args[1:]
break
}
args = args[1:]
}
if len(args) == 0 {
fmt.Fprintf(os.Stderr, "No command\n")
os.Exit(2)
}
userCommand := args
// test should set these in its environment to control the output of the test commands
stdout := os.Getenv("GO_HELPER_PROCESS_STDOUT")
stderr := os.Getenv("GO_HELPER_PROCESS_STDERR")
retcode := os.Getenv("GO_HELPER_PROCESS_RETCODE")
if os.Getenv("GO_HELPER_PROCESS_PRINT_COMMAND") == "1" {
stdout = strings.Join(userCommand, " ")
stderr = ""
retcode = ""
}
if stdout != "" {
fmt.Fprint(os.Stdout, stdout)
}
if stderr != "" {
fmt.Fprint(os.Stderr, stderr)
}
if retcode != "" {
rc, err := strconv.Atoi(retcode)
if err != nil {
panic(err)
}
os.Exit(rc)
}
os.Exit(0)
}
| [
"\"GO_WANT_HELPER_PROCESS\"",
"\"GO_HELPER_PROCESS_STDOUT\"",
"\"GO_HELPER_PROCESS_STDERR\"",
"\"GO_HELPER_PROCESS_RETCODE\"",
"\"GO_HELPER_PROCESS_PRINT_COMMAND\""
]
| []
| [
"GO_WANT_HELPER_PROCESS",
"GO_HELPER_PROCESS_RETCODE",
"GO_HELPER_PROCESS_PRINT_COMMAND",
"GO_HELPER_PROCESS_STDERR",
"GO_HELPER_PROCESS_STDOUT"
]
| [] | ["GO_WANT_HELPER_PROCESS", "GO_HELPER_PROCESS_RETCODE", "GO_HELPER_PROCESS_PRINT_COMMAND", "GO_HELPER_PROCESS_STDERR", "GO_HELPER_PROCESS_STDOUT"] | go | 5 | 0 | |
examples/tuning/breast_cancer.py | """
pynet: hyper parameters tuning
==============================
Credit: A Grigis
Based on:
- https://github.com/autonomio/talos/blob/master/docs/Examples_PyTorch.md
In this tutorial, you will learn how to tune the hyperparameters using the
talos and the kerasplotlib modules.
"""
import talos
import numpy as np
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
# from torch_optimizer import torch_optimizer
from sklearn.metrics import f1_score
from pynet.interfaces import DeepLearningInterface
from pynet.datasets import DataManager
#############################################################################
# Data Preparation
# ----------------
#
# For this experiment, we're going to use the breast cancer dataset.
x, y = talos.templates.datasets.breast_cancer()
x = talos.utils.rescale_meanzero(x)
x_train, y_train, x_val, y_val = talos.utils.val_split(x, y, .2)
print("Train: ", x_train.shape, y_train.shape)
print("Validation: ", x_val.shape, y_val.shape)
#############################################################################
# Model Preparation
# -----------------
#
# Talos works with any pynet model, without changing the structure of the
# model in anyway, or without introducing any new syntax. The below example
# shows clearly how this works.
class BreastCancerNet(nn.Module, talos.utils.TorchHistory):
def __init__(self, n_feature, first_neuron, second_neuron, dropout):
super(BreastCancerNet, self).__init__()
self.hidden = torch.nn.Linear(n_feature, first_neuron)
torch.nn.init.normal_(self.hidden.weight)
self.hidden1 = torch.nn.Linear(first_neuron, second_neuron)
self.dropout = torch.nn.Dropout(dropout)
self.out = torch.nn.Linear(second_neuron, 2)
def forward(self, x):
x = F.relu(self.hidden(x))
x = self.dropout(x)
x = torch.sigmoid(self.hidden1(x))
x = self.out(x)
return x
def update_talos_history(signal):
""" Callback to update talos history.
Parameters
----------
signal: SignalObject
an object with the trained model 'object', the emitted signal
'signal', the epoch number 'epoch' and the fold index 'fold'.
"""
net = signal.object.model
emitted_signal = signal.signal
epoch = signal.epoch
fold = signal.fold
for key in signal.keys:
if key in ("epoch", "fold"):
continue
value = getattr(signal, key)
if value is not None:
net.append_history(value, key)
def breast_cancer(x_train, y_train, x_val, y_val, params):
print("Iteration parameters: ", params)
def weights_init_uniform_rule(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
n = m.in_features
y = 1.0 / np.sqrt(n)
m.weight.data.uniform_(-y, y)
m.bias.data.fill_(0)
manager = DataManager.from_numpy(
train_inputs=x_train, train_labels=y_train,
batch_size=params["batch_size"], validation_inputs=x_val,
validation_labels=y_val)
net = BreastCancerNet(
n_feature=x_train.shape[1], first_neuron=params["first_neuron"],
second_neuron=params["second_neuron"], dropout=params["dropout"])
net.apply(weights_init_uniform_rule)
net.init_history()
model = DeepLearningInterface(
model=net,
optimizer_name=params["optimizer_name"],
learning_rate=params["learning_rate"],
loss_name=params["loss_name"],
metrics=["accuracy"])
model.add_observer("after_epoch", update_talos_history)
model.training(
manager=manager,
nb_epochs=params["epochs"],
checkpointdir=None,
fold_index=0,
with_validation=True)
return net, net.parameters()
#############################################################################
# Setting the Parameter Space Boundaries
# --------------------------------------
#
# In the last and final step, we're going to create the dictionary, which will
# then be passed on to Talos together with the model above. Here we have
# three different ways to input values:
# - as stepped ranges (min, max, steps)
# - as multiple values [in a list]
# - as a single value [in a list]
# For values we don't want to use, it's ok to set it as None.
params = {
"first_neuron": [200, 100],
"second_neuron": [30, 50],
"dropout": [0.2, 0.3],
"optimizer_name": ["SGD", "Adam"],
"loss_name": ["CrossEntropyLoss"],
"learning_rate": [1e-3, 1e-4],
"batch_size": [20, 50, 5],
"epochs": [10, 20]
}
#############################################################################
# Run the Hyperparameter scan
# ---------------------------
#
# Now we are ready to run the model based on the parameters and the layer
# configuration above. The exact same process would apply with any other
# model, just make sure to pass the model function name in the Scan() command
# as in the below example. To get started quickly, we're going to invoke only
# 10 rounds.
os.chdir("/tmp")
scan_object = talos.Scan(x=x_train,
y=y_train,
params=params,
model=breast_cancer,
experiment_name="breast_cancer",
round_limit=10)
#############################################################################
# Access the results through the Scan object
# ------------------------------------------
#
print("accessing the results data frame")
print(scan_object.data.head())
print("accessing epoch entropy values for each round")
print(scan_object.learning_entropy)
print("access the summary details")
print(scan_object.details)
print("accessing the saved models")
print(scan_object.saved_models)
print("accessing the saved weights for models")
print(scan_object.saved_weights)
#############################################################################
# Analysing the Scan results with reporting
# -----------------------------------------
#
print("use Scan object as input")
analyze_object = talos.Analyze(scan_object)
print("access the dataframe with the results")
print(analyze_object.data)
print("get the number of rounds in the Scan")
print(analyze_object.rounds())
print("et the highest result for any metric")
print(analyze_object.high('val_accuracy'))
print("get the round with the best result")
print(analyze_object.rounds2high('val_accuracy'))
print("get the best paramaters")
print(analyze_object.best_params(
'val_accuracy', ['accuracy', 'loss', 'val_loss']))
print("get correlation for hyperparameters against a metric")
print(analyze_object.correlate('val_loss', ['accuracy', 'loss', 'val_loss']))
print("a regression plot for two dimensions")
analyze_object.plot_regs('val_accuracy', 'val_loss')
print("line plot")
analyze_object.plot_line('val_accuracy')
print("up to two dimensional kernel density estimator")
analyze_object.plot_kde('val_accuracy')
print("a simple histogram")
analyze_object.plot_hist('val_accuracy', bins=50)
print("heatmap correlation")
analyze_object.plot_corr('val_loss', ['accuracy', 'loss', 'val_loss'])
print("a four dimensional bar grid")
analyze_object.plot_bars(
'batch_size', 'val_accuracy', 'first_neuron', 'learning_rate')
if "CI_MODE" not in os.environ:
import matplotlib.pyplot as plt
plt.show()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
gym_ds3/envs/utils/helper_deepsocs.py | import os
import numpy as np
from scipy import signal
import tensorflow as tf
def placeholder(dim=None):
return tf.placeholder(dtype=tf.float32, shape=(None,dim) if dim else (None,))
def placeholders(*args):
"""
Usage: a_ph,b_ph,c_ph = placeholders(adim,bdim,None)
"""
return [placeholder(dim) for dim in args]
def get_vars(scope):
return [x for x in tf.compat.v1.global_variables() if scope in x.name]
def count_vars(scope):
v = get_vars(scope)
return sum([np.prod(var.shape.as_list()) for var in v])
def decrease_var(var, min_var, decay_rate):
if var - decay_rate >= min_var:
var -= decay_rate
else:
var = min_var
return var
def aggregate_gradients(gradients):
ground_gradients = [np.zeros(g.shape) for g in gradients[0]]
for gradient in gradients:
for i in range(len(ground_gradients)):
ground_gradients[i] += gradient[i]
return ground_gradients
def suppress_tf_warning():
import tensorflow as tf
import os
import logging
from tensorflow.python.util import deprecation
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# tf.logging.set_verbosity(tf.logging.ERROR)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
logging.getLogger('tensorflow').disabled = True
deprecation._PRINT_DEPRECATION_WARNINGS = False
def discount_cumsum(x, discount):
"""
Compute discounted cumulative sums of vectors.
input:
vector x, [x0, x1, x2]
output:
[x0 + discount * x1 + discount^2 * x2,
x1 + discount * x2,
x2]
"""
return signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]
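# Worked example (illustrative): with x = [1., 1., 1.] and discount = 0.9,
# discount_cumsum returns [1 + 0.9 + 0.81, 1 + 0.9, 1] = [2.71, 1.9, 1.].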
def discount(x, gamma):
out = np.zeros(x.shape)
out[-1] = x[-1]
for i in reversed(range(len(x) -1)):
out[i] = x[i] + gamma * out[i+1]
return out
def truncate_experiences(lst):
batch_pts = [i for i, x in enumerate(lst) if x]
batch_pts.append(len(lst))
return batch_pts
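# Illustrative: truncate_experiences([True, False, False, True, False]) returns
# [0, 3, 5]: the indices where a new batch starts plus the total length, so
# consecutive pairs delimit each batch.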
def check_obs_change(prev_obs, curr_obs):
if len(prev_obs) != len(curr_obs):
return True
return False
def combined_shape(length, shape=None):
if shape is None:
return (length,)
return (length, shape) if np.isscalar(shape) else (length, *shape)
def statistics_scalar(x, with_min_and_max=False):
"""
Get mean/std and optional min/max of scalar x
Args:
x: An array containing samples of the scalar to produce statistics for.
with_min_and_max (bool): If true, return min and max of x in
addition to mean and std.
"""
x = np.array(x, dtype=np.float32)
global_sum, global_n = np.sum(x), len(x)
mean = global_sum / global_n
global_sum_sq = np.sum((x - mean) ** 2)
std = np.sqrt(global_sum_sq / global_n) # compute global std
if with_min_and_max:
global_min = (np.min(x) if len(x) > 0 else np.inf)
global_max = (np.max(x) if len(x) > 0 else -np.inf)
return mean, std, global_min, global_max
return mean, std
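# Illustrative: statistics_scalar([1., 2., 3.], with_min_and_max=True) returns
# approximately (2.0, 0.816, 1.0, 3.0), i.e. (mean, std, min, max).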
## sparse_op
class SparseMat(object):
def __init__(self, dtype, shape):
self.dtype = dtype
self.shape = shape
self.row = []
self.col = []
self.data = []
def add(self, row, col, data):
self.row.append(row)
self.col.append(col)
self.data.append(data)
def get_col(self):
return np.array(self.col)
def get_row(self):
return np.array(self.row)
def get_data(self):
return np.array(self.data)
def absorb_sp_mats(in_mats, depth):
"""
Merge multiple sparse matrices to
a giant one on its diagonal
e.g.,
[0, 1, 0] [0, 1, 0] [0, 0, 1]
[1, 0, 0] [0, 0, 1] [0, 1, 0]
[0, 0, 1] [1, 0, 0] [0, 1, 0]
to
[0, 1, 0]
[1, 0, 0] .. .. .. ..
[0, 0, 1]
[0, 1, 0]
.. .. [0, 0, 1] .. ..
[1, 0, 0]
[0, 0, 1]
.. .. .. .. [0, 1, 0]
[0, 1, 0]
where ".." are all zeros
depth is on the 3rd dimension,
which is orthogonal to the planar
operations above
output SparseTensorValue from tensorflow
"""
sp_mats = []
for d in range(depth):
row_idx = []
col_idx = []
data = []
shape = 0
base = 0
for m in in_mats:
row_idx.append(m[d].get_row() + base)
col_idx.append(m[d].get_col() + base)
data.append(m[d].get_data())
shape += m[d].shape[0]
base += m[d].shape[0]
row_idx = np.hstack(row_idx)
col_idx = np.hstack(col_idx)
data = np.hstack(data)
indices = np.mat([row_idx, col_idx]).transpose()
sp_mats.append(tf.compat.v1.SparseTensorValue(
indices, data, (shape, shape)))
return sp_mats
def expand_sp_mat(sp, exp_step):
"""
Make a stack of same sparse matrix to
a giant one on its diagonal
The input is tf.SparseTensorValue
e.g., expand dimension 3
[0, 1, 0] [0, 1, 0]
[1, 0, 0] [1, 0, 0] .. .. .. ..
[0, 0, 1] [0, 0, 1]
[0, 1, 0]
to .. .. [1, 0, 0] .. ..
[0, 0, 1]
[0, 1, 0]
.. .. .. .. [1, 0, 0]
[0, 0, 1]
where ".." are all zeros
depth is on the 3rd dimension,
which is orthogonal to the planar
operations above
output SparseTensorValue from tensorflow
"""
extended_mat = []
depth = len(sp)
for d in range(depth):
row_idx = []
col_idx = []
data = []
shape = 0
base = 0
for i in range(exp_step):
indices = sp[d].indices.transpose()
row_idx.append(np.squeeze(np.asarray(indices[0, :]) + base))
col_idx.append(np.squeeze(np.asarray(indices[1, :]) + base))
data.append(sp[d].values)
shape += sp[d].dense_shape[0]
base += sp[d].dense_shape[0]
row_idx = np.hstack(row_idx)
col_idx = np.hstack(col_idx)
data = np.hstack(data)
indices = np.mat([row_idx, col_idx]).transpose()
extended_mat.append(tf.compat.v1.SparseTensorValue(
indices, data, (shape, shape)))
return extended_mat
def merge_and_extend_sp_mat(sp):
"""
    Transform a stack of sparse matrices into one giant block-diagonal matrix
These sparse matrices should have same shape
e.g.,
list of
[1, 0, 1, 1] [0, 0, 0, 1]
[1, 1, 1, 1] [0, 1, 1, 1]
[0, 0, 1, 1] [1, 1, 1, 1]
to
[1, 0, 1, 1]
[1, 1, 1, 1] .. ..
[0, 0, 1, 1]
[0, 0, 0, 1]
.. .. [0, 1, 1, 1]
[1, 1, 1, 1]
"""
batch_size = len(sp)
row_idx = []
col_idx = []
data = []
shape = (sp[0].dense_shape[0] * batch_size, sp[0].dense_shape[1] * batch_size)
row_base = 0
col_base = 0
for b in range(batch_size):
indices = sp[b].indices.transpose()
row_idx.append(np.squeeze(np.asarray(indices[0, :]) + row_base))
col_idx.append(np.squeeze(np.asarray(indices[1, :]) + col_base))
data.append(sp[b].values)
row_base += sp[b].dense_shape[0]
col_base += sp[b].dense_shape[1]
row_idx = np.hstack(row_idx)
col_idx = np.hstack(col_idx)
data = np.hstack(data)
indices = np.mat([row_idx, col_idx]).transpose()
extended_mat = tf.compat.v1.SparseTensorValue(indices, data, shape)
return extended_mat
| []
| []
| [
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
levant/deploy.go | package levant
import (
"fmt"
"os"
"strings"
"time"
nomad "github.com/hashicorp/nomad/api"
nomadStructs "github.com/hashicorp/nomad/nomad/structs"
"github.com/jrasell/levant/client"
"github.com/jrasell/levant/levant/structs"
"github.com/rs/zerolog/log"
)
// levantDeployment is the all deployment related objects for this Levant
// deployment invocation.
type levantDeployment struct {
nomad *nomad.Client
config *DeployConfig
}
// DeployConfig is the set of config structs required to run a Levant deploy.
type DeployConfig struct {
Deploy *structs.DeployConfig
Client *structs.ClientConfig
Plan *structs.PlanConfig
Template *structs.TemplateConfig
}
// newLevantDeployment sets up the Levant deployment object and Nomad client
// to interact with the Nomad API.
func newLevantDeployment(config *DeployConfig, nomadClient *nomad.Client) (*levantDeployment, error) {
var err error
if config.Deploy.EnvVault == true {
config.Deploy.VaultToken = os.Getenv("VAULT_TOKEN")
}
dep := &levantDeployment{}
dep.config = config
if nomadClient == nil {
dep.nomad, err = client.NewNomadClient(config.Client.Addr)
if err != nil {
return nil, err
}
} else {
dep.nomad = nomadClient
}
// Add the JobID as a log context field.
log.Logger = log.With().Str(structs.JobIDContextField, *config.Template.Job.ID).Logger()
return dep, nil
}
// TriggerDeployment provides the main entry point into a Levant deployment and
// is used to setup the clients before triggering the deployment process.
func TriggerDeployment(config *DeployConfig, nomadClient *nomad.Client) bool {
// Create our new deployment object.
levantDep, err := newLevantDeployment(config, nomadClient)
if err != nil {
log.Error().Err(err).Msg("levant/deploy: unable to setup Levant deployment")
return false
}
// Run the job validation steps and count updater.
preDepVal := levantDep.preDeployValidate()
if !preDepVal {
log.Error().Msg("levant/deploy: pre-deployment validation process failed")
return false
}
// Start the main deployment function.
success := levantDep.deploy()
if !success {
log.Error().Msg("levant/deploy: job deployment failed")
return false
}
log.Info().Msg("levant/deploy: job deployment successful")
return true
}
func (l *levantDeployment) preDeployValidate() (success bool) {
// Validate the job to check it is syntactically correct.
if _, _, err := l.nomad.Jobs().Validate(l.config.Template.Job, nil); err != nil {
log.Error().Err(err).Msg("levant/deploy: job validation failed")
return
}
// If job.Type isn't set we can't continue
if l.config.Template.Job.Type == nil {
log.Error().Msgf("levant/deploy: Nomad job `type` is not set; should be set to `%s`, `%s` or `%s`",
nomadStructs.JobTypeBatch, nomadStructs.JobTypeSystem, nomadStructs.JobTypeService)
return
}
if !l.config.Deploy.ForceCount {
if err := l.dynamicGroupCountUpdater(); err != nil {
return
}
}
return true
}
// deploy triggers a register of the job resulting in a Nomad deployment which
// is monitored to determine the eventual state.
func (l *levantDeployment) deploy() (success bool) {
log.Info().Msgf("levant/deploy: triggering a deployment")
l.config.Template.Job.VaultToken = &l.config.Deploy.VaultToken
eval, _, err := l.nomad.Jobs().Register(l.config.Template.Job, nil)
if err != nil {
log.Error().Err(err).Msg("levant/deploy: unable to register job with Nomad")
return
}
if l.config.Deploy.ForceBatch {
if eval.EvalID, err = l.triggerPeriodic(l.config.Template.Job.ID); err != nil {
log.Error().Err(err).Msg("levant/deploy: unable to trigger periodic instance of job")
return
}
}
// Periodic and parameterized jobs do not return an evaluation and therefore
// can't perform the evaluationInspector unless we are forcing an instance of
// periodic which will yield an EvalID.
if !l.config.Template.Job.IsPeriodic() && !l.config.Template.Job.IsParameterized() ||
l.config.Template.Job.IsPeriodic() && l.config.Deploy.ForceBatch {
// Trigger the evaluationInspector to identify any potential errors in the
		// Nomad evaluation run. As far as I can tell from testing, a single alloc
// failure in an evaluation means no allocs will be placed so we exit here.
err = l.evaluationInspector(&eval.EvalID)
if err != nil {
log.Error().Err(err).Msg("levant/deploy: something")
return
}
}
if l.isJobZeroCount() {
return true
}
switch *l.config.Template.Job.Type {
case nomadStructs.JobTypeService:
// If the service job doesn't have an update stanza, the job will not use
// Nomad deployments.
if l.config.Template.Job.Update == nil {
log.Info().Msg("levant/deploy: job is not configured with update stanza, consider adding to use deployments")
return l.jobStatusChecker(&eval.EvalID)
}
log.Info().Msgf("levant/deploy: beginning deployment watcher for job")
// Get the deploymentID from the evaluationID so that we can watch the
// deployment for end status.
depID, err := l.getDeploymentID(eval.EvalID)
if err != nil {
log.Error().Err(err).Msgf("levant/deploy: unable to get info of evaluation %s", eval.EvalID)
return
}
// Get the success of the deployment and return if we have success.
if success = l.deploymentWatcher(depID); success {
return
}
dep, _, err := l.nomad.Deployments().Info(depID, nil)
if err != nil {
log.Error().Err(err).Msgf("levant/deploy: unable to query deployment %s for auto-revert check", dep.ID)
return
}
// If the job is not a canary job, then run the auto-revert checker, the
// current checking mechanism is slightly hacky and should be updated.
// The reason for this is currently the config.Job is populate from the
// rendered job and so a user could potentially not set canary meaning
// the field shows a null.
if l.config.Template.Job.Update.Canary == nil {
l.checkAutoRevert(dep)
} else if *l.config.Template.Job.Update.Canary == 0 {
l.checkAutoRevert(dep)
}
case nomadStructs.JobTypeBatch:
return l.jobStatusChecker(&eval.EvalID)
case nomadStructs.JobTypeSystem:
return l.jobStatusChecker(&eval.EvalID)
default:
log.Debug().Msgf("levant/deploy: Levant does not support advanced deployments of job type %s",
*l.config.Template.Job.Type)
success = true
}
return
}
func (l *levantDeployment) evaluationInspector(evalID *string) error {
for {
evalInfo, _, err := l.nomad.Evaluations().Info(*evalID, nil)
if err != nil {
return err
}
switch evalInfo.Status {
case nomadStructs.EvalStatusComplete, nomadStructs.EvalStatusFailed, nomadStructs.EvalStatusCancelled:
if len(evalInfo.FailedTGAllocs) == 0 {
log.Info().Msgf("levant/deploy: evaluation %s finished successfully", *evalID)
return nil
}
for group, metrics := range evalInfo.FailedTGAllocs {
				// Check if any nodes have been exhausted of resources and therefore are
				// unable to place allocs.
if metrics.NodesExhausted > 0 {
var exhausted, dimension []string
for e := range metrics.ClassExhausted {
exhausted = append(exhausted, e)
}
for d := range metrics.DimensionExhausted {
dimension = append(dimension, d)
}
log.Error().Msgf("levant/deploy: task group %s failed to place allocs, failed on %v and exhausted %v",
group, exhausted, dimension)
}
// Check if any node classes were filtered causing alloc placement
// failures.
if len(metrics.ClassFiltered) > 0 {
for f := range metrics.ClassFiltered {
log.Error().Msgf("levant/deploy: task group %s failed to place %v allocs as class \"%s\" was filtered",
group, len(metrics.ClassFiltered), f)
}
}
// Check if any node constraints were filtered causing alloc placement
// failures.
if len(metrics.ConstraintFiltered) > 0 {
for cf := range metrics.ConstraintFiltered {
log.Error().Msgf("levant/deploy: task group %s failed to place %v allocs as constraint \"%s\" was filtered",
group, len(metrics.ConstraintFiltered), cf)
}
}
}
// Do not return an error here; there could well be information from
// Nomad detailing filtered nodes but the deployment will still be
// successful. GH-220.
return nil
default:
time.Sleep(1 * time.Second)
continue
}
}
}
func (l *levantDeployment) deploymentWatcher(depID string) (success bool) {
var canaryChan chan interface{}
deploymentChan := make(chan interface{})
t := time.Now()
wt := time.Duration(5 * time.Second)
// Setup the canaryChan and launch the autoPromote go routine if autoPromote
// has been enabled.
if l.config.Deploy.Canary > 0 {
canaryChan = make(chan interface{})
go l.canaryAutoPromote(depID, l.config.Deploy.Canary, canaryChan, deploymentChan)
}
q := &nomad.QueryOptions{WaitIndex: 1, AllowStale: l.config.Client.AllowStale, WaitTime: wt}
for {
dep, meta, err := l.nomad.Deployments().Info(depID, q)
log.Debug().Msgf("levant/deploy: deployment %v running for %.2fs", depID, time.Since(t).Seconds())
// Listen for the deploymentChan closing which indicates Levant should exit
// the deployment watcher.
select {
case <-deploymentChan:
return false
default:
break
}
if err != nil {
log.Error().Err(err).Msgf("levant/deploy: unable to get info of deployment %s", depID)
return
}
if meta.LastIndex <= q.WaitIndex {
continue
}
q.WaitIndex = meta.LastIndex
cont, err := l.checkDeploymentStatus(dep, canaryChan)
if err != nil {
return false
}
if cont {
continue
} else {
return true
}
}
}
func (l *levantDeployment) checkDeploymentStatus(dep *nomad.Deployment, shutdownChan chan interface{}) (bool, error) {
switch dep.Status {
case nomadStructs.DeploymentStatusSuccessful:
log.Info().Msgf("levant/deploy: deployment %v has completed successfully", dep.ID)
return false, nil
case nomadStructs.DeploymentStatusRunning:
return true, nil
default:
if shutdownChan != nil {
log.Debug().Msgf("levant/deploy: deployment %v meaning canary auto promote will shutdown", dep.Status)
close(shutdownChan)
}
log.Error().Msgf("levant/deploy: deployment %v has status %s", dep.ID, dep.Status)
// Launch the failure inspector.
l.checkFailedDeployment(&dep.ID)
return false, fmt.Errorf("deployment failed")
}
}
// canaryAutoPromote handles Levant's canary-auto-promote functionality.
func (l *levantDeployment) canaryAutoPromote(depID string, waitTime int, shutdownChan, deploymentChan chan interface{}) {
// Setup the AutoPromote timer.
autoPromote := time.After(time.Duration(waitTime) * time.Second)
for {
select {
case <-autoPromote:
log.Info().Msgf("levant/deploy: auto-promote period %vs has been reached for deployment %s",
waitTime, depID)
// Check the deployment is healthy before promoting.
if healthy := l.checkCanaryDeploymentHealth(depID); !healthy {
log.Error().Msgf("levant/deploy: the canary deployment %s has unhealthy allocations, unable to promote", depID)
close(deploymentChan)
return
}
log.Info().Msgf("levant/deploy: triggering auto promote of deployment %s", depID)
// Promote the deployment.
_, _, err := l.nomad.Deployments().PromoteAll(depID, nil)
if err != nil {
log.Error().Err(err).Msgf("levant/deploy: unable to promote deployment %s", depID)
close(deploymentChan)
return
}
case <-shutdownChan:
log.Info().Msg("levant/deploy: canary auto promote has been shutdown")
return
}
}
}
// checkCanaryDeploymentHealth is used to check the health status of each
// task-group within a canary deployment.
func (l *levantDeployment) checkCanaryDeploymentHealth(depID string) (healthy bool) {
var unhealthy int
dep, _, err := l.nomad.Deployments().Info(depID, &nomad.QueryOptions{AllowStale: l.config.Client.AllowStale})
if err != nil {
log.Error().Err(err).Msgf("levant/deploy: unable to query deployment %s for health", depID)
return
}
	// Iterate over each task in the deployment to determine its health status. If an
	// unhealthy task is found, increment the unhealthy counter.
for taskName, taskInfo := range dep.TaskGroups {
// skip any task groups which are not configured for canary deployments
if taskInfo.DesiredCanaries == 0 {
log.Debug().Msgf("levant/deploy: task %s has no desired canaries, skipping health checks in deployment %s", taskName, depID)
continue
}
if taskInfo.DesiredCanaries != taskInfo.HealthyAllocs {
log.Error().Msgf("levant/deploy: task %s has unhealthy allocations in deployment %s", taskName, depID)
unhealthy++
}
}
// If zero unhealthy tasks were found, continue with the auto promotion.
if unhealthy == 0 {
log.Debug().Msgf("levant/deploy: deployment %s has 0 unhealthy allocations", depID)
healthy = true
}
return
}
// triggerPeriodic is used to force an instance of a periodic job outside of the
// planned schedule. This results in an evalID being created that can then be
// checked in the same fashion as other jobs.
func (l *levantDeployment) triggerPeriodic(jobID *string) (evalID string, err error) {
log.Info().Msg("levant/deploy: triggering a run of periodic job")
// Trigger the run if possible and just returning both the evalID and the err.
// There is no need to check this here as the caller does this.
evalID, _, err = l.nomad.Jobs().PeriodicForce(*jobID, nil)
return
}
// getDeploymentID finds the Nomad deploymentID associated to a Nomad
// evaluationID. This is only needed as sometimes Nomad initially returns eval
// info with an empty deploymentID; and a retry is required in order to get the
// updated response from Nomad.
func (l *levantDeployment) getDeploymentID(evalID string) (depID string, err error) {
var evalInfo *nomad.Evaluation
for {
if evalInfo, _, err = l.nomad.Evaluations().Info(evalID, nil); err != nil {
return
}
if evalInfo.DeploymentID == "" {
log.Debug().Msgf("levant/deploy: Nomad returned an empty deployment for evaluation %v; retrying", evalID)
time.Sleep(2 * time.Second)
continue
} else {
break
}
}
return evalInfo.DeploymentID, nil
}
// dynamicGroupCountUpdater takes the templated and rendered job and updates the
// group counts based on the currently deployed job; if its running.
func (l *levantDeployment) dynamicGroupCountUpdater() error {
// Gather information about the current state, if any, of the job on the
// Nomad cluster.
rJob, _, err := l.nomad.Jobs().Info(*l.config.Template.Job.Name, &nomad.QueryOptions{})
// This is a hack due to GH-1849; we check the error string for 404 which
// indicates the job is not running, not that there was an error in the API
// call.
if err != nil && strings.Contains(err.Error(), "404") {
log.Info().Msg("levant/deploy: job is not running, using template file group counts")
return nil
} else if err != nil {
log.Error().Err(err).Msg("levant/deploy: unable to perform job evaluation")
return err
}
// Check that the job is actually running and not in a potentially stopped
// state.
if *rJob.Status != nomadStructs.JobStatusRunning {
return nil
}
log.Debug().Msgf("levant/deploy: running dynamic job count updater")
// Iterate the templated job and the Nomad returned job and update group count
// based on matches.
for _, rGroup := range rJob.TaskGroups {
for _, group := range l.config.Template.Job.TaskGroups {
if *rGroup.Name == *group.Name {
log.Info().Msgf("levant/deploy: using dynamic count %v for group %s",
*rGroup.Count, *group.Name)
group.Count = rGroup.Count
}
}
}
return nil
}
func (l *levantDeployment) isJobZeroCount() bool {
for _, tg := range l.config.Template.Job.TaskGroups {
if tg.Count == nil {
return false
} else if *tg.Count > 0 {
return false
}
}
return true
}
| [
"\"VAULT_TOKEN\""
]
| []
| [
"VAULT_TOKEN"
]
| [] | ["VAULT_TOKEN"] | go | 1 | 0 | |
slack/cli.py | #!/usr/bin/env python
import argparse
import json
import os
import sys
import requests
def main():
parser = argparse.ArgumentParser()
parser.add_argument('text', nargs='?', help='message you want to get delivered. wrap in quotes if it contains spaces. Use "-" to read from stdin.')
parser.add_argument('-w', '--webhook-url', help='webhook URL to use. if not given, the SLACK_WEBHOOK_URL environment variable needs to be set.', default=os.getenv('SLACK_WEBHOOK_URL'))
parser.add_argument('-c', '--channel', help='channel the message should be sent to. can also be set using the SLACK_CHANNEL environment variable. if not given, the channel configured for this webhook URL will be used.', default=os.getenv('SLACK_CHANNEL'))
parser.add_argument('-u', '--username', help='username that should be used as the sender. can also be set using the SLACK_USERNAME environment variable. if not given, the username configured for this webhook URL will be used.', default=os.getenv('SLACK_USERNAME'))
parser.add_argument('-i', '--icon-url', help='URL of an icon image to use. can also be set using the SLACK_ICON_URL environment variable.', default=os.getenv('SLACK_ICON_URL'))
parser.add_argument('-e', '--icon-emoji', help='Slack emoji to use as the icon, e.g. `:ghost:`. can also be set using the SLACK_ICON_EMOJI environment variable.', default=os.getenv('SLACK_ICON_EMOJI'))
parser.add_argument('-a', '--attachment', help='send message as a rich attachment', action='store_true', default=False)
parser.add_argument('-C', '--color', help='set the attachment color')
parser.add_argument('-t', '--title', help='set the attachment title')
parser.add_argument('-d', '--dump-json', help='do not post request but just print json body and exit', action='store_true', default=False)
args = parser.parse_args()
if args.webhook_url is None and not args.dump_json:
sys.stderr.write('No webhook URL given.\nEither use the -w/--webhook-url argument or the SLACK_WEBHOOK_URL environment variable.\n')
return 1
if args.text == '-' and not sys.stdin.isatty():
args.text = sys.stdin.read()
if args.text is None:
parser.print_help()
return 0
if args.attachment is not True:
payload = {
'text': args.text
}
else:
payload = {
'attachments': [
{
'fallback': args.text,
'color': args.color,
'fields': [
{
'title': args.title,
'value': args.text,
'short': False
}
]
}
]
}
if args.channel is not None:
if args.channel[0] not in ['#', '@']:
args.channel = '#' + args.channel
payload['channel'] = args.channel
if args.username is not None:
payload['username'] = args.username
if args.icon_url is not None:
payload['icon_url'] = args.icon_url
elif args.icon_emoji is not None:
payload['icon_emoji'] = args.icon_emoji
if args.dump_json:
sys.stdout.write('{0}\n'.format(json.dumps(payload)))
return 0
try:
res = requests.post(args.webhook_url, data=json.dumps(payload))
except Exception as e:
        sys.stderr.write('An error occurred when trying to deliver the message:\n {0}'.format(e))
return 2
if not res.ok:
sys.stderr.write('Could not deliver the message. Slack says:\n {0}'.format(res.text))
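# Illustrative invocations (a sketch; assumes the script is run directly and that a
# valid incoming-webhook URL is available via SLACK_WEBHOOK_URL; the installed
# command name may differ):
#   python cli.py "deploy finished" -c '#ops' -u deploybot
#   echo "build failed" | python cli.py - -a -C danger -t "CI status"
#   python cli.py "hello" --dump-json   # print the JSON payload without posting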
if __name__ == '__main__':
sys.exit(main())
| []
| []
| [
"SLACK_ICON_EMOJI",
"SLACK_CHANNEL",
"SLACK_USERNAME",
"SLACK_ICON_URL",
"SLACK_WEBHOOK_URL"
]
| [] | ["SLACK_ICON_EMOJI", "SLACK_CHANNEL", "SLACK_USERNAME", "SLACK_ICON_URL", "SLACK_WEBHOOK_URL"] | python | 5 | 0 | |
lib/streamer/jsonstreamer.go | /*
Copyright IBM Corp. 2017 All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// StreamJSONArray scans the JSON stream associated with 'decoder' to find
// an array value associated with the json element at 'pathToArray'.
// It then calls the 'cb' callback function so that it can decode one element
// in the stream at a time.
package streamer
import (
"encoding/json"
"fmt"
"os"
"reflect"
"strings"
"github.com/cloudflare/cfssl/api"
"github.com/cloudflare/cfssl/log"
"github.com/pkg/errors"
)
// SearchElement defines the JSON arrays for which to search
type SearchElement struct {
Path string
CB func(*json.Decoder) error
}
// StreamJSONArray searches the JSON stream for an array matching 'path'.
// It then streams the array one element at a time through the 'cb' callback.
func StreamJSONArray(decoder *json.Decoder, path string, cb func(*json.Decoder) error) (bool, error) {
ses := []SearchElement{
SearchElement{Path: path, CB: cb},
SearchElement{Path: "errors", CB: errCB},
}
return StreamJSON(decoder, ses)
}
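// Illustrative usage (a sketch; the path and element shape are assumptions, not taken
// from this package): decode each element of a top-level "result" array one at a time
// without buffering the whole response:
//
//	dec := json.NewDecoder(resp.Body)
//	found, err := StreamJSONArray(dec, "result", func(d *json.Decoder) error {
//		var ele map[string]interface{}
//		return d.Decode(&ele) // handle one array element here
//	})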
// StreamJSON searches the JSON stream for arrays matching a search element.
// For each array it finds, it streams the elements one at a time.
func StreamJSON(decoder *json.Decoder, search []SearchElement) (bool, error) {
js := &jsonStream{decoder: decoder, search: search, stack: []string{}}
err := js.stream()
return js.gotResults, err
}
type jsonStream struct {
decoder *json.Decoder
search []SearchElement
stack []string
gotResults bool
}
func (js *jsonStream) stream() error {
t, err := js.getToken()
if err != nil {
return err
}
if _, ok := t.(json.Delim); !ok {
return nil
}
path := strings.Join(js.stack, ".")
se := js.getSearchElement(path)
d := fmt.Sprintf("%s", t)
switch d {
case "[":
if se != nil {
for js.decoder.More() {
err = se.CB(js.decoder)
if err != nil {
return err
}
js.gotResults = true
}
}
err = js.skipToDelim("]")
if err != nil {
return err
}
case "]":
return errors.Errorf("Unexpected '%s'", d)
case "{":
if se != nil {
return errors.Errorf("Expecting array for value of '%s'", path)
}
for {
name, err := js.getNextName()
if err != nil {
return err
}
if name == "" {
return nil
}
stack := js.stack
js.stack = append(stack, name)
err = js.stream()
if err != nil {
return err
}
js.stack = stack
}
case "}":
return errors.Errorf("Unexpected '%s'", d)
default:
return errors.Errorf("unknown JSON delimiter: '%s'", d)
}
return nil
}
// Find a search element named 'path'
func (js *jsonStream) getSearchElement(path string) *SearchElement {
for _, ele := range js.search {
if ele.Path == path {
return &ele
}
}
return nil
}
// Skip over tokens until we hit the delimiter
func (js *jsonStream) skipToDelim(delim string) error {
for {
t, err := js.getToken()
if err != nil {
return err
}
// Skip anything that isn't a delimiter
if _, ok := t.(json.Delim); !ok {
continue
}
// It is a delimiter
d := fmt.Sprintf("%s", t)
if d == delim {
return nil
}
switch d {
case "[":
err = js.skipToDelim("]")
case "]":
err = errors.Errorf("Expecting '%s' but found '%s'", delim, d)
case "{":
err = js.skipToDelim("}")
case "}":
err = errors.Errorf("Expecting '%s' but found '%s'", delim, d)
default:
err = errors.Errorf("unknown JSON delimiter: '%s'", d)
}
if err != nil {
return err
}
}
}
func (js *jsonStream) getNextName() (string, error) {
token, err := js.getToken()
if err != nil {
return "", err
}
switch v := token.(type) {
case string:
return v, nil
case json.Delim:
d := v.String()
if d == "}" {
return "", nil
}
return "", errors.Errorf("Expecting '}' delimiter but found '%s'", d)
default:
return "", errors.Errorf("Expecting string or delimiter but found '%s'", v)
}
}
func (js *jsonStream) getToken() (interface{}, error) {
token, err := js.decoder.Token()
if os.Getenv("FABRIC_CA_JSON_STREAM_DEBUG") != "" {
log.Debugf("TOKEN: type=%s, %+v\n", reflect.TypeOf(token), token)
}
return token, err
}
func errCB(decoder *json.Decoder) error {
errMsg := &api.ResponseMessage{}
err := decoder.Decode(errMsg)
if err != nil {
return errors.Errorf("Invalid JSON error format: %s", err)
}
return errors.Errorf("%+v", errMsg)
}
| [
"\"FABRIC_CA_JSON_STREAM_DEBUG\""
]
| []
| [
"FABRIC_CA_JSON_STREAM_DEBUG"
]
| [] | ["FABRIC_CA_JSON_STREAM_DEBUG"] | go | 1 | 0 | |
acceptance/acceptance_test.go | // +build acceptance
package acceptance
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"regexp"
"runtime"
"strings"
"testing"
"time"
pubcfg "github.com/buildpacks/pack/config"
"github.com/ghodss/yaml"
"github.com/pelletier/go-toml"
dockertypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/stdcopy"
"github.com/docker/go-connections/nat"
"github.com/google/go-containerregistry/pkg/name"
"github.com/pkg/errors"
"github.com/sclevine/spec"
"github.com/sclevine/spec/report"
"github.com/buildpacks/pack/acceptance/buildpacks"
"github.com/buildpacks/pack/acceptance/assertions"
"github.com/buildpacks/pack/acceptance/config"
"github.com/buildpacks/pack/acceptance/invoke"
"github.com/buildpacks/pack/internal/archive"
"github.com/buildpacks/pack/internal/cache"
"github.com/buildpacks/pack/internal/style"
h "github.com/buildpacks/pack/testhelpers"
)
const (
runImage = "pack-test/run"
buildImage = "pack-test/build"
)
var (
dockerCli client.CommonAPIClient
registryConfig *h.TestRegistryConfig
suiteManager *SuiteManager
)
func TestAcceptance(t *testing.T) {
var err error
h.RequireDocker(t)
rand.Seed(time.Now().UTC().UnixNano())
assert := h.NewAssertionManager(t)
dockerCli, err = client.NewClientWithOpts(client.FromEnv, client.WithVersion("1.38"))
assert.Nil(err)
registryConfig = h.RunRegistry(t)
defer registryConfig.StopRegistry(t)
inputConfigManager, err := config.NewInputConfigurationManager()
assert.Nil(err)
assetsConfig := config.ConvergedAssetManager(t, assert, inputConfigManager)
suiteManager = &SuiteManager{out: t.Logf}
suite := spec.New("acceptance suite", spec.Report(report.Terminal{}))
if inputConfigManager.Combinations().IncludesCurrentSubjectPack() {
suite("p_current", func(t *testing.T, when spec.G, it spec.S) {
testWithoutSpecificBuilderRequirement(
t,
when,
it,
assetsConfig.NewPackAsset(config.Current),
)
}, spec.Report(report.Terminal{}))
}
for _, combo := range inputConfigManager.Combinations() {
t.Logf(`setting up run combination %s: %s`,
style.Symbol(combo.String()),
combo.Describe(assetsConfig),
)
suite(combo.String(), func(t *testing.T, when spec.G, it spec.S) {
testAcceptance(
t,
when,
it,
assetsConfig.NewPackAsset(combo.Pack),
assetsConfig.NewPackAsset(combo.PackCreateBuilder),
assetsConfig.NewLifecycleAsset(combo.Lifecycle),
)
}, spec.Report(report.Terminal{}))
}
suite.Run(t)
assert.Nil(suiteManager.CleanUp())
}
// These tests either (a) do not require a builder or (b) do not require a specific builder to be provided
// in order to test compatibility.
// They should only be run against the "current" (i.e., main) version of pack.
func testWithoutSpecificBuilderRequirement(
t *testing.T,
when spec.G,
it spec.S,
packConfig config.PackAsset,
) {
var (
pack *invoke.PackInvoker
assert = h.NewAssertionManager(t)
buildpackManager buildpacks.BuildpackManager
)
it.Before(func() {
pack = invoke.NewPackInvoker(t, assert, packConfig, registryConfig.DockerConfigDir)
pack.EnableExperimental()
buildpackManager = buildpacks.NewBuildpackManager(t, assert)
})
it.After(func() {
pack.Cleanup()
})
when("invalid subcommand", func() {
it("prints usage", func() {
output, err := pack.Run("some-bad-command")
assert.NotNil(err)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsCommandUnknown("some-bad-command")
assertOutput.IncludesUsagePrompt()
})
})
when("suggest-builders", func() {
it("displays suggested builders", func() {
output := pack.RunSuccessfully("suggest-builders")
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.IncludesSuggestedBuildersHeading()
assertOutput.IncludesPrefixedGoogleBuilder()
assertOutput.IncludesPrefixedHerokuBuilder()
assertOutput.IncludesPrefixedPaketoBuilders()
})
})
when("suggest-stacks", func() {
it("displays suggested stacks", func() {
output, err := pack.Run("suggest-stacks")
assert.NilWithMessage(err, fmt.Sprintf("suggest-stacks command failed with output %s", output))
assertions.NewOutputAssertionManager(t, output).IncludesSuggestedStacksHeading()
})
})
when("set-default-builder", func() {
it("sets the default-stack-id in ~/.pack/config.toml", func() {
builderName := "paketobuildpacks/builder:base"
output := pack.RunSuccessfully("set-default-builder", builderName)
assertions.NewOutputAssertionManager(t, output).ReportsSettingDefaultBuilder(builderName)
})
})
when("trust-builder", func() {
it("sets the builder as trusted in ~/.pack/config.toml", func() {
h.SkipUnless(t, pack.Supports("trust-builder"), "pack does not support 'trust-builder'")
builderName := "some-builder" + h.RandString(10)
pack.JustRunSuccessfully("trust-builder", builderName)
assert.Contains(pack.ConfigFileContents(), builderName)
})
})
when("untrust-builder", func() {
it("removes the previously trusted builder from ~/${PACK_HOME}/config.toml", func() {
h.SkipUnless(t, pack.Supports("untrust-builder"), "pack does not support 'untrust-builder'")
builderName := "some-builder" + h.RandString(10)
pack.JustRunSuccessfully("trust-builder", builderName)
assert.Contains(pack.ConfigFileContents(), builderName)
pack.JustRunSuccessfully("untrust-builder", builderName)
assert.NotContains(pack.ConfigFileContents(), builderName)
})
})
when("list-trusted-builders", func() {
it.Before(func() {
h.SkipUnless(t,
pack.Supports("list-trusted-builders"),
"pack does not support 'list-trusted-builders",
)
})
it("shows default builders from pack suggest-builders", func() {
output := pack.RunSuccessfully("list-trusted-builders")
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.IncludesTrustedBuildersHeading()
assertOutput.IncludesHerokuBuilder()
assertOutput.IncludesGoogleBuilder()
assertOutput.IncludesPaketoBuilders()
})
it("shows a builder trusted by pack trust-builder", func() {
builderName := "some-builder" + h.RandString(10)
pack.JustRunSuccessfully("trust-builder", builderName)
output := pack.RunSuccessfully("list-trusted-builders")
assert.Contains(output, builderName)
})
})
when("package-buildpack", func() {
var (
tmpDir string
buildpackManager buildpacks.BuildpackManager
simplePackageConfigFixtureName = "package.toml"
)
it.Before(func() {
h.SkipUnless(t,
pack.Supports("package-buildpack"),
"pack does not support 'package-buildpack'",
)
var err error
tmpDir, err = ioutil.TempDir("", "package-buildpack-tests")
assert.Nil(err)
buildpackManager = buildpacks.NewBuildpackManager(t, assert)
buildpackManager.PrepareBuildpacks(tmpDir, buildpacks.SimpleLayersParent, buildpacks.SimpleLayers)
})
it.After(func() {
assert.Nil(os.RemoveAll(tmpDir))
})
assertImageExistsLocally := func(name string) {
t.Helper()
_, _, err := dockerCli.ImageInspectWithRaw(context.Background(), name)
assert.Nil(err)
}
generateAggregatePackageToml := func(buildpackURI, nestedPackageName, os string) string {
t.Helper()
packageTomlFile, err := ioutil.TempFile(tmpDir, "package_aggregate-*.toml")
assert.Nil(err)
pack.FixtureManager().TemplateFixtureToFile(
"package_aggregate.toml",
packageTomlFile,
map[string]interface{}{
"BuildpackURI": buildpackURI,
"PackageName": nestedPackageName,
"OS": os,
},
)
assert.Nil(packageTomlFile.Close())
return packageTomlFile.Name()
}
when("no --format is provided", func() {
it("creates the package as image", func() {
packageName := "test/package-" + h.RandString(10)
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, dockerHostOS())
output := pack.RunSuccessfully("package-buildpack", packageName, "-c", packageTomlPath)
assertions.NewOutputAssertionManager(t, output).ReportsPackageCreation(packageName)
defer h.DockerRmi(dockerCli, packageName)
assertImageExistsLocally(packageName)
})
})
when("--format image", func() {
it("creates the package", func() {
t.Log("package w/ only buildpacks")
nestedPackageName := "test/package-" + h.RandString(10)
packageName := "test/package-" + h.RandString(10)
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, dockerHostOS())
aggregatePackageToml := generateAggregatePackageToml("simple-layers-parent-buildpack.tgz", nestedPackageName, dockerHostOS())
packageBuildpack := buildpacks.NewPackageImage(
t,
pack,
packageName,
aggregatePackageToml,
buildpacks.WithRequiredBuildpacks(
buildpacks.SimpleLayersParent,
buildpacks.NewPackageImage(
t,
pack,
nestedPackageName,
packageTomlPath,
buildpacks.WithRequiredBuildpacks(buildpacks.SimpleLayers),
),
),
)
buildpackManager.PrepareBuildpacks(tmpDir, packageBuildpack)
defer h.DockerRmi(dockerCli, nestedPackageName, packageName)
assertImageExistsLocally(nestedPackageName)
assertImageExistsLocally(packageName)
})
when("--publish", func() {
it("publishes image to registry", func() {
h.SkipIf(t, !pack.Supports("package-buildpack --os"), "os not supported")
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, dockerHostOS())
nestedPackageName := registryConfig.RepoName("test/package-" + h.RandString(10))
nestedPackage := buildpacks.NewPackageImage(
t,
pack,
nestedPackageName,
packageTomlPath,
buildpacks.WithRequiredBuildpacks(buildpacks.SimpleLayers),
buildpacks.WithPublish(),
)
buildpackManager.PrepareBuildpacks(tmpDir, nestedPackage)
defer h.DockerRmi(dockerCli, nestedPackageName)
aggregatePackageToml := generateAggregatePackageToml("simple-layers-parent-buildpack.tgz", nestedPackageName, dockerHostOS())
packageName := registryConfig.RepoName("test/package-" + h.RandString(10))
output := pack.RunSuccessfully(
"package-buildpack", packageName,
"-c", aggregatePackageToml,
"--publish",
)
defer h.DockerRmi(dockerCli, packageName)
assertions.NewOutputAssertionManager(t, output).ReportsPackagePublished(packageName)
_, _, err := dockerCli.ImageInspectWithRaw(context.Background(), packageName)
assert.ErrorContains(err, "No such image")
assert.Nil(h.PullImageWithAuth(dockerCli, packageName, registryConfig.RegistryAuth()))
_, _, err = dockerCli.ImageInspectWithRaw(context.Background(), packageName)
assert.Nil(err)
})
})
when("--pull-policy=never", func() {
it("should use local image", func() {
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, dockerHostOS())
nestedPackageName := "test/package-" + h.RandString(10)
nestedPackage := buildpacks.NewPackageImage(
t,
pack,
nestedPackageName,
packageTomlPath,
buildpacks.WithRequiredBuildpacks(buildpacks.SimpleLayers),
)
buildpackManager.PrepareBuildpacks(tmpDir, nestedPackage)
defer h.DockerRmi(dockerCli, nestedPackageName)
aggregatePackageToml := generateAggregatePackageToml("simple-layers-parent-buildpack.tgz", nestedPackageName, dockerHostOS())
packageName := registryConfig.RepoName("test/package-" + h.RandString(10))
defer h.DockerRmi(dockerCli, packageName)
pack.JustRunSuccessfully(
"package-buildpack", packageName,
"-c", aggregatePackageToml,
"--pull-policy", pubcfg.PullNever.String(),
)
_, _, err := dockerCli.ImageInspectWithRaw(context.Background(), packageName)
assert.Nil(err)
})
it("should not pull image from registry", func() {
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, dockerHostOS())
nestedPackageName := registryConfig.RepoName("test/package-" + h.RandString(10))
nestedPackage := buildpacks.NewPackageImage(
t,
pack,
nestedPackageName,
packageTomlPath,
buildpacks.WithPublish(),
buildpacks.WithRequiredBuildpacks(buildpacks.SimpleLayers),
)
buildpackManager.PrepareBuildpacks(tmpDir, nestedPackage)
defer h.DockerRmi(dockerCli, nestedPackageName)
aggregatePackageToml := generateAggregatePackageToml("simple-layers-parent-buildpack.tgz", nestedPackageName, dockerHostOS())
packageName := registryConfig.RepoName("test/package-" + h.RandString(10))
defer h.DockerRmi(dockerCli, packageName)
output, err := pack.Run(
"package-buildpack", packageName,
"-c", aggregatePackageToml,
"--pull-policy", pubcfg.PullNever.String(),
)
assert.NotNil(err)
assertions.NewOutputAssertionManager(t, output).ReportsImageNotExistingOnDaemon(nestedPackageName)
})
})
})
when("--format file", func() {
it.Before(func() {
h.SkipIf(t, !pack.Supports("package-buildpack --format"), "format not supported")
})
it("creates the package", func() {
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, dockerHostOS())
destinationFile := filepath.Join(tmpDir, "package.cnb")
output, err := pack.Run(
"package-buildpack", destinationFile,
"--format", "file",
"-c", packageTomlPath,
)
assert.Nil(err)
assertions.NewOutputAssertionManager(t, output).ReportsPackageCreation(destinationFile)
h.AssertTarball(t, destinationFile)
})
})
when("package.toml is invalid", func() {
it("displays an error", func() {
output, err := pack.Run(
"package-buildpack", "some-package",
"-c", pack.FixtureManager().FixtureLocation("invalid_package.toml"),
)
assert.NotNil(err)
assert.Contains(output, "reading config")
})
})
})
when("report", func() {
it.Before(func() {
h.SkipIf(t, !pack.Supports("report"), "pack does not support 'report' command")
})
when("default builder is set", func() {
it("redacts default builder", func() {
pack.RunSuccessfully("set-default-builder", "paketobuildpacks/builder:base")
output := pack.RunSuccessfully("report")
version := pack.Version()
expectedOutput := pack.FixtureManager().TemplateFixture(
"report_output.txt",
map[string]interface{}{
"DefaultBuilder": "[REDACTED]",
"Version": version,
"OS": runtime.GOOS,
"Arch": runtime.GOARCH,
},
)
assert.Equal(output, expectedOutput)
})
it("explicit mode doesn't redact", func() {
pack.RunSuccessfully("set-default-builder", "paketobuildpacks/builder:base")
output := pack.RunSuccessfully("report", "--explicit")
version := pack.Version()
expectedOutput := pack.FixtureManager().TemplateFixture(
"report_output.txt",
map[string]interface{}{
"DefaultBuilder": "paketobuildpacks/builder:base",
"Version": version,
"OS": runtime.GOOS,
"Arch": runtime.GOARCH,
},
)
assert.Equal(output, expectedOutput)
})
})
})
when("build with default builders not set", func() {
it("informs the user", func() {
output, err := pack.Run(
"build", "some/image",
"-p", filepath.Join("testdata", "mock_app"),
)
assert.NotNil(err)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.IncludesMessageToSetDefaultBuilder()
assertOutput.IncludesPrefixedGoogleBuilder()
assertOutput.IncludesPrefixedHerokuBuilder()
assertOutput.IncludesPrefixedPaketoBuilders()
})
})
when("inspect-buildpack", func() {
var tmpDir string
it.Before(func() {
h.SkipUnless(t, pack.Supports("inspect-buildpack"), "version of pack doesn't support the 'inspect-buildpack' command")
var err error
tmpDir, err = ioutil.TempDir("", "inspect-buildpack-tests")
assert.Nil(err)
})
it.After(func() {
assert.Succeeds(os.RemoveAll(tmpDir))
})
when("buildpack archive", func() {
when("inspect-buildpack", func() {
it("succeeds", func() {
packageFileLocation := filepath.Join(
tmpDir,
fmt.Sprintf("buildpack-%s.cnb", h.RandString(8)),
)
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, "package_for_build_cmd.toml", dockerHostOS())
packageFile := buildpacks.NewPackageFile(
t,
pack,
packageFileLocation,
packageTomlPath,
buildpacks.WithRequiredBuildpacks(
buildpacks.FolderSimpleLayersParent,
buildpacks.FolderSimpleLayers,
),
)
buildpackManager.PrepareBuildpacks(tmpDir, packageFile)
expectedOutput := pack.FixtureManager().TemplateFixture(
"inspect_buildpack_output.txt",
map[string]interface{}{
"buildpack_source": "LOCAL ARCHIVE",
"buildpack_name": packageFileLocation,
},
)
output := pack.RunSuccessfully("inspect-buildpack", packageFileLocation)
assert.TrimmedEq(output, expectedOutput)
})
})
})
when("buildpack image", func() {
when("inspect-buildpack", func() {
it("succeeds", func() {
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, "package_for_build_cmd.toml", dockerHostOS())
packageImageName := registryConfig.RepoName("buildpack-" + h.RandString(8))
packageImage := buildpacks.NewPackageImage(
t,
pack,
packageImageName,
packageTomlPath,
buildpacks.WithRequiredBuildpacks(
buildpacks.FolderSimpleLayersParent,
buildpacks.FolderSimpleLayers,
),
)
buildpackManager.PrepareBuildpacks(tmpDir, packageImage)
expectedOutput := pack.FixtureManager().TemplateFixture(
"inspect_buildpack_output.txt",
map[string]interface{}{
"buildpack_source": "LOCAL IMAGE",
"buildpack_name": packageImageName,
},
)
output := pack.RunSuccessfully("inspect-buildpack", packageImageName)
assert.TrimmedEq(output, expectedOutput)
})
})
})
})
}
func testAcceptance(
t *testing.T,
when spec.G,
it spec.S,
subjectPackConfig, createBuilderPackConfig config.PackAsset,
lifecycle config.LifecycleAsset,
) {
var (
pack, createBuilderPack *invoke.PackInvoker
buildpackManager buildpacks.BuildpackManager
bpDir = buildpacksDir(lifecycle.EarliestBuildpackAPIVersion())
assert = h.NewAssertionManager(t)
)
it.Before(func() {
pack = invoke.NewPackInvoker(t, assert, subjectPackConfig, registryConfig.DockerConfigDir)
pack.EnableExperimental()
createBuilderPack = invoke.NewPackInvoker(t, assert, createBuilderPackConfig, registryConfig.DockerConfigDir)
createBuilderPack.EnableExperimental()
buildpackManager = buildpacks.NewBuildpackManager(
t,
assert,
buildpacks.WithBuildpackAPIVersion(lifecycle.EarliestBuildpackAPIVersion()),
)
})
it.After(func() {
pack.Cleanup()
createBuilderPack.Cleanup()
})
when("stack is created", func() {
var (
runImageMirror string
)
it.Before(func() {
value, err := suiteManager.RunTaskOnceString("create-stack",
func() (string, error) {
runImageMirror := registryConfig.RepoName(runImage)
err := createStack(t, dockerCli, runImageMirror)
if err != nil {
return "", err
}
return runImageMirror, nil
})
assert.Nil(err)
suiteManager.RegisterCleanUp("remove-stack-images", func() error {
return h.DockerRmi(dockerCli, runImage, buildImage, value)
})
runImageMirror = value
})
when("builder is created", func() {
var builderName string
it.Before(func() {
key := taskKey(
"create-builder",
append(
[]string{runImageMirror, createBuilderPackConfig.Path(), lifecycle.Identifier()},
createBuilderPackConfig.FixturePaths()...,
)...,
)
value, err := suiteManager.RunTaskOnceString(key, func() (string, error) {
return createBuilder(t, assert, createBuilderPack, lifecycle, buildpackManager, runImageMirror)
})
assert.Nil(err)
suiteManager.RegisterCleanUp("clean-"+key, func() error {
return h.DockerRmi(dockerCli, value)
})
builderName = value
})
when("builder.toml is invalid", func() {
it("displays an error", func() {
h.SkipUnless(
t,
createBuilderPack.SupportsFeature(invoke.BuilderTomlValidation),
"builder.toml validation not supported",
)
builderConfigPath := createBuilderPack.FixtureManager().FixtureLocation("invalid_builder.toml")
output, err := pack.Run(
"create-builder", "some-builder:build",
"--config", builderConfigPath,
)
assert.NotNil(err)
assert.Contains(output, "invalid builder toml")
})
})
when("build", func() {
var repo, repoName string
it.Before(func() {
repo = "some-org/" + h.RandString(10)
repoName = registryConfig.RepoName(repo)
})
it.After(func() {
h.DockerRmi(dockerCli, repoName)
ref, err := name.ParseReference(repoName, name.WeakValidation)
assert.Nil(err)
cacheImage := cache.NewImageCache(ref, dockerCli)
buildCacheVolume := cache.NewVolumeCache(ref, "build", dockerCli)
launchCacheVolume := cache.NewVolumeCache(ref, "launch", dockerCli)
cacheImage.Clear(context.TODO())
buildCacheVolume.Clear(context.TODO())
launchCacheVolume.Clear(context.TODO())
})
when("builder is untrusted", func() {
var untrustedBuilderName string
it.Before(func() {
var err error
untrustedBuilderName, err = createBuilder(
t,
assert,
createBuilderPack,
lifecycle,
buildpackManager,
runImageMirror,
)
assert.Nil(err)
})
it.After(func() {
h.DockerRmi(dockerCli, untrustedBuilderName)
})
it("uses the 5 phases", func() {
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"-B", untrustedBuilderName,
)
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName)
assertOutput := assertions.NewLifecycleOutputAssertionManager(t, output)
if pack.SupportsFeature(invoke.CreatorInPack) {
assertOutput.IncludesLifecycleImageTag()
}
assertOutput.IncludesSeparatePhases()
})
})
when("default builder is set", func() {
var usingCreator bool
it.Before(func() {
pack.JustRunSuccessfully("set-default-builder", builderName)
var trustBuilder bool
if pack.Supports("trust-builder") {
pack.JustRunSuccessfully("trust-builder", builderName)
trustBuilder = true
}
// Technically the creator is supported as of platform API version 0.3 (lifecycle version 0.7.0+) but earlier versions
// have bugs that make using the creator problematic.
creatorSupported := lifecycle.SupportsFeature(config.CreatorInLifecycle) &&
pack.SupportsFeature(invoke.CreatorInPack)
usingCreator = creatorSupported && trustBuilder
})
it("creates a runnable, rebuildable image on daemon from app dir", func() {
appPath := filepath.Join("testdata", "mock_app")
output := pack.RunSuccessfully(
"build", repoName,
"-p", appPath,
)
imgId, err := imgIDForRepoName(repoName)
if err != nil {
t.Fatal(err)
}
defer h.DockerRmi(dockerCli, imgId)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsSuccessfulImageBuild(repoName)
assertOutput.ReportsUsingBuildCacheVolume()
assertOutput.ReportsSelectingRunImageMirror(runImageMirror)
t.Log("app is runnable")
assertMockAppRunsWithOutput(t, assert, repoName, "Launch Dep Contents", "Cached Dep Contents")
t.Log("it uses the run image as a base image")
assertHasBase(t, assert, repoName, runImage)
t.Log("sets the run image metadata")
appMetadataLabel := imageLabel(t,
assert,
dockerCli,
repoName,
"io.buildpacks.lifecycle.metadata",
)
assert.Contains(appMetadataLabel, fmt.Sprintf(`"stack":{"runImage":{"image":"%s","mirrors":["%s"]}}}`, runImage, runImageMirror))
t.Log("registry is empty")
contents, err := registryConfig.RegistryCatalog()
assert.Nil(err)
if strings.Contains(contents, repo) {
t.Fatalf("Should not have published image without the '--publish' flag: got %s", contents)
}
t.Log("add a local mirror")
localRunImageMirror := registryConfig.RepoName("pack-test/run-mirror")
assert.Succeeds(dockerCli.ImageTag(context.TODO(), runImage, localRunImageMirror))
defer h.DockerRmi(dockerCli, localRunImageMirror)
pack.JustRunSuccessfully("set-run-image-mirrors", runImage, "-m", localRunImageMirror)
t.Log("rebuild")
output = pack.RunSuccessfully(
"build", repoName,
"-p", appPath,
)
assertOutput.ReportsSuccessfulImageBuild(repoName)
imgId, err = imgIDForRepoName(repoName)
if err != nil {
t.Fatal(err)
}
defer h.DockerRmi(dockerCli, imgId)
assertOutput = assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsSuccessfulImageBuild(repoName)
assertOutput.ReportsSelectingRunImageMirrorFromLocalConfig(localRunImageMirror)
cachedLaunchLayer := "simple/layers:cached-launch-layer"
assertLifecycleOutput := assertions.NewLifecycleOutputAssertionManager(t, output)
assertLifecycleOutput.ReportsRestoresCachedLayer(cachedLaunchLayer)
assertLifecycleOutput.ReportsExporterReusingUnchangedLayer(cachedLaunchLayer)
assertLifecycleOutput.ReportsCacheReuse(cachedLaunchLayer)
t.Log("app is runnable")
assertMockAppRunsWithOutput(t, assert, repoName, "Launch Dep Contents", "Cached Dep Contents")
t.Log("rebuild with --clear-cache")
output = pack.RunSuccessfully("build", repoName, "-p", appPath, "--clear-cache")
assertOutput = assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsSuccessfulImageBuild(repoName)
if !usingCreator {
assertOutput.ReportsSkippingRestore()
}
assertLifecycleOutput = assertions.NewLifecycleOutputAssertionManager(t, output)
assertLifecycleOutput.ReportsSkippingBuildpackLayerAnalysis()
assertLifecycleOutput.ReportsExporterReusingUnchangedLayer(cachedLaunchLayer)
assertLifecycleOutput.ReportsCacheCreation(cachedLaunchLayer)
t.Log("cacher adds layers")
assert.Matches(output, regexp.MustCompile(`(?i)Adding cache layer 'simple/layers:cached-launch-layer'`))
if pack.Supports("inspect-image") {
t.Log("inspect-image")
output = pack.RunSuccessfully("inspect-image", repoName)
var (
webCommand string
helloCommand string
helloArgs string
)
if dockerHostOS() == "windows" {
webCommand = ".\\run"
helloCommand = "cmd"
helloArgs = " /c echo hello world"
} else {
webCommand = "./run"
helloCommand = "echo"
helloArgs = "hello world"
}
expectedOutput := pack.FixtureManager().TemplateFixture(
"inspect_image_local_output.txt",
map[string]interface{}{
"image_name": repoName,
"base_image_id": h.ImageID(t, runImageMirror),
"base_image_top_layer": h.TopLayerDiffID(t, runImageMirror),
"run_image_local_mirror": localRunImageMirror,
"run_image_mirror": runImageMirror,
"web_command": webCommand,
"hello_command": helloCommand,
"hello_args": helloArgs,
},
)
assert.Equal(output, expectedOutput)
}
})
when("--no-color", func() {
it.Before(func() {
h.SkipUnless(t,
pack.SupportsFeature(invoke.NoColorInBuildpacks),
"pack had a no-color bug for color strings in buildpacks until 0.12.0",
)
})
it("doesn't have color", func() {
appPath := filepath.Join("testdata", "mock_app")
// --no-color is set as a default option in our tests, and doesn't need to be explicitly provided
output := pack.RunSuccessfully("build", repoName, "-p", appPath)
imgId, err := imgIDForRepoName(repoName)
if err != nil {
t.Fatal(err)
}
defer h.DockerRmi(dockerCli, imgId)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsSuccessfulImageBuild(repoName)
assertOutput.WithoutColors()
})
})
when("--quiet", func() {
it.Before(func() {
h.SkipUnless(t,
pack.SupportsFeature(invoke.QuietMode),
"pack had a bug for quiet mode until 0.13.2",
)
})
it("only logs app name and sha", func() {
appPath := filepath.Join("testdata", "mock_app")
pack.SetVerbose(false)
defer pack.SetVerbose(true)
output := pack.RunSuccessfully("build", repoName, "-p", appPath, "--quiet")
imgId, err := imgIDForRepoName(repoName)
if err != nil {
t.Fatal(err)
}
defer h.DockerRmi(dockerCli, imgId)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportSuccessfulQuietBuild(repoName)
})
})
it("supports building app from a zip file", func() {
appPath := filepath.Join("testdata", "mock_app.zip")
output := pack.RunSuccessfully("build", repoName, "-p", appPath)
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName)
imgId, err := imgIDForRepoName(repoName)
if err != nil {
t.Fatal(err)
}
defer h.DockerRmi(dockerCli, imgId)
})
when("--network", func() {
var tmpDir string
it.Before(func() {
h.SkipUnless(t,
pack.Supports("build --network"),
"--network flag not supported for build",
)
var err error
tmpDir, err = ioutil.TempDir("", "archive-buildpacks-")
assert.Nil(err)
buildpackManager.PrepareBuildpacks(tmpDir, buildpacks.InternetCapable)
})
it.After(func() {
assert.Succeeds(os.RemoveAll(tmpDir))
assert.Succeeds(h.DockerRmi(dockerCli, repoName))
})
when("the network mode is not provided", func() {
it("reports buildpack access to internet", func() {
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--buildpack", buildpacks.InternetCapable.FullPathIn(tmpDir),
)
assertBuildpackOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output)
assertBuildpackOutput.ReportsConnectedToInternet()
})
})
when("the network mode is set to default", func() {
it("reports buildpack access to internet", func() {
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--buildpack", buildpacks.InternetCapable.FullPathIn(tmpDir),
"--network", "default",
)
assertBuildpackOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output)
assertBuildpackOutput.ReportsConnectedToInternet()
})
})
when("the network mode is set to none", func() {
it("reports buildpack disconnected from internet", func() {
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--buildpack", buildpacks.InternetCapable.FullPathIn(tmpDir),
"--network", "none",
)
assertBuildpackOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output)
assertBuildpackOutput.ReportsDisconnectedFromInternet()
})
})
})
when("--volume", func() {
var (
volumeRoot = "/"
slash = "/"
tmpDir string
tmpVolumeSrc string
)
it.Before(func() {
h.SkipIf(t, os.Getenv("DOCKER_HOST") != "", "cannot mount volume when DOCKER_HOST is set")
h.SkipUnless(t,
pack.SupportsFeature(invoke.ReadWriteVolumeMounts),
"pack version does not support read/write volume mounts",
)
if dockerHostOS() == "windows" {
volumeRoot = `c:\`
slash = `\`
}
var err error
tmpDir, err = ioutil.TempDir("", "volume-buildpack-tests-")
assert.Nil(err)
buildpackManager.PrepareBuildpacks(tmpDir, buildpacks.ReadVolume, buildpacks.ReadWriteVolume)
tmpVolumeSrc, err = ioutil.TempDir("", "volume-mount-source")
assert.Nil(err)
assert.Succeeds(os.Chmod(tmpVolumeSrc, 0777)) // Override umask
// Some OSes (like macOS) use symlinks for the standard temp dir.
// Resolve it so it can be properly mounted by the Docker daemon.
tmpVolumeSrc, err = filepath.EvalSymlinks(tmpVolumeSrc)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tmpVolumeSrc, "some-file"), []byte("some-content\n"), 0777)
assert.Nil(err)
})
it.After(func() {
_ = h.DockerRmi(dockerCli, repoName)
_ = os.RemoveAll(tmpDir)
_ = os.RemoveAll(tmpVolumeSrc)
})
when("volume is read-only", func() {
it("mounts the provided volume in the detect and build phases", func() {
volumeDest := volumeRoot + "platform" + slash + "volume-mount-target"
testFilePath := volumeDest + slash + "some-file"
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--volume", fmt.Sprintf("%s:%s", tmpVolumeSrc, volumeDest),
"--buildpack", buildpacks.ReadVolume.FullPathIn(tmpDir),
"--env", "TEST_FILE_PATH="+testFilePath,
)
bpOutputAsserts := assertions.NewTestBuildpackOutputAssertionManager(t, output)
bpOutputAsserts.ReportsReadingFileContents("Detect", testFilePath, "some-content")
bpOutputAsserts.ReportsReadingFileContents("Build", testFilePath, "some-content")
})
it("should fail to write", func() {
volumeDest := volumeRoot + "platform" + slash + "volume-mount-target"
testDetectFilePath := volumeDest + slash + "detect-file"
testBuildFilePath := volumeDest + slash + "build-file"
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--volume", fmt.Sprintf("%s:%s", tmpVolumeSrc, volumeDest),
"--buildpack", buildpacks.ReadWriteVolume.FullPathIn(tmpDir),
"--env", "DETECT_TEST_FILE_PATH="+testDetectFilePath,
"--env", "BUILD_TEST_FILE_PATH="+testBuildFilePath,
)
bpOutputAsserts := assertions.NewTestBuildpackOutputAssertionManager(t, output)
bpOutputAsserts.ReportsFailingToWriteFileContents("Detect", testDetectFilePath)
bpOutputAsserts.ReportsFailingToWriteFileContents("Build", testBuildFilePath)
})
})
when("volume is read-write", func() {
it("can be written to", func() {
volumeDest := volumeRoot + "volume-mount-target"
testDetectFilePath := volumeDest + slash + "detect-file"
testBuildFilePath := volumeDest + slash + "build-file"
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--volume", fmt.Sprintf("%s:%s:rw", tmpVolumeSrc, volumeDest),
"--buildpack", buildpacks.ReadWriteVolume.FullPathIn(tmpDir),
"--env", "DETECT_TEST_FILE_PATH="+testDetectFilePath,
"--env", "BUILD_TEST_FILE_PATH="+testBuildFilePath,
)
bpOutputAsserts := assertions.NewTestBuildpackOutputAssertionManager(t, output)
bpOutputAsserts.ReportsWritingFileContents("Detect", testDetectFilePath)
bpOutputAsserts.ReportsReadingFileContents("Detect", testDetectFilePath, "some-content")
bpOutputAsserts.ReportsWritingFileContents("Build", testBuildFilePath)
bpOutputAsserts.ReportsReadingFileContents("Build", testBuildFilePath, "some-content")
})
})
})
when("--default-process", func() {
it("sets the default process from those in the process list", func() {
pack.RunSuccessfully(
"build", repoName,
"--default-process", "hello",
"-p", filepath.Join("testdata", "mock_app"),
)
assertMockAppLogs(t, assert, repoName, "hello world")
})
})
when("--buildpack", func() {
when("the argument is an ID", func() {
it("adds the buildpacks to the builder if necessary and runs them", func() {
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--buildpack", "simple/layers", // can omit version if only one
"--buildpack", "[email protected]",
)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertTestAppOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output)
assertTestAppOutput.ReportsBuildStep("Simple Layers Buildpack")
assertTestAppOutput.ReportsBuildStep("NOOP Buildpack")
assertOutput.ReportsSuccessfulImageBuild(repoName)
t.Log("app is runnable")
assertMockAppRunsWithOutput(t,
assert,
repoName,
"Launch Dep Contents",
"Cached Dep Contents",
)
})
})
when("the argument is an archive", func() {
var tmpDir string
it.Before(func() {
var err error
tmpDir, err = ioutil.TempDir("", "archive-buildpack-tests-")
assert.Nil(err)
})
it.After(func() {
assert.Succeeds(os.RemoveAll(tmpDir))
})
it("adds the buildpack to the builder and runs it", func() {
buildpackManager.PrepareBuildpacks(tmpDir, buildpacks.ArchiveNotInBuilder)
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--buildpack", buildpacks.ArchiveNotInBuilder.FullPathIn(tmpDir),
)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsAddingBuildpack("local/bp", "local-bp-version")
assertOutput.ReportsSuccessfulImageBuild(repoName)
assertBuildpackOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output)
assertBuildpackOutput.ReportsBuildStep("Local Buildpack")
})
})
when("the argument is directory", func() {
var tmpDir string
it.Before(func() {
var err error
tmpDir, err = ioutil.TempDir("", "folder-buildpack-tests-")
assert.Nil(err)
})
it.After(func() {
_ = os.RemoveAll(tmpDir)
})
it("adds the buildpacks to the builder and runs it", func() {
h.SkipIf(t, runtime.GOOS == "windows", "buildpack directories not supported on windows")
buildpackManager.PrepareBuildpacks(tmpDir, buildpacks.FolderNotInBuilder)
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--buildpack", buildpacks.FolderNotInBuilder.FullPathIn(tmpDir),
)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsAddingBuildpack("local/bp", "local-bp-version")
assertOutput.ReportsSuccessfulImageBuild(repoName)
assertBuildpackOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output)
assertBuildpackOutput.ReportsBuildStep("Local Buildpack")
})
})
when("the argument is a buildpackage image", func() {
var (
tmpDir string
packageImageName string
)
it.Before(func() {
h.SkipUnless(t,
pack.Supports("package-buildpack --os"),
"--buildpack does not accept buildpackage unless package-buildpack --os is supported",
)
})
it.After(func() {
_ = h.DockerRmi(dockerCli, packageImageName)
_ = os.RemoveAll(tmpDir)
})
it("adds the buildpacks to the builder and runs them", func() {
packageImageName = registryConfig.RepoName("buildpack-" + h.RandString(8))
packageImage := buildpacks.NewPackageImage(
t,
pack,
packageImageName,
pack.FixtureManager().FixtureLocation("package_for_build_cmd.toml"),
buildpacks.WithRequiredBuildpacks(
buildpacks.FolderSimpleLayersParent,
buildpacks.FolderSimpleLayers,
),
)
buildpackManager.PrepareBuildpacks(tmpDir, packageImage)
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--buildpack", packageImageName,
)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsAddingBuildpack(
"simple/layers/parent",
"simple-layers-parent-version",
)
assertOutput.ReportsAddingBuildpack("simple/layers", "simple-layers-version")
assertOutput.ReportsSuccessfulImageBuild(repoName)
assertBuildpackOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output)
assertBuildpackOutput.ReportsBuildStep("Simple Layers Buildpack")
})
})
when("the argument is a buildpackage file", func() {
var tmpDir string
it.Before(func() {
h.SkipUnless(t,
pack.Supports("package-buildpack --os"),
"--buildpack does not accept buildpackage unless package-buildpack --os is supported",
)
var err error
tmpDir, err = ioutil.TempDir("", "package-file")
assert.Nil(err)
})
it.After(func() {
assert.Succeeds(os.RemoveAll(tmpDir))
})
it("adds the buildpacks to the builder and runs them", func() {
packageFileLocation := filepath.Join(
tmpDir,
fmt.Sprintf("buildpack-%s.cnb", h.RandString(8)),
)
packageFile := buildpacks.NewPackageFile(
t,
pack,
packageFileLocation,
pack.FixtureManager().FixtureLocation("package_for_build_cmd.toml"),
buildpacks.WithRequiredBuildpacks(
buildpacks.FolderSimpleLayersParent,
buildpacks.FolderSimpleLayers,
),
)
buildpackManager.PrepareBuildpacks(tmpDir, packageFile)
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--buildpack", packageFileLocation,
)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsAddingBuildpack(
"simple/layers/parent",
"simple-layers-parent-version",
)
assertOutput.ReportsAddingBuildpack("simple/layers", "simple-layers-version")
assertOutput.ReportsSuccessfulImageBuild(repoName)
assertBuildpackOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output)
assertBuildpackOutput.ReportsBuildStep("Simple Layers Buildpack")
})
})
when("the buildpack stack doesn't match the builder", func() {
var otherStackBuilderTgz string
it.Before(func() {
otherStackBuilderTgz = h.CreateTGZ(t, filepath.Join(bpDir, "other-stack-buildpack"), "./", 0755)
})
it.After(func() {
assert.Succeeds(os.Remove(otherStackBuilderTgz))
})
it("errors", func() {
output, err := pack.Run(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--buildpack", otherStackBuilderTgz,
)
assert.NotNil(err)
assert.Contains(output, "other/stack/bp")
assert.Contains(output, "other-stack-version")
assert.Contains(output, "does not support stack 'pack.test.stack'")
})
})
})
when("--env-file", func() {
var envPath string
it.Before(func() {
envfile, err := ioutil.TempFile("", "envfile")
assert.Nil(err)
defer envfile.Close()
err = os.Setenv("ENV2_CONTENTS", "Env2 Layer Contents From Environment")
assert.Nil(err)
envfile.WriteString(`
DETECT_ENV_BUILDPACK=true
ENV1_CONTENTS=Env1 Layer Contents From File
ENV2_CONTENTS
`)
envPath = envfile.Name()
})
it.After(func() {
assert.Succeeds(os.Unsetenv("ENV2_CONTENTS"))
assert.Succeeds(os.RemoveAll(envPath))
})
it("provides the env vars to the build and detect steps", func() {
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--env-file", envPath,
)
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName)
assertMockAppRunsWithOutput(t,
assert,
repoName,
"Env2 Layer Contents From Environment",
"Env1 Layer Contents From File",
)
})
})
when("--env", func() {
it.Before(func() {
assert.Succeeds(os.Setenv("ENV2_CONTENTS", "Env2 Layer Contents From Environment"))
})
it.After(func() {
assert.Succeeds(os.Unsetenv("ENV2_CONTENTS"))
})
it("provides the env vars to the build and detect steps", func() {
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--env", "DETECT_ENV_BUILDPACK=true",
"--env", `ENV1_CONTENTS="Env1 Layer Contents From Command Line"`,
"--env", "ENV2_CONTENTS",
)
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName)
assertMockAppRunsWithOutput(t,
assert,
repoName,
"Env2 Layer Contents From Environment",
"Env1 Layer Contents From Command Line",
)
})
})
when("--run-image", func() {
var runImageName string
when("the run-image has the correct stack ID", func() {
it.Before(func() {
user := func() string {
if dockerHostOS() == "windows" {
return "ContainerAdministrator"
}
return "root"
}
runImageName = h.CreateImageOnRemote(t, dockerCli, registryConfig, "custom-run-image"+h.RandString(10), fmt.Sprintf(`
FROM %s
USER %s
RUN echo "custom-run" > /custom-run.txt
USER pack
`, runImage, user()))
})
it.After(func() {
h.DockerRmi(dockerCli, runImageName)
})
it("uses the run image as the base image", func() {
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--run-image", runImageName,
)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsSuccessfulImageBuild(repoName)
assertOutput.ReportsPullingImage(runImageName)
t.Log("app is runnable")
assertMockAppRunsWithOutput(t,
assert,
repoName,
"Launch Dep Contents",
"Cached Dep Contents",
)
t.Log("uses the run image as the base image")
assertHasBase(t, assert, repoName, runImageName)
})
})
when("the run image has the wrong stack ID", func() {
it.Before(func() {
runImageName = h.CreateImageOnRemote(t, dockerCli, registryConfig, "custom-run-image"+h.RandString(10), fmt.Sprintf(`
FROM %s
LABEL io.buildpacks.stack.id=other.stack.id
USER pack
`, runImage))
})
it.After(func() {
h.DockerRmi(dockerCli, runImageName)
})
it("fails with a message", func() {
output, err := pack.Run(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--run-image", runImageName,
)
assert.NotNil(err)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsRunImageStackNotMatchingBuilder(
"other.stack.id",
"pack.test.stack",
)
})
})
})
when("--publish", func() {
it("creates image on the registry", func() {
buildArgs := []string{
repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--publish",
}
if dockerHostOS() != "windows" {
buildArgs = append(buildArgs, "--network", "host")
}
output := pack.RunSuccessfully("build", buildArgs...)
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName)
t.Log("checking that registry has contents")
contents, err := registryConfig.RegistryCatalog()
assert.Nil(err)
if !strings.Contains(contents, repo) {
t.Fatalf("Expected to see image %s in %s", repo, contents)
}
assert.Succeeds(h.PullImageWithAuth(dockerCli, repoName, registryConfig.RegistryAuth()))
defer h.DockerRmi(dockerCli, repoName)
t.Log("app is runnable")
assertMockAppRunsWithOutput(t,
assert,
repoName,
"Launch Dep Contents",
"Cached Dep Contents",
)
if pack.Supports("inspect-image") {
t.Log("inspect-image")
output = pack.RunSuccessfully("inspect-image", repoName)
var (
webCommand string
helloCommand string
helloArgs string
)
if dockerHostOS() == "windows" {
webCommand = ".\\run"
helloCommand = "cmd"
helloArgs = " /c echo hello world"
} else {
webCommand = "./run"
helloCommand = "echo"
helloArgs = "hello world"
}
expectedOutput := pack.FixtureManager().TemplateFixture(
"inspect_image_published_output.txt",
map[string]interface{}{
"image_name": repoName,
"base_image_ref": strings.Join([]string{runImageMirror, h.Digest(t, runImageMirror)}, "@"),
"base_image_top_layer": h.TopLayerDiffID(t, runImageMirror),
"run_image_mirror": runImageMirror,
"web_command": webCommand,
"hello_command": helloCommand,
"hello_args": helloArgs,
},
)
assert.Equal(output, expectedOutput)
}
})
})
when("ctrl+c", func() {
it("stops the execution", func() {
var buf = new(bytes.Buffer)
command := pack.StartWithWriter(
buf,
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
)
go command.TerminateAtStep("DETECTING")
err := command.Wait()
assert.NotNil(err)
assert.NotContains(buf.String(), "Successfully built image")
})
})
when("--descriptor", func() {
when("exclude and include", func() {
var buildpackTgz, tempAppDir string
it.Before(func() {
h.SkipUnless(t,
pack.SupportsFeature(invoke.ExcludeAndIncludeDescriptor),
"pack --descriptor does NOT support 'exclude' and 'include' feature",
)
buildpackTgz = h.CreateTGZ(t, filepath.Join(bpDir, "descriptor-buildpack"), "./", 0755)
var err error
tempAppDir, err = ioutil.TempDir("", "descriptor-app")
assert.Nil(err)
// Create test directories and files:
//
// ├── cookie.jar
// ├── secrets
// │ ├── api_keys.json
// │   └── user_token
// ├── media
// │ ├── mountain.jpg
// │ └── person.png
// └── test.sh
err = os.Mkdir(filepath.Join(tempAppDir, "secrets"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "secrets", "api_keys.json"), []byte("{}"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "secrets", "user_token"), []byte("token"), 0755)
assert.Nil(err)
err = os.Mkdir(filepath.Join(tempAppDir, "media"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "media", "mountain.jpg"), []byte("fake image bytes"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "media", "person.png"), []byte("fake image bytes"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "cookie.jar"), []byte("chocolate chip"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "test.sh"), []byte("echo test"), 0755)
assert.Nil(err)
})
it.After(func() {
assert.Succeeds(os.RemoveAll(tempAppDir))
})
it("should exclude ALL specified files and directories", func() {
projectToml := `
[project]
name = "exclude test"
[[project.licenses]]
type = "MIT"
[build]
exclude = [ "*.sh", "secrets/", "media/metadata" ]
`
excludeDescriptorPath := filepath.Join(tempAppDir, "exclude.toml")
err := ioutil.WriteFile(excludeDescriptorPath, []byte(projectToml), 0755)
assert.Nil(err)
output := pack.RunSuccessfully(
"build",
repoName,
"-p", tempAppDir,
"--buildpack", buildpackTgz,
"--descriptor", excludeDescriptorPath,
)
assert.NotContains(output, "api_keys.json")
assert.NotContains(output, "user_token")
assert.NotContains(output, "test.sh")
assert.Contains(output, "cookie.jar")
assert.Contains(output, "mountain.jpg")
assert.Contains(output, "person.png")
})
it("should ONLY include specified files and directories", func() {
projectToml := `
[project]
name = "include test"
[[project.licenses]]
type = "MIT"
[build]
include = [ "*.jar", "media/mountain.jpg", "media/person.png" ]
`
includeDescriptorPath := filepath.Join(tempAppDir, "include.toml")
err := ioutil.WriteFile(includeDescriptorPath, []byte(projectToml), 0755)
assert.Nil(err)
output := pack.RunSuccessfully(
"build",
repoName,
"-p", tempAppDir,
"--buildpack", buildpackTgz,
"--descriptor", includeDescriptorPath,
)
assert.NotContains(output, "api_keys.json")
assert.NotContains(output, "user_token")
assert.NotContains(output, "test.sh")
assert.Contains(output, "cookie.jar")
assert.Contains(output, "mountain.jpg")
assert.Contains(output, "person.png")
})
})
})
})
})
when("inspect-builder", func() {
when("inspecting a nested builder", func() {
it.Before(func() {
// create our nested builder
h.SkipIf(t, dockerHostOS() == "windows", "These tests are not yet compatible with Windows-based containers")
h.SkipUnless(t,
pack.Supports("inspect-builder --depth"),
"pack does not support 'package-buildpack'",
)
// Build a task key for the suite manager, which de-duplicates tasks so the complex builder is only created once per suite run.
key := taskKey(
"create-complex-builder",
append(
[]string{runImageMirror, createBuilderPackConfig.Path(), lifecycle.Identifier()},
createBuilderPackConfig.FixturePaths()...,
)...,
)
// Run the task through the suite manager and cache its output so future calls with the same key reuse the builder.
value, err := suiteManager.RunTaskOnceString(key, func() (string, error) {
return createComplexBuilder(
t,
assert,
createBuilderPack,
lifecycle,
buildpackManager,
runImageMirror,
)
})
assert.Nil(err)
// Register a cleanup task that removes the builder image when the suite finishes.
suiteManager.RegisterCleanUp("clean-"+key, func() error {
return h.DockerRmi(dockerCli, value)
})
builderName = value
})
it("displays nested Detection Order groups", func() {
output := pack.RunSuccessfully(
"set-run-image-mirrors", "pack-test/run", "--mirror", "some-registry.com/pack-test/run1",
)
assert.Equal(output, "Run Image 'pack-test/run' configured with mirror 'some-registry.com/pack-test/run1'\n")
output = pack.RunSuccessfully("inspect-builder", builderName)
deprecatedBuildpackAPIs,
supportedBuildpackAPIs,
deprecatedPlatformAPIs,
supportedPlatformAPIs := lifecycle.OutputForAPIs()
expectedOutput := pack.FixtureManager().TemplateVersionedFixture(
"inspect_%s_builder_nested_output.txt",
createBuilderPack.Version(),
"inspect_builder_nested_output.txt",
map[string]interface{}{
"builder_name": builderName,
"lifecycle_version": lifecycle.Version(),
"deprecated_buildpack_apis": deprecatedBuildpackAPIs,
"supported_buildpack_apis": supportedBuildpackAPIs,
"deprecated_platform_apis": deprecatedPlatformAPIs,
"supported_platform_apis": supportedPlatformAPIs,
"run_image_mirror": runImageMirror,
"pack_version": createBuilderPack.Version(),
"trusted": "No",
// set previous pack template fields
"buildpack_api_version": lifecycle.EarliestBuildpackAPIVersion(),
"platform_api_version": lifecycle.EarliestPlatformAPIVersion(),
},
)
assert.TrimmedEq(output, expectedOutput)
})
it("provides nested detection output up to depth", func() {
output := pack.RunSuccessfully(
"set-run-image-mirrors", "pack-test/run", "--mirror", "some-registry.com/pack-test/run1",
)
assert.Equal(output, "Run Image 'pack-test/run' configured with mirror 'some-registry.com/pack-test/run1'\n")
depth := "2"
if pack.SupportsFeature(invoke.InspectBuilderOutputFormat) {
depth = "1" // The meaning of depth was changed
}
output = pack.RunSuccessfully("inspect-builder", "--depth", depth, builderName)
deprecatedBuildpackAPIs,
supportedBuildpackAPIs,
deprecatedPlatformAPIs,
supportedPlatformAPIs := lifecycle.OutputForAPIs()
expectedOutput := pack.FixtureManager().TemplateVersionedFixture(
"inspect_%s_builder_nested_depth_2_output.txt",
createBuilderPack.Version(),
"inspect_builder_nested_depth_2_output.txt",
map[string]interface{}{
"builder_name": builderName,
"lifecycle_version": lifecycle.Version(),
"deprecated_buildpack_apis": deprecatedBuildpackAPIs,
"supported_buildpack_apis": supportedBuildpackAPIs,
"deprecated_platform_apis": deprecatedPlatformAPIs,
"supported_platform_apis": supportedPlatformAPIs,
"run_image_mirror": runImageMirror,
"pack_version": createBuilderPack.Version(),
"trusted": "No",
// set previous pack template fields
"buildpack_api_version": lifecycle.EarliestBuildpackAPIVersion(),
"platform_api_version": lifecycle.EarliestPlatformAPIVersion(),
},
)
assert.TrimmedEq(output, expectedOutput)
})
when("output format is toml", func() {
it("prints builder information in toml format", func() {
h.SkipUnless(t,
pack.SupportsFeature(invoke.InspectBuilderOutputFormat),
"inspect-builder output format is not yet implemented",
)
output := pack.RunSuccessfully(
"set-run-image-mirrors", "pack-test/run", "--mirror", "some-registry.com/pack-test/run1",
)
assert.Equal(output, "Run Image 'pack-test/run' configured with mirror 'some-registry.com/pack-test/run1'\n")
output = pack.RunSuccessfully("inspect-builder", builderName, "--output", "toml")
err := toml.NewDecoder(strings.NewReader(string(output))).Decode(&struct{}{})
assert.Nil(err)
deprecatedBuildpackAPIs,
supportedBuildpackAPIs,
deprecatedPlatformAPIs,
supportedPlatformAPIs := lifecycle.TOMLOutputForAPIs()
expectedOutput := pack.FixtureManager().TemplateVersionedFixture(
"inspect_%s_builder_nested_output_toml.txt",
createBuilderPack.Version(),
"inspect_builder_nested_output_toml.txt",
map[string]interface{}{
"builder_name": builderName,
"lifecycle_version": lifecycle.Version(),
"deprecated_buildpack_apis": deprecatedBuildpackAPIs,
"supported_buildpack_apis": supportedBuildpackAPIs,
"deprecated_platform_apis": deprecatedPlatformAPIs,
"supported_platform_apis": supportedPlatformAPIs,
"run_image_mirror": runImageMirror,
"pack_version": createBuilderPack.Version(),
},
)
assert.TrimmedEq(string(output), expectedOutput)
})
})
when("output format is yaml", func() {
it("prints builder information in yaml format", func() {
h.SkipUnless(t,
pack.SupportsFeature(invoke.InspectBuilderOutputFormat),
"inspect-builder output format is not yet implemented",
)
output := pack.RunSuccessfully(
"set-run-image-mirrors", "pack-test/run", "--mirror", "some-registry.com/pack-test/run1",
)
assert.Equal(output, "Run Image 'pack-test/run' configured with mirror 'some-registry.com/pack-test/run1'\n")
output = pack.RunSuccessfully("inspect-builder", builderName, "--output", "yaml")
err := yaml.Unmarshal([]byte(output), &struct{}{})
assert.Nil(err)
deprecatedBuildpackAPIs,
supportedBuildpackAPIs,
deprecatedPlatformAPIs,
supportedPlatformAPIs := lifecycle.YAMLOutputForAPIs(14)
expectedOutput := pack.FixtureManager().TemplateVersionedFixture(
"inspect_%s_builder_nested_output_yaml.txt",
createBuilderPack.Version(),
"inspect_builder_nested_output_yaml.txt",
map[string]interface{}{
"builder_name": builderName,
"lifecycle_version": lifecycle.Version(),
"deprecated_buildpack_apis": deprecatedBuildpackAPIs,
"supported_buildpack_apis": supportedBuildpackAPIs,
"deprecated_platform_apis": deprecatedPlatformAPIs,
"supported_platform_apis": supportedPlatformAPIs,
"run_image_mirror": runImageMirror,
"pack_version": createBuilderPack.Version(),
},
)
assert.TrimmedEq(string(output), expectedOutput)
})
})
when("output format is json", func() {
it("prints builder information in json format", func() {
h.SkipUnless(t,
pack.SupportsFeature(invoke.InspectBuilderOutputFormat),
"inspect-builder output format is not yet implemented",
)
output := pack.RunSuccessfully(
"set-run-image-mirrors", "pack-test/run", "--mirror", "some-registry.com/pack-test/run1",
)
assert.Equal(output, "Run Image 'pack-test/run' configured with mirror 'some-registry.com/pack-test/run1'\n")
output = pack.RunSuccessfully("inspect-builder", builderName, "--output", "json")
err := json.Unmarshal([]byte(output), &struct{}{})
assert.Nil(err)
var prettifiedOutput bytes.Buffer
err = json.Indent(&prettifiedOutput, []byte(output), "", " ")
assert.Nil(err)
deprecatedBuildpackAPIs,
supportedBuildpackAPIs,
deprecatedPlatformAPIs,
supportedPlatformAPIs := lifecycle.JSONOutputForAPIs(8)
expectedOutput := pack.FixtureManager().TemplateVersionedFixture(
"inspect_%s_builder_nested_output_json.txt",
createBuilderPack.Version(),
"inspect_builder_nested_output_json.txt",
map[string]interface{}{
"builder_name": builderName,
"lifecycle_version": lifecycle.Version(),
"deprecated_buildpack_apis": deprecatedBuildpackAPIs,
"supported_buildpack_apis": supportedBuildpackAPIs,
"deprecated_platform_apis": deprecatedPlatformAPIs,
"supported_platform_apis": supportedPlatformAPIs,
"run_image_mirror": runImageMirror,
"pack_version": createBuilderPack.Version(),
},
)
assert.Equal(prettifiedOutput.String(), expectedOutput)
})
})
})
it("displays configuration for a builder (local and remote)", func() {
output := pack.RunSuccessfully(
"set-run-image-mirrors", "pack-test/run", "--mirror", "some-registry.com/pack-test/run1",
)
assert.Equal(output, "Run Image 'pack-test/run' configured with mirror 'some-registry.com/pack-test/run1'\n")
output = pack.RunSuccessfully("inspect-builder", builderName)
deprecatedBuildpackAPIs,
supportedBuildpackAPIs,
deprecatedPlatformAPIs,
supportedPlatformAPIs := lifecycle.OutputForAPIs()
expectedOutput := pack.FixtureManager().TemplateVersionedFixture(
"inspect_%s_builder_output.txt",
createBuilderPack.Version(),
"inspect_builder_output.txt",
map[string]interface{}{
"builder_name": builderName,
"lifecycle_version": lifecycle.Version(),
"deprecated_buildpack_apis": deprecatedBuildpackAPIs,
"supported_buildpack_apis": supportedBuildpackAPIs,
"deprecated_platform_apis": deprecatedPlatformAPIs,
"supported_platform_apis": supportedPlatformAPIs,
"run_image_mirror": runImageMirror,
"pack_version": createBuilderPack.Version(),
"trusted": "No",
// set previous pack template fields
"buildpack_api_version": lifecycle.EarliestBuildpackAPIVersion(),
"platform_api_version": lifecycle.EarliestPlatformAPIVersion(),
},
)
assert.TrimmedEq(output, expectedOutput)
})
it("indicates builder is trusted", func() {
h.SkipUnless(t, pack.Supports("trust-builder"), "version of pack doesn't trust-builder command")
pack.JustRunSuccessfully("trust-builder", builderName)
pack.JustRunSuccessfully(
"set-run-image-mirrors", "pack-test/run", "--mirror", "some-registry.com/pack-test/run1",
)
output := pack.RunSuccessfully("inspect-builder", builderName)
deprecatedBuildpackAPIs,
supportedBuildpackAPIs,
deprecatedPlatformAPIs,
supportedPlatformAPIs := lifecycle.OutputForAPIs()
expectedOutput := pack.FixtureManager().TemplateVersionedFixture(
"inspect_%s_builder_output.txt",
createBuilderPack.Version(),
"inspect_builder_output.txt",
map[string]interface{}{
"builder_name": builderName,
"lifecycle_version": lifecycle.Version(),
"deprecated_buildpack_apis": deprecatedBuildpackAPIs,
"supported_buildpack_apis": supportedBuildpackAPIs,
"deprecated_platform_apis": deprecatedPlatformAPIs,
"supported_platform_apis": supportedPlatformAPIs,
"run_image_mirror": runImageMirror,
"pack_version": createBuilderPack.Version(),
"trusted": "Yes",
// set previous pack template fields
"buildpack_api_version": lifecycle.EarliestBuildpackAPIVersion(),
"platform_api_version": lifecycle.EarliestPlatformAPIVersion(),
},
)
assert.TrimmedEq(output, expectedOutput)
})
})
when("rebase", func() {
var repoName, runBefore, origID string
var buildRunImage func(string, string, string)
it.Before(func() {
pack.JustRunSuccessfully("trust-builder", builderName)
repoName = registryConfig.RepoName("some-org/" + h.RandString(10))
runBefore = registryConfig.RepoName("run-before/" + h.RandString(10))
buildRunImage = func(newRunImage, contents1, contents2 string) {
user := func() string {
if dockerHostOS() == "windows" {
return "ContainerAdministrator"
}
return "root"
}
h.CreateImage(t, dockerCli, newRunImage, fmt.Sprintf(`
FROM %s
USER %s
RUN echo %s > /contents1.txt
RUN echo %s > /contents2.txt
USER pack
`, runImage, user(), contents1, contents2))
}
buildRunImage(runBefore, "contents-before-1", "contents-before-2")
pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--builder", builderName,
"--run-image", runBefore,
"--pull-policy", pubcfg.PullNever.String(),
)
origID = h.ImageID(t, repoName)
assertMockAppRunsWithOutput(t,
assert,
repoName,
"contents-before-1",
"contents-before-2",
)
})
it.After(func() {
h.DockerRmi(dockerCli, origID, repoName, runBefore)
ref, err := name.ParseReference(repoName, name.WeakValidation)
assert.Nil(err)
buildCacheVolume := cache.NewVolumeCache(ref, "build", dockerCli)
launchCacheVolume := cache.NewVolumeCache(ref, "launch", dockerCli)
assert.Succeeds(buildCacheVolume.Clear(context.TODO()))
assert.Succeeds(launchCacheVolume.Clear(context.TODO()))
})
when("daemon", func() {
when("--run-image", func() {
var runAfter string
it.Before(func() {
runAfter = registryConfig.RepoName("run-after/" + h.RandString(10))
buildRunImage(runAfter, "contents-after-1", "contents-after-2")
})
it.After(func() {
assert.Succeeds(h.DockerRmi(dockerCli, runAfter))
})
it("uses provided run image", func() {
output := pack.RunSuccessfully(
"rebase", repoName,
"--run-image", runAfter,
"--pull-policy", pubcfg.PullNever.String(),
)
assert.Contains(output, fmt.Sprintf("Successfully rebased image '%s'", repoName))
assertMockAppRunsWithOutput(t,
assert,
repoName,
"contents-after-1",
"contents-after-2",
)
})
})
when("local config has a mirror", func() {
var localRunImageMirror string
it.Before(func() {
localRunImageMirror = registryConfig.RepoName("run-after/" + h.RandString(10))
buildRunImage(localRunImageMirror, "local-mirror-after-1", "local-mirror-after-2")
pack.JustRunSuccessfully("set-run-image-mirrors", runImage, "-m", localRunImageMirror)
})
it.After(func() {
assert.Succeeds(h.DockerRmi(dockerCli, localRunImageMirror))
})
it("prefers the local mirror", func() {
output := pack.RunSuccessfully("rebase", repoName, "--pull-policy", pubcfg.PullNever.String())
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsSelectingRunImageMirrorFromLocalConfig(localRunImageMirror)
assertOutput.ReportsSuccessfulRebase(repoName)
assertMockAppRunsWithOutput(t,
assert,
repoName,
"local-mirror-after-1",
"local-mirror-after-2",
)
})
})
when("image metadata has a mirror", func() {
it.Before(func() {
// clean up existing mirror first to avoid leaking images
assert.Succeeds(h.DockerRmi(dockerCli, runImageMirror))
buildRunImage(runImageMirror, "mirror-after-1", "mirror-after-2")
})
it("selects the best mirror", func() {
output := pack.RunSuccessfully("rebase", repoName, "--pull-policy", pubcfg.PullNever.String())
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsSelectingRunImageMirror(runImageMirror)
assertOutput.ReportsSuccessfulRebase(repoName)
assertMockAppRunsWithOutput(t,
assert,
repoName,
"mirror-after-1",
"mirror-after-2",
)
})
})
})
when("--publish", func() {
it.Before(func() {
assert.Succeeds(h.PushImage(dockerCli, repoName, registryConfig))
})
when("--run-image", func() {
var runAfter string
it.Before(func() {
runAfter = registryConfig.RepoName("run-after/" + h.RandString(10))
buildRunImage(runAfter, "contents-after-1", "contents-after-2")
assert.Succeeds(h.PushImage(dockerCli, runAfter, registryConfig))
})
it.After(func() {
h.DockerRmi(dockerCli, runAfter)
})
it("uses provided run image", func() {
output := pack.RunSuccessfully("rebase", repoName, "--publish", "--run-image", runAfter)
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulRebase(repoName)
assert.Succeeds(h.PullImageWithAuth(dockerCli, repoName, registryConfig.RegistryAuth()))
assertMockAppRunsWithOutput(t,
assert,
repoName,
"contents-after-1",
"contents-after-2",
)
})
})
})
})
})
})
}
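// buildpacksDir returns the testdata directory containing the mock buildpacks for the given buildpack API version.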
func buildpacksDir(bpAPIVersion string) string {
return filepath.Join("testdata", "mock_buildpacks", bpAPIVersion)
}
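// createComplexBuilder creates a builder image whose buildpacks include nested buildpackage images
// (nested-level-1 -> nested-level-2 -> simple-layers), pushes it to the test registry, and returns its name.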
func createComplexBuilder(t *testing.T,
assert h.AssertionManager,
pack *invoke.PackInvoker,
lifecycle config.LifecycleAsset,
buildpackManager buildpacks.BuildpackManager,
runImageMirror string,
) (string, error) {
t.Log("creating complex builder image...")
// CREATE TEMP WORKING DIR
tmpDir, err := ioutil.TempDir("", "create-complex-test-builder")
if err != nil {
return "", err
}
defer os.RemoveAll(tmpDir)
// ARCHIVE BUILDPACKS
builderBuildpacks := []buildpacks.TestBuildpack{
buildpacks.Noop,
buildpacks.Noop2,
buildpacks.OtherStack,
buildpacks.ReadEnv,
}
templateMapping := map[string]interface{}{
"run_image_mirror": runImageMirror,
}
packageImageName := registryConfig.RepoName("nested-level-1-buildpack-" + h.RandString(8))
nestedLevelTwoBuildpackName := registryConfig.RepoName("nested-level-2-buildpack-" + h.RandString(8))
simpleLayersBuildpackName := registryConfig.RepoName("simple-layers-buildpack-" + h.RandString(8))
templateMapping["package_id"] = "simple/nested-level-1"
templateMapping["package_image_name"] = packageImageName
templateMapping["nested_level_1_buildpack"] = packageImageName
templateMapping["nested_level_2_buildpack"] = nestedLevelTwoBuildpackName
templateMapping["simple_layers_buildpack"] = simpleLayersBuildpackName
fixtureManager := pack.FixtureManager()
nestedLevelOneConfigFile, err := ioutil.TempFile(tmpDir, "nested-level-1-package.toml")
assert.Nil(err)
fixtureManager.TemplateFixtureToFile(
"nested-level-1-buildpack_package.toml",
nestedLevelOneConfigFile,
templateMapping,
)
err = nestedLevelOneConfigFile.Close()
assert.Nil(err)
nestedLevelTwoConfigFile, err := ioutil.TempFile(tmpDir, "nested-level-2-package.toml")
assert.Nil(err)
fixtureManager.TemplateFixtureToFile(
"nested-level-2-buildpack_package.toml",
nestedLevelTwoConfigFile,
templateMapping,
)
err = nestedLevelTwoConfigFile.Close()
assert.Nil(err)
packageImageBuildpack := buildpacks.NewPackageImage(
t,
pack,
packageImageName,
nestedLevelOneConfigFile.Name(),
buildpacks.WithRequiredBuildpacks(
buildpacks.NestedLevelOne,
buildpacks.NewPackageImage(
t,
pack,
nestedLevelTwoBuildpackName,
nestedLevelTwoConfigFile.Name(),
buildpacks.WithRequiredBuildpacks(
buildpacks.NestedLevelTwo,
buildpacks.NewPackageImage(
t,
pack,
simpleLayersBuildpackName,
fixtureManager.FixtureLocation("simple-layers-buildpack_package.toml"),
buildpacks.WithRequiredBuildpacks(buildpacks.SimpleLayers),
),
),
),
),
)
builderBuildpacks = append(
builderBuildpacks,
packageImageBuildpack,
)
buildpackManager.PrepareBuildpacks(tmpDir, builderBuildpacks...)
// ADD lifecycle
if lifecycle.HasLocation() {
lifecycleURI := lifecycle.EscapedPath()
t.Logf("adding lifecycle path '%s' to builder config", lifecycleURI)
templateMapping["lifecycle_uri"] = lifecycleURI
} else {
lifecycleVersion := lifecycle.Version()
t.Logf("adding lifecycle version '%s' to builder config", lifecycleVersion)
templateMapping["lifecycle_version"] = lifecycleVersion
}
// RENDER builder.toml
builderConfigFile, err := ioutil.TempFile(tmpDir, "nested_builder.toml")
if err != nil {
return "", err
}
pack.FixtureManager().TemplateFixtureToFile("nested_builder.toml", builderConfigFile, templateMapping)
err = builderConfigFile.Close()
if err != nil {
return "", err
}
// NAME BUILDER
bldr := registryConfig.RepoName("test/builder-" + h.RandString(10))
// CREATE BUILDER
output := pack.RunSuccessfully(
"create-builder", bldr,
"-c", builderConfigFile.Name(),
"--no-color",
)
assert.Contains(output, fmt.Sprintf("Successfully created builder image '%s'", bldr))
assert.Succeeds(h.PushImage(dockerCli, bldr, registryConfig))
return bldr, nil
}
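// createBuilder renders a builder.toml from fixtures, creates the builder image with pack,
// pushes it to the test registry, and returns its name.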
func createBuilder(
t *testing.T,
assert h.AssertionManager,
pack *invoke.PackInvoker,
lifecycle config.LifecycleAsset,
buildpackManager buildpacks.BuildpackManager,
runImageMirror string,
) (string, error) {
t.Log("creating builder image...")
// CREATE TEMP WORKING DIR
tmpDir, err := ioutil.TempDir("", "create-test-builder")
assert.Nil(err)
defer os.RemoveAll(tmpDir)
templateMapping := map[string]interface{}{
"run_image_mirror": runImageMirror,
}
// ARCHIVE BUILDPACKS
builderBuildpacks := []buildpacks.TestBuildpack{
buildpacks.Noop,
buildpacks.Noop2,
buildpacks.OtherStack,
buildpacks.ReadEnv,
}
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, "package.toml", dockerHostOS())
packageImageName := registryConfig.RepoName("simple-layers-package-image-buildpack-" + h.RandString(8))
packageImageBuildpack := buildpacks.NewPackageImage(
t,
pack,
packageImageName,
packageTomlPath,
buildpacks.WithRequiredBuildpacks(buildpacks.SimpleLayers),
)
builderBuildpacks = append(builderBuildpacks, packageImageBuildpack)
templateMapping["package_image_name"] = packageImageName
templateMapping["package_id"] = "simple/layers"
buildpackManager.PrepareBuildpacks(tmpDir, builderBuildpacks...)
// ADD lifecycle
var lifecycleURI string
var lifecycleVersion string
if lifecycle.HasLocation() {
lifecycleURI = lifecycle.EscapedPath()
t.Logf("adding lifecycle path '%s' to builder config", lifecycleURI)
templateMapping["lifecycle_uri"] = lifecycleURI
} else {
lifecycleVersion = lifecycle.Version()
t.Logf("adding lifecycle version '%s' to builder config", lifecycleVersion)
templateMapping["lifecycle_version"] = lifecycleVersion
}
// RENDER builder.toml
configFileName := "builder.toml"
builderConfigFile, err := ioutil.TempFile(tmpDir, "builder.toml")
assert.Nil(err)
pack.FixtureManager().TemplateFixtureToFile(
configFileName,
builderConfigFile,
templateMapping,
)
err = builderConfigFile.Close()
assert.Nil(err)
// NAME BUILDER
bldr := registryConfig.RepoName("test/builder-" + h.RandString(10))
// CREATE BUILDER
output := pack.RunSuccessfully(
"create-builder", bldr,
"-c", builderConfigFile.Name(),
"--no-color",
)
assert.Contains(output, fmt.Sprintf("Successfully created builder image '%s'", bldr))
assert.Succeeds(h.PushImage(dockerCli, bldr, registryConfig))
return bldr, nil
}
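// generatePackageTomlWithOS renders the named package.toml fixture with the given OS into a temp file and returns its path.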
func generatePackageTomlWithOS(
t *testing.T,
assert h.AssertionManager,
pack *invoke.PackInvoker,
tmpDir string,
fixtureName string,
platform_os string,
) string {
t.Helper()
packageTomlFile, err := ioutil.TempFile(tmpDir, "package-*.toml")
assert.Nil(err)
pack.FixtureManager().TemplateFixtureToFile(
fixtureName,
packageTomlFile,
map[string]interface{}{
"OS": platform_os,
},
)
assert.Nil(packageTomlFile.Close())
return packageTomlFile.Name()
}
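// createStack builds the mock run and build stack images and pushes the run-image mirror to the test registry.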
func createStack(t *testing.T, dockerCli client.CommonAPIClient, runImageMirror string) error {
t.Helper()
t.Log("creating stack images...")
stackBaseDir := filepath.Join("testdata", "mock_stack", dockerHostOS())
if err := createStackImage(dockerCli, runImage, filepath.Join(stackBaseDir, "run")); err != nil {
return err
}
if err := createStackImage(dockerCli, buildImage, filepath.Join(stackBaseDir, "build")); err != nil {
return err
}
if err := dockerCli.ImageTag(context.Background(), runImage, runImageMirror); err != nil {
return err
}
if err := h.PushImage(dockerCli, runImageMirror, registryConfig); err != nil {
return err
}
return nil
}
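// createStackImage builds an image from the Dockerfile in dir and tags it as repoName.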
func createStackImage(dockerCli client.CommonAPIClient, repoName string, dir string) error {
defaultFilterFunc := func(file string) bool { return true }
ctx := context.Background()
buildContext := archive.ReadDirAsTar(dir, "/", 0, 0, -1, true, defaultFilterFunc)
res, err := dockerCli.ImageBuild(ctx, buildContext, dockertypes.ImageBuildOptions{
Tags: []string{repoName},
Remove: true,
ForceRemove: true,
})
if err != nil {
return err
}
_, err = io.Copy(ioutil.Discard, res.Body)
if err != nil {
return err
}
return res.Body.Close()
}
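// logWriter adapts *testing.T to io.Writer so container output can be streamed through t.Log.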
type logWriter struct {
t *testing.T
}
func (l logWriter) Write(p []byte) (n int, err error) {
l.t.Helper()
l.t.Log(strings.TrimRight(string(p), "\n"))
return len(p), nil
}
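// assertMockAppRunsWithOutput starts a container from the app image, waits for it to respond over HTTP,
// and asserts that the response contains each of the expected outputs.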
func assertMockAppRunsWithOutput(t *testing.T, assert h.AssertionManager, repoName string, expectedOutputs ...string) {
t.Helper()
containerName := "test-" + h.RandString(10)
ctrID := runDockerImageExposePort(t, assert, containerName, repoName)
defer dockerCli.ContainerKill(context.TODO(), containerName, "SIGKILL")
defer dockerCli.ContainerRemove(context.TODO(), containerName, dockertypes.ContainerRemoveOptions{Force: true})
logs, err := dockerCli.ContainerLogs(context.TODO(), ctrID, dockertypes.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Follow: true,
})
assert.Nil(err)
copyErr := make(chan error)
go func() {
_, err := stdcopy.StdCopy(logWriter{t}, logWriter{t}, logs)
copyErr <- err
}()
launchPort := fetchHostPort(t, assert, containerName)
assertMockAppResponseContains(t, assert, launchPort, 10*time.Second, expectedOutputs...)
}
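// assertMockAppLogs runs the app image once and asserts that its log output contains each of the expected outputs.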
func assertMockAppLogs(t *testing.T, assert h.AssertionManager, repoName string, expectedOutputs ...string) {
t.Helper()
containerName := "test-" + h.RandString(10)
ctr, err := dockerCli.ContainerCreate(context.Background(), &container.Config{
Image: repoName,
}, nil, nil, containerName)
assert.Nil(err)
var b bytes.Buffer
err = h.RunContainer(context.Background(), dockerCli, ctr.ID, &b, &b)
assert.Nil(err)
for _, expectedOutput := range expectedOutputs {
assert.Contains(b.String(), expectedOutput)
}
}
func assertMockAppResponseContains(t *testing.T, assert h.AssertionManager, launchPort string, timeout time.Duration, expectedOutputs ...string) {
t.Helper()
resp := waitForResponse(t, launchPort, timeout)
for _, expected := range expectedOutputs {
assert.Contains(resp, expected)
}
}
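// assertHasBase asserts that every layer of the base image appears, in order, at the start of the image's layer list.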
func assertHasBase(t *testing.T, assert h.AssertionManager, image, base string) {
t.Helper()
imageInspect, _, err := dockerCli.ImageInspectWithRaw(context.Background(), image)
assert.Nil(err)
baseInspect, _, err := dockerCli.ImageInspectWithRaw(context.Background(), base)
assert.Nil(err)
for i, layer := range baseInspect.RootFS.Layers {
assert.Equal(imageInspect.RootFS.Layers[i], layer)
}
}
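// fetchHostPort returns the first published host port found for the given container.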
func fetchHostPort(t *testing.T, assert h.AssertionManager, dockerID string) string {
t.Helper()
i, err := dockerCli.ContainerInspect(context.Background(), dockerID)
assert.Nil(err)
for _, port := range i.NetworkSettings.Ports {
for _, binding := range port {
return binding.HostPort
}
}
t.Fatalf("Failed to fetch host port for %s: no ports exposed", dockerID)
return ""
}
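// imgIDForRepoName returns the image ID that the local daemon reports for the given repository name.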
func imgIDForRepoName(repoName string) (string, error) {
inspect, _, err := dockerCli.ImageInspectWithRaw(context.TODO(), repoName)
if err != nil {
return "", errors.Wrapf(err, "could not get image ID for image '%s'", repoName)
}
return inspect.ID, nil
}
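// runDockerImageExposePort creates and starts a container from repoName with port 8080/tcp published
// to a randomly assigned host port and returns the container ID.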
func runDockerImageExposePort(t *testing.T, assert h.AssertionManager, containerName, repoName string) string {
t.Helper()
ctx := context.Background()
ctr, err := dockerCli.ContainerCreate(ctx, &container.Config{
Image: repoName,
ExposedPorts: map[nat.Port]struct{}{"8080/tcp": {}},
Healthcheck: nil,
}, &container.HostConfig{
PortBindings: nat.PortMap{
"8080/tcp": []nat.PortBinding{{}},
},
AutoRemove: true,
}, nil, containerName)
assert.Nil(err)
err = dockerCli.ContainerStart(ctx, ctr.ID, dockertypes.ContainerStartOptions{})
assert.Nil(err)
return ctr.ID
}
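// waitForResponse polls the app on the given host port every 500ms and returns the first response,
// failing the test if the timeout elapses first.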
func waitForResponse(t *testing.T, port string, timeout time.Duration) string {
t.Helper()
ticker := time.NewTicker(500 * time.Millisecond)
defer ticker.Stop()
timer := time.NewTimer(timeout)
defer timer.Stop()
for {
select {
case <-ticker.C:
resp, err := h.HTTPGetE("http://"+h.RegistryHost(h.DockerHostname(t), port), map[string]string{})
if err != nil {
break
}
return resp
case <-timer.C:
t.Fatalf("timeout waiting for response: %v", timeout)
}
}
}
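// imageLabel returns the value of the named label on the image, reporting a test error if the label is missing.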
func imageLabel(t *testing.T, assert h.AssertionManager, dockerCli client.CommonAPIClient, repoName, labelName string) string {
t.Helper()
inspect, _, err := dockerCli.ImageInspectWithRaw(context.Background(), repoName)
assert.Nil(err)
label, ok := inspect.Config.Labels[labelName]
if !ok {
t.Errorf("expected label %s to exist", labelName)
}
return label
}
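// dockerHostOS returns the OS type reported by the Docker daemon (e.g. "linux" or "windows").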
func dockerHostOS() string {
daemonInfo, err := dockerCli.Info(context.TODO())
if err != nil {
panic(err.Error())
}
return daemonInfo.OSType
}
// taskKey creates a unique key from the prefix and all arguments.
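// For example, taskKey("create-builder", "a", "b") returns "create-builder-" followed by the hex-encoded SHA-256 of "ab".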
func taskKey(prefix string, args ...string) string {
hash := sha256.New()
for _, v := range args {
hash.Write([]byte(v))
}
return fmt.Sprintf("%s-%s", prefix, hex.EncodeToString(hash.Sum(nil)))
}
| ["\"DOCKER_HOST\""] | [] | ["DOCKER_HOST"] | [] | ["DOCKER_HOST"] | go | 1 | 0 |
bin/process_plug_in_packages.py | #!/usr/bin/env python
import sys
import __builtin__
import subprocess
import os
import argparse
# python puts the program's directory path in sys.path[0]. In other words,
# the user ordinarily has no way to override python's choice of a module from
# its own dir. We want to have that ability in our environment. However, we
# don't want to break any established python modules that depend on this
# behavior. So, we'll save the value from sys.path[0], delete it, import our
# modules and then restore sys.path to its original value.
save_path_0 = sys.path[0]
del sys.path[0]
from gen_print import *
from gen_valid import *
from gen_arg import *
from gen_plug_in import *
from gen_cmd import *
# Restore sys.path[0].
sys.path.insert(0, save_path_0)
# I use this variable in calls to print_var.
hex = 1
# Create parser object to process command line parameters and args.
# Create parser object.
parser = argparse.ArgumentParser(
usage='%(prog)s [OPTIONS]',
description="%(prog)s will process the plug-in packages passed to it."
+ " A plug-in package is essentially a directory containing"
+ " one or more call point programs. Each of these call point"
+ " programs must have a prefix of \"cp_\". When calling"
+ " %(prog)s, a user must provide a call_point parameter"
+ " (described below). For each plug-in package passed,"
+ " %(prog)s will check for the presence of the specified call"
+ " point program in the plug-in directory. If it is found,"
+ " %(prog)s will run it. It is the responsibility of the"
+ " caller to set any environment variables needed by the call"
+ " point programs.\n\nAfter each call point program"
+ " has been run, %(prog)s will print the following values in"
+ " the following formats for use by the calling program:\n"
+ " failed_plug_in_name: <failed plug-in value,"
+ " if any>\n shell_rc: "
+ "<shell return code value of last call point program - this"
+ " will be printed in hexadecimal format. Also, be aware"
+ " that if a call point program returns a value it will be"
+ " shifted left 2 bytes (e.g. rc of 2 will be printed as"
+ " 0x00000200). That is because the rightmost byte is"
+ " reserved for errors in calling the call point program"
+ " rather than errors generated by the call point program.>",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prefix_chars='-+')
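# Example invocation (illustrative values only):
#   process_plug_in_packages.py --call_point=setup <plug_in_dir_paths>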
# Create arguments.
parser.add_argument(
'plug_in_dir_paths',
nargs='?',
default="",
help=plug_in_dir_paths_help_text + default_string)
parser.add_argument(
'--call_point',
default="setup",
required=True,
help='The call point program name. This value must not include the'
+ ' "cp_" prefix. For each plug-in package passed to this program,'
+ ' the specified call_point program will be called if it exists in'
+ ' the plug-in directory.' + default_string)
parser.add_argument(
'--allow_shell_rc',
default="0x00000000",
help='The user may supply a value other than zero to indicate an'
+ ' acceptable non-zero return code. For example, if this value'
+ ' equals 0x00000200, it means that for each plug-in call point that'
+ ' runs, a 0x00000200 will not be counted as a failure. See note'
+ ' above regarding left-shifting of return codes.' + default_string)
parser.add_argument(
'--stop_on_plug_in_failure',
default=1,
type=int,
choices=[1, 0],
help='If this parameter is set to 1, this program will stop and return '
+ 'non-zero if the call point program from any plug-in directory '
         + 'fails. Conversely, if it is set to 0, this program will run '
+ 'the call point program from each and every plug-in directory '
+ 'regardless of their return values. Typical example cases where '
+ 'you\'d want to run all plug-in call points regardless of success '
+ 'or failure would be "cleanup" or "ffdc" call points.')
parser.add_argument(
'--stop_on_non_zero_rc',
default=0,
type=int,
choices=[1, 0],
help='If this parm is set to 1 and a plug-in call point program returns '
+ 'a valid non-zero return code (see "allow_shell_rc" parm above),'
+ ' this program will stop processing and return 0 (success). Since'
+ ' this constitutes a successful exit, this would normally be used'
+ ' where the caller wishes to stop processing if one of the plug-in'
+ ' directory call point programs returns a special value indicating'
+ ' that some special case has been found. An example might be in'
+ ' calling some kind of "check_errl" call point program. Such a'
+ ' call point program might return a 2 (i.e. 0x00000200) to indicate'
+ ' that a given error log entry was found in an "ignore" list and is'
+ ' therefore to be ignored. That being the case, no other'
+ ' "check_errl" call point program would need to be called.'
+ default_string)
parser.add_argument(
'--mch_class',
default="obmc",
help=mch_class_help_text + default_string)
# The stock_list will be passed to gen_get_options. We populate it with the
# names of stock parm options we want. These stock parms are pre-defined by
# gen_get_options.
stock_list = [("test_mode", 0), ("quiet", 1), ("debug", 0)]
def exit_function(signal_number=0,
frame=None):
r"""
Execute whenever the program ends normally or with the signals that we
catch (i.e. TERM, INT).
"""
dprint_executing()
dprint_var(signal_number)
qprint_pgm_footer()
def signal_handler(signal_number, frame):
r"""
Handle signals. Without a function to catch a SIGTERM or SIGINT, our
program would terminate immediately with return code 143 and without
calling our exit_function.
"""
    # Our convention is to set up exit_function with atexit.register() so
# there is no need to explicitly call exit_function from here.
dprint_executing()
# Calling exit prevents us from returning to the code that was running
# when we received the signal.
exit(0)
def validate_parms():
r"""
Validate program parameters, etc. Return True or False accordingly.
"""
if not valid_value(call_point):
return False
global allow_shell_rc
if not valid_integer(allow_shell_rc):
return False
# Convert to hex string for consistency in printout.
allow_shell_rc = "0x%08x" % int(allow_shell_rc, 0)
set_pgm_arg(allow_shell_rc)
gen_post_validation(exit_function, signal_handler)
return True
def run_pgm(plug_in_dir_path,
call_point,
allow_shell_rc):
r"""
Run the call point program in the given plug_in_dir_path. Return the
following:
rc The return code - 0 = PASS, 1 = FAIL.
shell_rc The shell return code returned by
process_plug_in_packages.py.
failed_plug_in_name The failed plug in name (if any).
Description of arguments:
plug_in_dir_path The directory path where the call_point
program may be located.
call_point The call point (e.g. "setup"). This
program will look for a program named
"cp_" + call_point in the
plug_in_dir_path. If no such call point
program is found, this function returns an
rc of 0 (i.e. success).
allow_shell_rc The user may supply a value other than
zero to indicate an acceptable non-zero
return code. For example, if this value
equals 0x00000200, it means that for each
plug-in call point that runs, a 0x00000200
will not be counted as a failure. See
note above regarding left-shifting of
return codes.
"""
global autoscript
rc = 0
failed_plug_in_name = ""
shell_rc = 0x00000000
plug_in_name = os.path.basename(os.path.normpath(plug_in_dir_path))
cp_prefix = "cp_"
plug_in_pgm_path = plug_in_dir_path + cp_prefix + call_point
if not os.path.exists(plug_in_pgm_path):
# No such call point in this plug in dir path. This is legal so we
# return 0, etc.
return rc, shell_rc, failed_plug_in_name
# Get some stats on the file.
cmd_buf = "stat -c '%n %s %z' " + plug_in_pgm_path
dpissuing(cmd_buf)
sub_proc = subprocess.Popen(cmd_buf, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out_buf, err_buf = sub_proc.communicate()
shell_rc = sub_proc.returncode
if shell_rc != 0:
rc = 1
print_var(shell_rc, hex)
failed_plug_in_name = plug_in_name
print(out_buf)
print_var(failed_plug_in_name)
print_var(shell_rc, hex)
return rc, shell_rc, failed_plug_in_name
print("------------------------------------------------- Starting plug-"
+ "in -----------------------------------------------")
print(out_buf)
if autoscript:
stdout = 1 - quiet
if AUTOBOOT_OPENBMC_NICKNAME != "":
autoscript_prefix = AUTOBOOT_OPENBMC_NICKNAME + "."
else:
autoscript_prefix = ""
autoscript_prefix += plug_in_name + ".cp_" + call_point
autoscript_subcmd = "autoscript --quiet=1 --show_url=y --prefix=" +\
autoscript_prefix + " --stdout=" + str(stdout) + " -- "
else:
autoscript_subcmd = ""
cmd_buf = "PATH=" + plug_in_dir_path.rstrip("/") + ":${PATH} ; " +\
autoscript_subcmd + cp_prefix + call_point
pissuing(cmd_buf)
sub_proc = subprocess.Popen(cmd_buf, shell=True)
sub_proc.communicate()
shell_rc = sub_proc.returncode
# Shift to left.
shell_rc *= 0x100
if shell_rc != 0 and shell_rc != allow_shell_rc:
rc = 1
failed_plug_in_name = plug_in_name
if shell_rc != 0:
failed_plug_in_name = plug_in_name
print("------------------------------------------------- Ending plug-in"
+ " -------------------------------------------------")
if failed_plug_in_name != "":
print_var(failed_plug_in_name)
print_var(shell_rc, hex)
return rc, shell_rc, failed_plug_in_name
def main():
r"""
This is the "main" function. The advantage of having this function vs
just doing this in the true mainline is that you can:
- Declare local variables
- Use "return" instead of "exit".
- Indent 4 chars like you would in any function.
This makes coding more consistent, i.e. it's easy to move code from here
into a function and vice versa.
"""
if not gen_get_options(parser, stock_list):
return False
if not validate_parms():
return False
qprint_pgm_header()
# Access program parameter globals.
global plug_in_dir_paths
global mch_class
global allow_shell_rc
global stop_on_plug_in_failure
global stop_on_non_zero_rc
plug_in_packages_list = return_plug_in_packages_list(plug_in_dir_paths,
mch_class)
qpvar(plug_in_packages_list)
qprint("\n")
allow_shell_rc = int(allow_shell_rc, 0)
shell_rc = 0
failed_plug_in_name = ""
# If the autoscript program is present, we will use it to direct call point
# program output to a separate status file. This keeps the output of the
# main program (i.e. OBMC Boot Test) cleaner and yet preserves call point
# output if it is needed for debug.
global autoscript
global AUTOBOOT_OPENBMC_NICKNAME
autoscript = 0
AUTOBOOT_OPENBMC_NICKNAME = ""
rc, out_buf = cmd_fnc("which autoscript", quiet=1, print_output=0,
show_err=0)
if rc == 0:
autoscript = 1
AUTOBOOT_OPENBMC_NICKNAME = os.environ.get("AUTOBOOT_OPENBMC_NICKNAME",
"")
ret_code = 0
for plug_in_dir_path in plug_in_packages_list:
rc, shell_rc, failed_plug_in_name = \
run_pgm(plug_in_dir_path, call_point, allow_shell_rc)
if rc != 0:
ret_code = 1
if stop_on_plug_in_failure:
break
if shell_rc != 0 and stop_on_non_zero_rc:
qprint_time("Stopping on non-zero shell return code as requested"
+ " by caller.\n")
break
if ret_code == 0:
return True
else:
if not stop_on_plug_in_failure:
# We print a summary error message to make the failure more
# obvious.
print_error("At least one plug-in failed.\n")
return False
# Main
if not main():
exit(1)
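# Illustrative invocation (added for clarity; the plug-in directory is hypothetical):
#   process_plug_in_packages.py --call_point=setup --allow_shell_rc=0x00000200 \
#       /path/to/my_plug_in_dir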
| [] | [] | ["AUTOBOOT_OPENBMC_NICKNAME"] | [] | ["AUTOBOOT_OPENBMC_NICKNAME"] | python | 1 | 0 |
myvenv/lib/python3.5/site-packages/sphinx/util/console.py | # -*- coding: utf-8 -*-
"""
sphinx.util.console
~~~~~~~~~~~~~~~~~~~
Format colored console output.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import sys
import re
try:
# check if colorama is installed to support color on Windows
import colorama
except ImportError:
colorama = None
_ansi_re = re.compile('\x1b\\[(\\d\\d;){0,2}\\d\\dm')
codes = {}
def get_terminal_width():
"""Borrowed from the py lib."""
try:
import termios
import fcntl
import struct
call = fcntl.ioctl(0, termios.TIOCGWINSZ,
struct.pack('hhhh', 0, 0, 0, 0))
height, width = struct.unpack('hhhh', call)[:2]
terminal_width = width
except Exception:
# FALLBACK
terminal_width = int(os.environ.get('COLUMNS', 80)) - 1
return terminal_width
_tw = get_terminal_width()
def term_width_line(text):
if not codes:
# if no coloring, don't output fancy backspaces
return text + '\n'
else:
# codes are not displayed, this must be taken into account
return text.ljust(_tw + len(text) - len(_ansi_re.sub('', text))) + '\r'
def color_terminal():
if sys.platform == 'win32' and colorama is not None:
colorama.init()
return True
if not hasattr(sys.stdout, 'isatty'):
return False
if not sys.stdout.isatty():
return False
if 'COLORTERM' in os.environ:
return True
term = os.environ.get('TERM', 'dumb').lower()
if term in ('xterm', 'linux') or 'color' in term:
return True
return False
def nocolor():
if sys.platform == 'win32' and colorama is not None:
colorama.deinit()
codes.clear()
def coloron():
codes.update(_orig_codes)
def colorize(name, text):
return codes.get(name, '') + text + codes.get('reset', '')
def strip_colors(s):
return re.compile('\x1b.*?m').sub('', s)
def create_color_func(name):
def inner(text):
return colorize(name, text)
globals()[name] = inner
_attrs = {
'reset': '39;49;00m',
'bold': '01m',
'faint': '02m',
'standout': '03m',
'underline': '04m',
'blink': '05m',
}
for _name, _value in _attrs.items():
codes[_name] = '\x1b[' + _value
_colors = [
('black', 'darkgray'),
('darkred', 'red'),
('darkgreen', 'green'),
('brown', 'yellow'),
('darkblue', 'blue'),
('purple', 'fuchsia'),
('turquoise', 'teal'),
('lightgray', 'white'),
]
for i, (dark, light) in enumerate(_colors):
codes[dark] = '\x1b[%im' % (i+30)
codes[light] = '\x1b[%i;01m' % (i+30)
_orig_codes = codes.copy()
for _name in codes:
create_color_func(_name)
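# Illustrative example (added for clarity): once the loop above has run, module-level helpers
# such as red() and bold() exist, so red('error') returns '\x1b[31merror\x1b[39;49;00m' while
# color is enabled; after nocolor() clears `codes`, the same call returns just 'error'.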
| [] | [] | ["TERM", "COLUMNS"] | [] | ["TERM", "COLUMNS"] | python | 2 | 0 |
modin/experimental/pandas/test/test_io_exp.py | # Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pandas
import pytest
import modin.experimental.pandas as pd
from modin.config import Engine
from modin.pandas.test.utils import df_equals
@pytest.mark.skipif(
Engine.get() == "Dask",
reason="Dask does not have experimental API",
)
def test_from_sql_distributed(make_sql_connection): # noqa: F811
if Engine.get() == "Ray":
filename = "test_from_sql_distributed.db"
table = "test_from_sql_distributed"
conn = make_sql_connection(filename, table)
query = "select * from {0}".format(table)
pandas_df = pandas.read_sql(query, conn)
modin_df_from_query = pd.read_sql(
query,
conn,
partition_column="col1",
lower_bound=0,
upper_bound=6,
max_sessions=2,
)
modin_df_from_table = pd.read_sql(
table,
conn,
partition_column="col1",
lower_bound=0,
upper_bound=6,
max_sessions=2,
)
df_equals(modin_df_from_query, pandas_df)
df_equals(modin_df_from_table, pandas_df)
@pytest.mark.skipif(
Engine.get() == "Dask",
reason="Dask does not have experimental API",
)
def test_from_sql_defaults(make_sql_connection): # noqa: F811
filename = "test_from_sql_distributed.db"
table = "test_from_sql_distributed"
conn = make_sql_connection(filename, table)
query = "select * from {0}".format(table)
pandas_df = pandas.read_sql(query, conn)
with pytest.warns(UserWarning):
modin_df_from_query = pd.read_sql(query, conn)
with pytest.warns(UserWarning):
modin_df_from_table = pd.read_sql(table, conn)
df_equals(modin_df_from_query, pandas_df)
df_equals(modin_df_from_table, pandas_df)
@pytest.mark.usefixtures("TestReadGlobCSVFixture")
@pytest.mark.skipif(
Engine.get() != "Ray", reason="Currently only support Ray engine for glob paths."
)
class TestCsvGlob:
def test_read_multiple_small_csv(self): # noqa: F811
pandas_df = pandas.concat([pandas.read_csv(fname) for fname in pytest.files])
modin_df = pd.read_csv_glob(pytest.glob_path)
# Indexes get messed up when concatting so we reset both.
pandas_df = pandas_df.reset_index(drop=True)
modin_df = modin_df.reset_index(drop=True)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("nrows", [35, 100])
def test_read_multiple_csv_nrows(self, request, nrows): # noqa: F811
pandas_df = pandas.concat([pandas.read_csv(fname) for fname in pytest.files])
pandas_df = pandas_df.iloc[:nrows, :]
modin_df = pd.read_csv_glob(pytest.glob_path, nrows=nrows)
# Indexes get messed up when concatting so we reset both.
pandas_df = pandas_df.reset_index(drop=True)
modin_df = modin_df.reset_index(drop=True)
df_equals(modin_df, pandas_df)
@pytest.mark.skipif(
Engine.get() != "Ray", reason="Currently only support Ray engine for glob paths."
)
def test_read_multiple_csv_s3():
modin_df = pd.read_csv_glob("S3://noaa-ghcn-pds/csv/178*.csv")
# We have to specify the columns because the column names are not identical. Since we specified the column names, we also have to skip the original column names.
pandas_dfs = [
pandas.read_csv(
"s3://noaa-ghcn-pds/csv/178{}.csv".format(i),
names=modin_df.columns,
skiprows=[0],
)
for i in range(10)
]
pandas_df = pd.concat(pandas_dfs)
# Indexes get messed up when concatting so we reset both.
pandas_df = pandas_df.reset_index(drop=True)
modin_df = modin_df.reset_index(drop=True)
df_equals(modin_df, pandas_df)
| [] | [] | [] | [] | [] | python | null | null |
vendor/github.com/opencontainers/runc/utils_linux.go | // +build linux
package main
import (
"errors"
"fmt"
"net"
"os"
"path/filepath"
"strconv"
"syscall"
"github.com/Sirupsen/logrus"
"github.com/coreos/go-systemd/activation"
"github.com/opencontainers/runc/libcontainer"
"github.com/opencontainers/runc/libcontainer/cgroups/systemd"
"github.com/opencontainers/runc/libcontainer/specconv"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/urfave/cli"
)
var errEmptyID = errors.New("container id cannot be empty")
var container libcontainer.Container
// loadFactory returns the configured factory instance for execing containers.
func loadFactory(context *cli.Context) (libcontainer.Factory, error) {
root := context.GlobalString("root")
abs, err := filepath.Abs(root)
if err != nil {
return nil, err
}
cgroupManager := libcontainer.Cgroupfs
if context.GlobalBool("systemd-cgroup") {
if systemd.UseSystemd() {
cgroupManager = libcontainer.SystemdCgroups
} else {
return nil, fmt.Errorf("systemd cgroup flag passed, but systemd support for managing cgroups is not available")
}
}
return libcontainer.New(abs, cgroupManager, libcontainer.CriuPath(context.GlobalString("criu")))
}
// getContainer returns the specified container instance by loading it from state
// with the default factory.
func getContainer(context *cli.Context) (libcontainer.Container, error) {
id := context.Args().First()
if id == "" {
return nil, errEmptyID
}
factory, err := loadFactory(context)
if err != nil {
return nil, err
}
return factory.Load(id)
}
func fatalf(t string, v ...interface{}) {
fatal(fmt.Errorf(t, v...))
}
func getDefaultImagePath(context *cli.Context) string {
cwd, err := os.Getwd()
if err != nil {
panic(err)
}
return filepath.Join(cwd, "checkpoint")
}
// newProcess returns a new libcontainer Process with the arguments from the
// spec and stdio from the current process.
func newProcess(p specs.Process) (*libcontainer.Process, error) {
lp := &libcontainer.Process{
Args: p.Args,
Env: p.Env,
// TODO: fix libcontainer's API to better support uid/gid in a typesafe way.
User: fmt.Sprintf("%d:%d", p.User.UID, p.User.GID),
Cwd: p.Cwd,
Capabilities: p.Capabilities,
Label: p.SelinuxLabel,
NoNewPrivileges: &p.NoNewPrivileges,
AppArmorProfile: p.ApparmorProfile,
}
for _, gid := range p.User.AdditionalGids {
lp.AdditionalGroups = append(lp.AdditionalGroups, strconv.FormatUint(uint64(gid), 10))
}
for _, rlimit := range p.Rlimits {
rl, err := createLibContainerRlimit(rlimit)
if err != nil {
return nil, err
}
lp.Rlimits = append(lp.Rlimits, rl)
}
return lp, nil
}
// If systemd is supporting sd_notify protocol, this function will add support
// for sd_notify protocol from within the container.
func setupSdNotify(spec *specs.Spec, notifySocket string) {
spec.Mounts = append(spec.Mounts, specs.Mount{Destination: notifySocket, Type: "bind", Source: notifySocket, Options: []string{"bind"}})
spec.Process.Env = append(spec.Process.Env, fmt.Sprintf("NOTIFY_SOCKET=%s", notifySocket))
}
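// Illustrative example (added for clarity; the socket path is hypothetical): with
// notifySocket = "/run/systemd/notify", setupSdNotify bind-mounts that path into the container
// and appends NOTIFY_SOCKET=/run/systemd/notify to the process environment, so the
// containerized service can report readiness to the host's systemd.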
func destroy(container libcontainer.Container) {
if err := container.Destroy(); err != nil {
logrus.Error(err)
}
}
// setupIO modifies the given process config according to the options.
func setupIO(process *libcontainer.Process, rootuid, rootgid int, createTTY, detach bool) (*tty, error) {
// This is entirely handled by recvtty.
if createTTY {
process.Stdin = nil
process.Stdout = nil
process.Stderr = nil
return &tty{}, nil
}
// When we detach, we just dup over stdio and call it a day. There's no
// requirement that we set up anything nice for our caller or the
// container.
if detach {
if err := dupStdio(process, rootuid, rootgid); err != nil {
return nil, err
}
return &tty{}, nil
}
// XXX: This doesn't sit right with me. It's ugly.
return createStdioPipes(process, rootuid, rootgid)
}
// createPidFile atomically creates a file containing the process's pid.
// It first writes a temp file named with a '.' prefixed to the path's
// filename, then renames it into place.
func createPidFile(path string, process *libcontainer.Process) error {
pid, err := process.Pid()
if err != nil {
return err
}
var (
tmpDir = filepath.Dir(path)
tmpName = filepath.Join(tmpDir, fmt.Sprintf(".%s", filepath.Base(path)))
)
f, err := os.OpenFile(tmpName, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, 0666)
if err != nil {
return err
}
_, err = fmt.Fprintf(f, "%d", pid)
f.Close()
if err != nil {
return err
}
return os.Rename(tmpName, path)
}
func createContainer(context *cli.Context, id string, spec *specs.Spec) (libcontainer.Container, error) {
config, err := specconv.CreateLibcontainerConfig(&specconv.CreateOpts{
CgroupName: id,
UseSystemdCgroup: context.GlobalBool("systemd-cgroup"),
NoPivotRoot: context.Bool("no-pivot"),
NoNewKeyring: context.Bool("no-new-keyring"),
Spec: spec,
})
if err != nil {
return nil, err
}
factory, err := loadFactory(context)
if err != nil {
return nil, err
}
return factory.Create(id, config)
}
type runner struct {
enableSubreaper bool
shouldDestroy bool
detach bool
listenFDs []*os.File
pidFile string
consoleSocket string
container libcontainer.Container
create bool
}
func (r *runner) terminalinfo() *libcontainer.TerminalInfo {
return libcontainer.NewTerminalInfo(r.container.ID())
}
func (r *runner) run(config *specs.Process) (int, error) {
process, err := newProcess(*config)
if err != nil {
r.destroy()
return -1, err
}
if len(r.listenFDs) > 0 {
process.Env = append(process.Env, fmt.Sprintf("LISTEN_FDS=%d", len(r.listenFDs)), "LISTEN_PID=1")
process.ExtraFiles = append(process.ExtraFiles, r.listenFDs...)
}
rootuid, err := r.container.Config().HostUID()
if err != nil {
r.destroy()
return -1, err
}
rootgid, err := r.container.Config().HostGID()
if err != nil {
r.destroy()
return -1, err
}
detach := r.detach || r.create
// Check command-line for sanity.
if detach && config.Terminal && r.consoleSocket == "" {
r.destroy()
return -1, fmt.Errorf("cannot allocate tty if runc will detach without setting console socket")
}
// XXX: Should we change this?
if (!detach || !config.Terminal) && r.consoleSocket != "" {
r.destroy()
return -1, fmt.Errorf("cannot use console socket if runc will not detach or allocate tty")
}
startFn := r.container.Start
if !r.create {
startFn = r.container.Run
}
// Setting up IO is a two stage process. We need to modify process to deal
// with detaching containers, and then we get a tty after the container has
// started.
handler := newSignalHandler(r.enableSubreaper)
tty, err := setupIO(process, rootuid, rootgid, config.Terminal, detach)
if err != nil {
r.destroy()
return -1, err
}
if err := startFn(process); err != nil {
r.destroy()
return -1, err
}
if config.Terminal {
if err := tty.recvtty(process, r.detach || r.create); err != nil {
r.terminate(process)
r.destroy()
return -1, err
}
}
defer tty.Close()
if config.Terminal && detach {
conn, err := net.Dial("unix", r.consoleSocket)
if err != nil {
r.terminate(process)
r.destroy()
return -1, err
}
defer conn.Close()
unixconn, ok := conn.(*net.UnixConn)
if !ok {
r.terminate(process)
r.destroy()
return -1, fmt.Errorf("casting to UnixConn failed")
}
socket, err := unixconn.File()
if err != nil {
r.terminate(process)
r.destroy()
return -1, err
}
defer socket.Close()
err = tty.sendtty(socket, r.terminalinfo())
if err != nil {
r.terminate(process)
r.destroy()
return -1, err
}
}
if err := tty.ClosePostStart(); err != nil {
r.terminate(process)
r.destroy()
return -1, err
}
if r.pidFile != "" {
if err := createPidFile(r.pidFile, process); err != nil {
r.terminate(process)
r.destroy()
return -1, err
}
}
if detach {
return 0, nil
}
status, err := handler.forward(process, tty)
if err != nil {
r.terminate(process)
}
r.destroy()
return status, err
}
func (r *runner) destroy() {
if r.shouldDestroy {
destroy(r.container)
}
}
func (r *runner) terminate(p *libcontainer.Process) {
p.Signal(syscall.SIGKILL)
p.Wait()
}
func validateProcessSpec(spec *specs.Process) error {
if spec.Cwd == "" {
return fmt.Errorf("Cwd property must not be empty")
}
if !filepath.IsAbs(spec.Cwd) {
return fmt.Errorf("Cwd must be an absolute path")
}
if len(spec.Args) == 0 {
return fmt.Errorf("args must not be empty")
}
return nil
}
func startContainer(context *cli.Context, spec *specs.Spec, create bool) (int, error) {
id := context.Args().First()
if id == "" {
return -1, errEmptyID
}
container, err := createContainer(context, id, spec)
if err != nil {
return -1, err
}
// Support on-demand socket activation by passing file descriptors into the container init process.
listenFDs := []*os.File{}
if os.Getenv("LISTEN_FDS") != "" {
listenFDs = activation.Files(false)
}
r := &runner{
enableSubreaper: !context.Bool("no-subreaper"),
shouldDestroy: true,
container: container,
listenFDs: listenFDs,
consoleSocket: context.String("console-socket"),
detach: context.Bool("detach"),
pidFile: context.String("pid-file"),
create: create,
}
return r.run(&spec.Process)
}
| ["\"LISTEN_FDS\""] | [] | ["LISTEN_FDS"] | [] | ["LISTEN_FDS"] | go | 1 | 0 |
scraper.py | """This file runs on Replit"""
import smtplib
import os
import json
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
YOUTUBE_TRENDING_URL = 'https://www.youtube.com/feed/trending'
def get_driver():
chrome_options = Options()
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-dev-shm-usage')
driver = webdriver.Chrome(options=chrome_options)
return driver
def get_videos(driver):
VIDEO_DIV_TAG = 'ytd-video-renderer'
driver.get(YOUTUBE_TRENDING_URL)
videos = driver.find_elements(By.TAG_NAME, VIDEO_DIV_TAG)
return videos
def parse_video(video):
title_tag = video.find_element(By.ID, 'video-title')
title = title_tag.text
url = title_tag.get_attribute('href')
thumbnail_tag = video.find_element(By.TAG_NAME, 'img')
thumbnail_url = thumbnail_tag.get_attribute('src')
channel_div = video.find_element(By.CLASS_NAME, 'ytd-channel-name')
channel_name = channel_div.text
description = video.find_element(By.ID, 'description-text').text
return {
'title': title,
'url': url,
'thumbnail_url': thumbnail_url,
'channel': channel_name,
'description': description
}
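# Illustrative example (added for clarity; the values are made up): parse_video returns a dict
# such as {'title': 'Some trending clip', 'url': 'https://www.youtube.com/watch?v=...',
# 'thumbnail_url': 'https://i.ytimg.com/...', 'channel': 'Some Channel',
# 'description': 'First line of the description'} for each ytd-video-renderer element.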
def send_email(body):
try:
server_ssl = smtplib.SMTP_SSL('smtp.gmail.com', 465)
server_ssl.ehlo()
SENDER_EMAIL = '[email protected]'
RECEIVER_EMAIL = '[email protected]'
SENDER_PASSWORD = os.environ['GMAIL_PASSWORD']
subject = 'YouTube Trending Videos'
email_text = f"""
From: {SENDER_EMAIL}
To: {RECEIVER_EMAIL}
Subject: {subject}
{body}
"""
server_ssl.login(SENDER_EMAIL, SENDER_PASSWORD)
server_ssl.sendmail(SENDER_EMAIL, RECEIVER_EMAIL, email_text)
server_ssl.close()
except:
print('Something went wrong...')
if __name__ == "__main__":
print('Creating driver')
driver = get_driver()
print('Fetching trending videos')
videos = get_videos(driver)
print(f'Found {len(videos)} videos')
print('Parsing top 10 videos')
videos_data = [parse_video(video) for video in videos[:10]]
print('Save the data to a CSV')
# videos_df = pd.DataFrame(videos_data)
# print(videos_df)
# videos_df.to_csv('trending.csv', index=None)
print("Send the results over email")
body = json.dumps(videos_data, indent=2)
send_email(body)
print('Finished.')
| [] | [] | ["GMAIL_PASSWORD"] | [] | ["GMAIL_PASSWORD"] | python | 1 | 0 |
vendor/github.com/heketi/heketi/client/cli/go/cmds/root.go | //
// Copyright (c) 2015 The heketi Authors
//
// This file is licensed to you under your choice of the GNU Lesser
// General Public License, version 3 or any later version (LGPLv3 or
// later), or the GNU General Public License, version 2 (GPLv2), in all
// cases as published by the Free Software Foundation.
//
package cmds
import (
"fmt"
"io"
"os"
"github.com/spf13/cobra"
)
var (
HEKETI_CLI_VERSION = "(dev)"
stderr io.Writer
stdout io.Writer
options Options
version bool
)
const (
defaultCliServer = "http://localhost:8080"
)
// Main arguments
type Options struct {
Url, Key, User string
Json bool
InsecureTLS bool
TLSCerts []string
}
var RootCmd = &cobra.Command{
Use: "heketi-cli",
Short: "Command line program for Heketi",
Long: "Command line program for Heketi",
Example: ` $ heketi-cli volume list`,
Run: func(cmd *cobra.Command, args []string) {
if version {
fmt.Printf("heketi-cli %v\n", HEKETI_CLI_VERSION)
} else {
cmd.Usage()
}
},
}
func init() {
cobra.OnInitialize(initConfig)
RootCmd.PersistentFlags().StringVarP(&options.Url, "server", "s", "",
"\n\tHeketi server. Can also be set using the"+
"\n\tenvironment variable HEKETI_CLI_SERVER (the default one is http://localhost:8080)")
RootCmd.PersistentFlags().StringVar(&options.Key, "secret", "",
"\n\tSecret key for specified user. Can also be"+
"\n\tset using the environment variable HEKETI_CLI_KEY")
RootCmd.PersistentFlags().StringVar(&options.User, "user", "",
"\n\tHeketi user. Can also be set using the"+
"\n\tenvironment variable HEKETI_CLI_USER")
RootCmd.PersistentFlags().BoolVar(&options.Json, "json", false,
"\n\tPrint response as JSON")
RootCmd.Flags().BoolVarP(&version, "version", "v", false,
"\n\tPrint version")
RootCmd.PersistentFlags().BoolVarP(&options.InsecureTLS,
"insecure-tls", "I", false,
"\n\tIf using TLS (HTTPS) do not verify server certificates (insecure)")
RootCmd.PersistentFlags().StringSliceVarP(&options.TLSCerts,
"tls-cert", "C", []string{},
"\n\tIf using TLS (HTTPS), specify a certificate file that can be used to verify"+
"\n\tthe TLS connection (can be repeated)")
RootCmd.SilenceUsage = true
}
func initConfig() {
// Check server
if options.Url == "" {
options.Url = os.Getenv("HEKETI_CLI_SERVER")
if options.Url == "" {
options.Url = defaultCliServer
}
}
	// Check key
if options.Key == "" {
options.Key = os.Getenv("HEKETI_CLI_KEY")
}
	// Check user
if options.User == "" {
options.User = os.Getenv("HEKETI_CLI_USER")
}
}
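// Illustrative usage (added for clarity; the values are hypothetical): the same settings can be
// supplied through the environment instead of flags, e.g.
//   HEKETI_CLI_SERVER=http://heketi.example.com:8080 HEKETI_CLI_USER=admin \
//   HEKETI_CLI_KEY=secret heketi-cli volume list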
func NewHeketiCli(heketiVersion string, mstderr io.Writer, mstdout io.Writer) *cobra.Command {
stderr = mstderr
stdout = mstdout
HEKETI_CLI_VERSION = heketiVersion
return RootCmd
}
| ["\"HEKETI_CLI_SERVER\"", "\"HEKETI_CLI_KEY\"", "\"HEKETI_CLI_USER\""] | [] | ["HEKETI_CLI_USER", "HEKETI_CLI_KEY", "HEKETI_CLI_SERVER"] | [] | ["HEKETI_CLI_USER", "HEKETI_CLI_KEY", "HEKETI_CLI_SERVER"] | go | 3 | 0 |
shepherd/errors/api.py | from apistrap.errors import ApiClientError, ApiServerError
__all__ = ['ApiServerError', 'ApiClientError', 'UnknownSheepError', 'UnknownJobError', 'StorageError',
'StorageInaccessibleError', 'NameConflictError']
class UnknownSheepError(ApiClientError):
"""Exception raised when application attempts to use a sheep with an unknown id."""
class UnknownJobError(ApiClientError):
"""Exception raised when a client asks about a job that is not assigned to this shepherd."""
class StorageError(ApiServerError):
"""Exception raised when application encounters some issue with the minio storage."""
class StorageInaccessibleError(ApiServerError):
"""Exception raised when the remote storage is not accessible at the moment"""
class NameConflictError(ApiClientError):
"""Exception raised when a client chooses a job ID that was already used"""
| [] | [] | [] | [] | [] | python | null | null |
main.go | package main
import (
"fmt"
"log"
"net/http"
"os"
"github.com/erikfastermann/feeder/db"
"github.com/erikfastermann/feeder/handler"
"github.com/erikfastermann/httpwrap"
)
func main() {
if err := run(); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
func run() error {
if len(os.Args) != 8 {
return fmt.Errorf("USAGE: %s ADDRESS CERT_FILE KEY_FILE TEMPLATE_GLOB CSV_CTR CSV_FEEDS CSV_ITEMS", os.Args[0])
}
addr := os.Args[1]
crt, key := os.Args[2], os.Args[3]
tmplt := os.Args[4]
ctr, feeds, items := os.Args[5], os.Args[6], os.Args[7]
username := os.Getenv("FEEDER_USERNAME")
if username == "" {
return fmt.Errorf("environment variable FEEDER_USERNAME empty or unset")
}
password := os.Getenv("FEEDER_PASSWORD")
if password == "" {
return fmt.Errorf("environment variable FEEDER_PASSWORD empty or unset")
}
csv, err := db.Open(ctr, feeds, items)
if err != nil {
return err
}
defer csv.Close()
h := &handler.Handler{
Logger: log.New(os.Stderr, "ERROR ", log.LstdFlags),
Username: username,
Password: password,
TemplateGlob: tmplt,
DB: csv,
}
return http.ListenAndServeTLS(addr, crt, key, httpwrap.Log(httpwrap.HandleError(h)))
}
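// Illustrative invocation (added for clarity; paths and credentials are hypothetical):
//   FEEDER_USERNAME=admin FEEDER_PASSWORD=secret ./feeder :8443 cert.pem key.pem \
//       'templates/*.html' ctr.csv feeds.csv items.csv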
| ["\"FEEDER_USERNAME\"", "\"FEEDER_PASSWORD\""] | [] | ["FEEDER_USERNAME", "FEEDER_PASSWORD"] | [] | ["FEEDER_USERNAME", "FEEDER_PASSWORD"] | go | 2 | 0 |
mychat/asgi.py | """
ASGI config for mychat project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from channels.routing import ProtocolTypeRouter, URLRouter
from django.core.asgi import get_asgi_application
import chat.routing
from channels.auth import AuthMiddlewareStack
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mychat.settings')
application = ProtocolTypeRouter({
"http": get_asgi_application(),
"websocket": AuthMiddlewareStack(
URLRouter(
chat.routing.websocket_urlpatterns
)
),
})
| [] | [] | [] | [] | [] | python | 0 | 0 |
examples/GCN/gcn_MB.py | #! /usr/bin/env python
# GPTune Copyright (c) 2019, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory (subject to receipt of any
# required approvals from the U.S.Dept. of Energy) and the University of
# California, Berkeley. All rights reserved.
#
# If you have questions about your rights to use or distribute this software,
# please contact Berkeley Lab's Intellectual Property Office at [email protected].
#
# NOTICE. This Software was developed under funding from the U.S. Department
# of Energy and the U.S. Government consequently retains certain rights.
# As such, the U.S. Government has been granted for itself and others acting
# on its behalf a paid-up, nonexclusive, irrevocable, worldwide license in
# the Software to reproduce, distribute copies to the public, prepare
# derivative works, and perform publicly and display publicly, and to permit
# other to do so.
#
################################################################################
"""
Example of invocation of this script:
mpirun -n 1 python gcn_MB.py -dataset 'cora-citeseer' -nprocmin_pernode 1 -ntask 2 -nrun 10
where:
-nprocmin_pernode minimum number of MPIs per node for launching the application code
-ntask number of different tasks to be tuned
-nrun number of calls per task
-dataset name of dataset to be tune on
Description of the parameters of GCN:
Task space:
-dataset
Input space:
lr: learning rate
hidden: number of hidden layers
weight_decay: the L2 loss on GCN parameters
dropout: dropout rate
"""
import sys, os
# add GPTune path in front of all python pkg paths
from autotune.search import *
from autotune.space import *
from autotune.problem import *
from gptune import * # import all
sys.path.insert(0, os.path.abspath(__file__ + "/../GCN-driver/"))
from GCNdriver import GCNdriver
sys.path.insert(0, os.path.abspath(__file__ + "/../../../GPTune/"))
import re
import numpy as np
import time
import argparse
import pickle
from random import *
from callopentuner import OpenTuner
from callhpbandster import HpBandSter, HpBandSter_bandit
import math
import functools
import scipy
def objectives(point):
bmin = point['bmin']
bmax = point['bmax']
eta = point['eta']
params = [(point['dataset'], point["lr"],
point["hidden"], point["dropout"],
point["weight_decay"])]
max_epoch=500
min_epoch=100
    # map budget to fidelity, i.e., the number of training epochs
def budget_map(b, nmin=min_epoch, nmax=max_epoch):
k = (nmax-nmin)/(bmax-bmin)
m = nmax-bmax*k
if b == bmin:
return nmin
elif b == bmax:
return nmax
else:
return k * b + m
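    # Worked example (added for clarity, using the default bmin=1, bmax=8): k = (500-100)/(8-1)
    # ~= 57.14 and m = 500 - 8*k ~= 42.86, so budget 1 -> 100 epochs, budget 4 -> ~271 epochs,
    # and budget 8 -> 500 epochs.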
try:
budget = budget_map(int(point["budget"]))
except:
budget = None
validation_loss = GCNdriver(params, budget=budget, max_epoch=max_epoch, device=point["device"], seed=41)
    print(params, ' validation loss: ', validation_loss)
return validation_loss
def main():
(machine, processor, nodes, cores) = GetMachineConfiguration()
print ("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
# Parse command line arguments
args = parse_args()
bmin = args.bmin
device = args.device
bmax = args.bmax
eta = args.eta
nrun = args.nrun
npernode = args.npernode
ntask = args.ntask
Nloop = args.Nloop
restart = args.restart
TUNER_NAME = args.optimization
ot.RandomGenerator.SetSeed(args.seed)
TLA = False
print(args)
print ("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
os.environ['MACHINE_NAME'] = machine
os.environ['TUNER_NAME'] = TUNER_NAME
dataset = Categoricalnorm(['cora', 'citeseer'], transform="onehot", name="dataset")
lr = Real(1e-5, 1e-2, name="lr")
hidden = Integer(4, 64, transform="normalize", name="hidden")
weight_decay = Real(1e-5, 1e-2, name="weight_decay")
dropout = Real(0.1, 0.9, name="dropout")
validation_loss = Real(0., 1., name="validation_loss")
IS = Space([dataset])
PS = Space([weight_decay, hidden, lr, dropout])
OS = Space([validation_loss])
constraints = {}
constants={"nodes":nodes,"cores":cores,"npernode":npernode,"bmin":bmin,"bmax":bmax,"eta":eta, "device":device}
print(IS, PS, OS, constraints)
problem = TuningProblem(IS, PS, OS, objectives, constraints, constants=constants)
computer = Computer(nodes=nodes, cores=cores, hosts=None)
options = Options()
options['model_processes'] = 4 # parallel cholesky for each LCM kernel
# options['model_threads'] = 1
# options['model_restarts'] = args.Nrestarts
# options['distributed_memory_parallelism'] = False
# parallel model restart
options['model_restarts'] = restart
options['distributed_memory_parallelism'] = False
options['shared_memory_parallelism'] = False
# options['mpi_comm'] = None
options['model_class'] = 'Model_LCM' # Model_GPy_LCM or Model_LCM
options['verbose'] = False
options['sample_class'] = 'SampleOpenTURNS'
options['budget_min'] = bmin
options['budget_max'] = bmax
options['budget_base'] = eta
smax = int(np.floor(np.log(options['budget_max']/options['budget_min'])/np.log(options['budget_base'])))
budgets = [options['budget_max'] /options['budget_base']**x for x in range(smax+1)]
NSs = [int((smax+1)/(s+1))*options['budget_base']**s for s in range(smax+1)]
NSs_all = NSs.copy()
budget_all = budgets.copy()
for s in range(smax+1):
for n in range(s):
NSs_all.append(int(NSs[s]/options['budget_base']**(n+1)))
budget_all.append(int(budgets[s]*options['budget_base']**(n+1)))
Ntotal = int(sum(NSs_all) * Nloop)
Btotal = int(np.dot(np.array(NSs_all), np.array(budget_all))/options['budget_max'] * Nloop) # total number of evaluations at highest budget -- used for single-fidelity tuners
print(f"bmin = {bmin}, bmax = {bmax}, eta = {eta}, smax = {smax}")
print("samples in one multi-armed bandit loop, NSs_all = ", NSs_all)
print("total number of samples: ", Ntotal)
print("total number of evaluations at highest budget: ", Btotal)
print()
options.validate(computer = computer)
data = Data(problem)
# giventask = [[0.2, 0.5]]
giventask = []
dataset_list = args.dataset.split('-')
for dataset in dataset_list:
giventask.append([dataset])
NI=len(giventask)
assert NI == ntask # make sure number of tasks match
if(TUNER_NAME=='GPTune'):
gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
""" Building MLA with the given list of tasks """
NS = Btotal
if args.nrun > 0:
NS = args.nrun
NS1 = max(NS//2, 1)
(data, model, stats) = gt.MLA(NS=NS, NI=NI, Igiven=giventask, NS1=NS1)
print("Tuner: ", TUNER_NAME)
print("stats: ", stats)
results_file = open(f"GCN_{args.dataset}_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}.txt", "a")
results_file.write(f"Tuner: {TUNER_NAME}\n")
results_file.write(f"stats: {stats}\n")
""" Print all input and parameter samples """
for tid in range(NI):
print("tid: %d" % (tid))
print(f" dataset = {data.I[tid][0]}")
print(" Ps ", data.P[tid])
print(" Os ", data.O[tid])
print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
results_file.write(f"tid: {tid:d}\n")
results_file.write(f" dataset = {data.I[tid][0]}\n")
results_file.write(f" Os {data.O[tid].tolist()}\n")
# results_file.write(f' Popt {data.P[tid][np.argmin(data.O[tid])]} Oopt {-min(data.O[tid])[0]} nth {np.argmin(data.O[tid])}\n')
results_file.close()
if(TUNER_NAME=='opentuner'):
NS = Btotal
(data,stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
print("Tuner: ", TUNER_NAME)
print("stats: ", stats)
results_file = open(f"GCN_{args.dataset}_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}.txt", "a")
results_file.write(f"Tuner: {TUNER_NAME}\n")
results_file.write(f"stats: {stats}\n")
""" Print all input and parameter samples """
for tid in range(NI):
print("tid: %d" % (tid))
print(f" dataset = {data.I[tid][0]}")
print(" Ps ", data.P[tid])
print(" Os ", data.O[tid])
print(' Popt ', data.P[tid][np.argmin(data.O[tid][:NS])], 'Oopt ', min(data.O[tid][:NS])[0], 'nth ', np.argmin(data.O[tid][:NS]))
results_file.write(f"tid: {tid:d}\n")
results_file.write(f" dataset = {data.I[tid][0]}\n")
# results_file.write(f" Ps {data.P[tid][:NS]}\n")
results_file.write(f" Os {data.O[tid][:NS].tolist()}\n")
# results_file.write(f' Popt {data.P[tid][np.argmin(data.O[tid])]} Oopt {-min(data.O[tid])[0]} nth {np.argmin(data.O[tid])}\n')
results_file.close()
# single-fidelity version of hpbandster
if(TUNER_NAME=='TPE'):
NS = Btotal
(data,stats)=HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, options=options, run_id="HpBandSter", niter=1)
print("Tuner: ", TUNER_NAME)
print("stats: ", stats)
results_file = open(f"GCN_{args.dataset}_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}.txt", "a")
results_file.write(f"Tuner: {TUNER_NAME}\n")
results_file.write(f"stats: {stats}\n")
""" Print all input and parameter samples """
for tid in range(NI):
print("tid: %d" % (tid))
print(f" dataset = {data.I[tid][0]}")
print(" Ps ", data.P[tid])
print(" Os ", data.O[tid].tolist())
print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
results_file.write(f"tid: {tid:d}\n")
results_file.write(f" dataset = {data.I[tid][0]}\n")
results_file.write(f" Os {data.O[tid].tolist()}\n")
# results_file.write(f' Popt {data.P[tid][np.argmin(data.O[tid])]} Oopt {-min(data.O[tid])[0]} nth {np.argmin(data.O[tid])}\n')
results_file.close()
if(TUNER_NAME=='GPTuneBand'):
data = Data(problem)
gt = GPTune_MB(problem, computer=computer, NS=Nloop, options=options)
(data, stats, data_hist)=gt.MB_LCM(NS = Nloop, Igiven = giventask)
print("Tuner: ", TUNER_NAME)
print("stats: ", stats)
results_file = open(f"GCN_{args.dataset}_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}.txt", "a")
results_file.write(f"Tuner: {TUNER_NAME}\n")
results_file.write(f"stats: {stats}\n")
""" Print all input and parameter samples """
for tid in range(NI):
print("tid: %d" % (tid))
print(f" dataset = {data.I[tid][0]}")
print(" Ps ", data.P[tid])
print(" Os ", data.O[tid].tolist())
nth = np.argmin(data.O[tid])
Popt = data.P[tid][nth]
# find which arm and which sample the optimal param is from
for arm in range(len(data_hist.P)):
try:
idx = (data_hist.P[arm]).index(Popt)
arm_opt = arm
except ValueError:
pass
print(' Popt ', Popt, 'Oopt ', min(data.O[tid])[0], 'nth ', nth)
results_file.write(f"tid: {tid:d}\n")
results_file.write(f" dataset = {data.I[tid][0]}\n")
# results_file.write(f" Ps {data.P[tid]}\n")
results_file.write(f" Os {data.O[tid].tolist()}\n")
# results_file.write(f' Popt {data.P[tid][np.argmin(data.O[tid])]} Oopt {-min(data.O[tid])[0]} nth {np.argmin(data.O[tid])}\n')
results_file.close()
# multi-fidelity version
if(TUNER_NAME=='hpbandster'):
NS = Ntotal
(data,stats)=HpBandSter_bandit(T=giventask, NS=NS, tp=problem, computer=computer, options=options, run_id="hpbandster_bandit", niter=1)
print("Tuner: ", TUNER_NAME)
print("stats: ", stats)
results_file = open(f"GCN_{args.dataset}_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}.txt", "a")
results_file.write(f"Tuner: {TUNER_NAME}\n")
results_file.write(f"stats: {stats}\n")
""" Print all input and parameter samples """
for tid in range(NI):
print("tid: %d" % (tid))
print(f" dataset = {data.I[tid][0]}")
print(" Ps ", data.P[tid])
print(" Os ", data.O[tid].tolist())
# print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
max_budget = 0.
Oopt = 99999
Popt = None
nth = None
for idx, (config, out) in enumerate(zip(data.P[tid], data.O[tid].tolist())):
for subout in out[0]:
budget_cur = subout[0]
if budget_cur > max_budget:
max_budget = budget_cur
Oopt = subout[1]
Popt = config
nth = idx
elif budget_cur == max_budget:
if subout[1] < Oopt:
Oopt = subout[1]
Popt = config
nth = idx
print(' Popt ', Popt, 'Oopt ', Oopt, 'nth ', nth)
results_file.write(f"tid: {tid:d}\n")
results_file.write(f" dataset = {data.I[tid][0]}\n")
# results_file.write(f" Ps {data.P[tid]}\n")
results_file.write(f" Os {data.O[tid].tolist()}\n")
# results_file.write(f' Popt {data.P[tid][np.argmin(data.O[tid])]} Oopt {-min(data.O[tid])[0]} nth {np.argmin(data.O[tid])}\n')
results_file.close()
def parse_args():
parser = argparse.ArgumentParser()
# Problem related arguments
# Machine related arguments
parser.add_argument('-nodes', type=int, default=1, help='Number of machine nodes')
parser.add_argument('-cores', type=int, default=1, help='Number of cores per machine node')
parser.add_argument('-npernode', type=int, default=1,help='Minimum number of MPIs per machine node for the application code')
# parser.add_argument('-machine', type=str, help='Name of the computer (not hostname)')
parser.add_argument('-device', type=str, default='cpu', help='torch.device: cpu or cuda')
# Algorithm related arguments
parser.add_argument('-optimization', type=str,default='GPTune',help='Optimization algorithm (opentuner, hpbandster, GPTune)')
parser.add_argument('-ntask', type=int, default=-1, help='Number of tasks')
parser.add_argument('-dataset', type=str, default='MNIST', help='target dataset to train GCN on')
parser.add_argument('-nrun', type=int, default=-1, help='Number of runs per task')
parser.add_argument('-bmin', type=int, default=1, help='minimum fidelity for a bandit structure')
parser.add_argument('-bmax', type=int, default=8, help='maximum fidelity for a bandit structure')
parser.add_argument('-eta', type=int, default=2, help='base value for a bandit structure')
parser.add_argument('-Nloop', type=int, default=1, help='number of GPTuneBand loops')
parser.add_argument('-restart', type=int, default=2, help='number of GPTune MLA restart')
parser.add_argument('-expid', type=str, default='-', help='experiment id')
parser.add_argument('-seed', type=int, default=0, help='random seed')
args = parser.parse_args()
return args
if __name__ == "__main__":
main()
| [] | [] | ["TUNER_NAME", "MACHINE_NAME"] | [] | ["TUNER_NAME", "MACHINE_NAME"] | python | 2 | 0 |
provider/cloudflare.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package provider
import (
"context"
"fmt"
"os"
"sort"
"strconv"
"strings"
cloudflare "github.com/cloudflare/cloudflare-go"
log "github.com/sirupsen/logrus"
"github.com/kubernetes-incubator/external-dns/endpoint"
"github.com/kubernetes-incubator/external-dns/plan"
"github.com/kubernetes-incubator/external-dns/source"
)
const (
// cloudFlareCreate is a ChangeAction enum value
cloudFlareCreate = "CREATE"
// cloudFlareDelete is a ChangeAction enum value
cloudFlareDelete = "DELETE"
// cloudFlareUpdate is a ChangeAction enum value
cloudFlareUpdate = "UPDATE"
// defaultCloudFlareRecordTTL 1 = automatic
defaultCloudFlareRecordTTL = 1
)
var cloudFlareTypeNotSupported = map[string]bool{
"LOC": true,
"MX": true,
"NS": true,
"SPF": true,
"TXT": true,
"SRV": true,
}
// cloudFlareDNS is the subset of the CloudFlare API that we actually use. Add methods as required. Signatures must match exactly.
type cloudFlareDNS interface {
UserDetails() (cloudflare.User, error)
ZoneIDByName(zoneName string) (string, error)
ListZones(zoneID ...string) ([]cloudflare.Zone, error)
ListZonesContext(ctx context.Context, opts ...cloudflare.ReqOption) (cloudflare.ZonesResponse, error)
DNSRecords(zoneID string, rr cloudflare.DNSRecord) ([]cloudflare.DNSRecord, error)
CreateDNSRecord(zoneID string, rr cloudflare.DNSRecord) (*cloudflare.DNSRecordResponse, error)
DeleteDNSRecord(zoneID, recordID string) error
UpdateDNSRecord(zoneID, recordID string, rr cloudflare.DNSRecord) error
}
type zoneService struct {
service *cloudflare.API
}
func (z zoneService) UserDetails() (cloudflare.User, error) {
return z.service.UserDetails()
}
func (z zoneService) ListZones(zoneID ...string) ([]cloudflare.Zone, error) {
return z.service.ListZones(zoneID...)
}
func (z zoneService) ZoneIDByName(zoneName string) (string, error) {
return z.service.ZoneIDByName(zoneName)
}
func (z zoneService) CreateDNSRecord(zoneID string, rr cloudflare.DNSRecord) (*cloudflare.DNSRecordResponse, error) {
return z.service.CreateDNSRecord(zoneID, rr)
}
func (z zoneService) DNSRecords(zoneID string, rr cloudflare.DNSRecord) ([]cloudflare.DNSRecord, error) {
return z.service.DNSRecords(zoneID, rr)
}
func (z zoneService) UpdateDNSRecord(zoneID, recordID string, rr cloudflare.DNSRecord) error {
return z.service.UpdateDNSRecord(zoneID, recordID, rr)
}
func (z zoneService) DeleteDNSRecord(zoneID, recordID string) error {
return z.service.DeleteDNSRecord(zoneID, recordID)
}
func (z zoneService) ListZonesContext(ctx context.Context, opts ...cloudflare.ReqOption) (cloudflare.ZonesResponse, error) {
return z.service.ListZonesContext(ctx, opts...)
}
// CloudFlareProvider is an implementation of Provider for CloudFlare DNS.
type CloudFlareProvider struct {
Client cloudFlareDNS
// only consider hosted zones managing domains ending in this suffix
domainFilter DomainFilter
zoneIDFilter ZoneIDFilter
proxiedByDefault bool
DryRun bool
PaginationOptions cloudflare.PaginationOptions
}
// cloudFlareChange differentiates between ChangeActions
type cloudFlareChange struct {
Action string
ResourceRecordSet []cloudflare.DNSRecord
}
// NewCloudFlareProvider initializes a new CloudFlare DNS based Provider.
func NewCloudFlareProvider(domainFilter DomainFilter, zoneIDFilter ZoneIDFilter, zonesPerPage int, proxiedByDefault bool, dryRun bool) (*CloudFlareProvider, error) {
// initialize via chosen auth method and returns new API object
var (
config *cloudflare.API
err error
)
if os.Getenv("CF_API_TOKEN") != "" {
config, err = cloudflare.NewWithAPIToken(os.Getenv("CF_API_TOKEN"))
} else {
config, err = cloudflare.New(os.Getenv("CF_API_KEY"), os.Getenv("CF_API_EMAIL"))
}
if err != nil {
return nil, fmt.Errorf("failed to initialize cloudflare provider: %v", err)
}
provider := &CloudFlareProvider{
//Client: config,
Client: zoneService{config},
domainFilter: domainFilter,
zoneIDFilter: zoneIDFilter,
proxiedByDefault: proxiedByDefault,
DryRun: dryRun,
PaginationOptions: cloudflare.PaginationOptions{
PerPage: zonesPerPage,
Page: 1,
},
}
return provider, nil
}
// Zones returns the list of hosted zones.
func (p *CloudFlareProvider) Zones() ([]cloudflare.Zone, error) {
result := []cloudflare.Zone{}
ctx := context.TODO()
p.PaginationOptions.Page = 1
for {
zonesResponse, err := p.Client.ListZonesContext(ctx, cloudflare.WithPagination(p.PaginationOptions))
if err != nil {
return nil, err
}
for _, zone := range zonesResponse.Result {
if !p.domainFilter.Match(zone.Name) {
continue
}
if !p.zoneIDFilter.Match(zone.ID) {
continue
}
result = append(result, zone)
}
if p.PaginationOptions.Page == zonesResponse.ResultInfo.TotalPages {
break
}
p.PaginationOptions.Page++
}
return result, nil
}
// Records returns the list of records.
func (p *CloudFlareProvider) Records() ([]*endpoint.Endpoint, error) {
zones, err := p.Zones()
if err != nil {
return nil, err
}
endpoints := []*endpoint.Endpoint{}
for _, zone := range zones {
records, err := p.Client.DNSRecords(zone.ID, cloudflare.DNSRecord{})
if err != nil {
return nil, err
}
// As CloudFlare does not support "sets" of targets, but instead returns
// a single entry for each name/type/target, we have to group by name
// and record to allow the planner to calculate the correct plan. See #992.
endpoints = append(endpoints, groupByNameAndType(records)...)
}
return endpoints, nil
}
// ApplyChanges applies a given set of changes in a given zone.
func (p *CloudFlareProvider) ApplyChanges(ctx context.Context, changes *plan.Changes) error {
proxiedByDefault := p.proxiedByDefault
combinedChanges := make([]*cloudFlareChange, 0, len(changes.Create)+len(changes.UpdateNew)+len(changes.Delete))
combinedChanges = append(combinedChanges, newCloudFlareChanges(cloudFlareCreate, changes.Create, proxiedByDefault)...)
combinedChanges = append(combinedChanges, newCloudFlareChanges(cloudFlareUpdate, changes.UpdateNew, proxiedByDefault)...)
combinedChanges = append(combinedChanges, newCloudFlareChanges(cloudFlareDelete, changes.Delete, proxiedByDefault)...)
return p.submitChanges(combinedChanges)
}
// submitChanges takes a zone and a collection of Changes and sends them as a single transaction.
func (p *CloudFlareProvider) submitChanges(changes []*cloudFlareChange) error {
// return early if there is nothing to change
if len(changes) == 0 {
return nil
}
zones, err := p.Zones()
if err != nil {
return err
}
// separate into per-zone change sets to be passed to the API.
changesByZone := p.changesByZone(zones, changes)
for zoneID, changes := range changesByZone {
records, err := p.Client.DNSRecords(zoneID, cloudflare.DNSRecord{})
if err != nil {
return fmt.Errorf("could not fetch records from zone, %v", err)
}
for _, change := range changes {
logFields := log.Fields{
"record": change.ResourceRecordSet[0].Name,
"type": change.ResourceRecordSet[0].Type,
"ttl": change.ResourceRecordSet[0].TTL,
"targets": len(change.ResourceRecordSet),
"action": change.Action,
"zone": zoneID,
}
log.WithFields(logFields).Info("Changing record.")
if p.DryRun {
continue
}
recordIDs := p.getRecordIDs(records, change.ResourceRecordSet[0])
// to simplify bookkeeping for multiple records, an update is executed as delete+create
if change.Action == cloudFlareDelete || change.Action == cloudFlareUpdate {
for _, recordID := range recordIDs {
err := p.Client.DeleteDNSRecord(zoneID, recordID)
if err != nil {
log.WithFields(logFields).Errorf("failed to delete record: %v", err)
}
}
}
if change.Action == cloudFlareCreate || change.Action == cloudFlareUpdate {
for _, record := range change.ResourceRecordSet {
_, err := p.Client.CreateDNSRecord(zoneID, record)
if err != nil {
log.WithFields(logFields).Errorf("failed to create record: %v", err)
}
}
}
}
}
return nil
}
// changesByZone separates a multi-zone change into a single change per zone.
func (p *CloudFlareProvider) changesByZone(zones []cloudflare.Zone, changeSet []*cloudFlareChange) map[string][]*cloudFlareChange {
changes := make(map[string][]*cloudFlareChange)
zoneNameIDMapper := zoneIDName{}
for _, z := range zones {
zoneNameIDMapper.Add(z.ID, z.Name)
changes[z.ID] = []*cloudFlareChange{}
}
for _, c := range changeSet {
zoneID, _ := zoneNameIDMapper.FindZone(c.ResourceRecordSet[0].Name)
if zoneID == "" {
log.Debugf("Skipping record %s because no hosted zone matching record DNS Name was detected ", c.ResourceRecordSet[0].Name)
continue
}
changes[zoneID] = append(changes[zoneID], c)
}
return changes
}
func (p *CloudFlareProvider) getRecordIDs(records []cloudflare.DNSRecord, record cloudflare.DNSRecord) []string {
recordIDs := make([]string, 0)
for _, zoneRecord := range records {
if zoneRecord.Name == record.Name && zoneRecord.Type == record.Type {
recordIDs = append(recordIDs, zoneRecord.ID)
}
}
sort.Strings(recordIDs)
return recordIDs
}
// newCloudFlareChanges returns a collection of Changes based on the given records and action.
func newCloudFlareChanges(action string, endpoints []*endpoint.Endpoint, proxiedByDefault bool) []*cloudFlareChange {
changes := make([]*cloudFlareChange, 0, len(endpoints))
for _, endpoint := range endpoints {
changes = append(changes, newCloudFlareChange(action, endpoint, proxiedByDefault))
}
return changes
}
func newCloudFlareChange(action string, endpoint *endpoint.Endpoint, proxiedByDefault bool) *cloudFlareChange {
ttl := defaultCloudFlareRecordTTL
proxied := shouldBeProxied(endpoint, proxiedByDefault)
if endpoint.RecordTTL.IsConfigured() {
ttl = int(endpoint.RecordTTL)
}
resourceRecordSet := make([]cloudflare.DNSRecord, len(endpoint.Targets))
for i := range endpoint.Targets {
resourceRecordSet[i] = cloudflare.DNSRecord{
Name: endpoint.DNSName,
TTL: ttl,
Proxied: proxied,
Type: endpoint.RecordType,
Content: endpoint.Targets[i],
}
}
return &cloudFlareChange{
Action: action,
ResourceRecordSet: resourceRecordSet,
}
}
func shouldBeProxied(endpoint *endpoint.Endpoint, proxiedByDefault bool) bool {
proxied := proxiedByDefault
for _, v := range endpoint.ProviderSpecific {
if v.Name == source.CloudflareProxiedKey {
b, err := strconv.ParseBool(v.Value)
if err != nil {
log.Errorf("Failed to parse annotation [%s]: %v", source.CloudflareProxiedKey, err)
} else {
proxied = b
}
break
}
}
if cloudFlareTypeNotSupported[endpoint.RecordType] || strings.Contains(endpoint.DNSName, "*") {
proxied = false
}
return proxied
}
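// groupByNameAndType collapses records that share a DNS name and record type into a
// single endpoint carrying all targets, keeping the TTL and proxied flag of the group.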
func groupByNameAndType(records []cloudflare.DNSRecord) []*endpoint.Endpoint {
endpoints := []*endpoint.Endpoint{}
// group supported records by name and type
groups := map[string][]cloudflare.DNSRecord{}
for _, r := range records {
if !supportedRecordType(r.Type) {
continue
}
groupBy := r.Name + r.Type
if _, ok := groups[groupBy]; !ok {
groups[groupBy] = []cloudflare.DNSRecord{}
}
groups[groupBy] = append(groups[groupBy], r)
}
// create single endpoint with all the targets for each name/type
for _, records := range groups {
targets := make([]string, len(records))
for i, record := range records {
targets[i] = record.Content
}
endpoints = append(endpoints,
endpoint.NewEndpointWithTTL(
records[0].Name,
records[0].Type,
endpoint.TTL(records[0].TTL),
targets...).
WithProviderSpecific(source.CloudflareProxiedKey, strconv.FormatBool(records[0].Proxied)))
}
return endpoints
}
| [
"\"CF_API_TOKEN\"",
"\"CF_API_TOKEN\"",
"\"CF_API_KEY\"",
"\"CF_API_EMAIL\""
]
| []
| [
"CF_API_EMAIL",
"CF_API_KEY",
"CF_API_TOKEN"
]
| [] | ["CF_API_EMAIL", "CF_API_KEY", "CF_API_TOKEN"] | go | 3 | 0 | |
slam/dataset/rosbag_dataset.py | import logging
from dataclasses import MISSING
from pathlib import Path
from typing import Optional, Tuple
import os
from torch.utils.data import IterableDataset
import numpy as np
from hydra.conf import field, dataclass
from hydra.core.config_store import ConfigStore
from omegaconf import DictConfig, OmegaConf
from slam.common.projection import SphericalProjector
from slam.common.utils import assert_debug, remove_nan
from slam.dataset import DatasetLoader, DatasetConfig
try:
import rosbag
import sensor_msgs.point_cloud2 as pc2
from sensor_msgs.msg import PointCloud2, PointField
_with_rosbag = True
except ImportError:
_with_rosbag = False
if _with_rosbag:
@dataclass
class RosbagConfig(DatasetConfig):
"""Config for a Rosbag Dataset"""
dataset: str = "rosbag"
file_path: str = field(
default_factory=lambda: "" if not "ROSBAG_PATH" in os.environ else os.environ["ROSBAG_PATH"])
main_topic: str = "numpy_pc" # The Key of the main topic (which determines the number of frames)
xyz_fields: str = "xyz"
accumulate_scans: bool = False # Whether to accumulate the pointcloud messages (in case of raw sensor data)
frame_size: int = 60 # The number of accumulated message which constitute a frame
topic_mapping: dict = field(default_factory=lambda: {})
lidar_height: int = 720
lidar_width: int = 720
up_fov: float = 45.
down_fov: float = -45.
class RosbagDataset(IterableDataset):
"""A Dataset which wraps a RosBag
Note:
The dataset can only read data sequentially, and will raise an error when two calls are not consecutives
Args:
file_path (str): The path on disk to the rosbag
main_topic (str): The name of the main topic (which sets the number of frames to be extracted)
frame_size (int): The number of messages to accumulate in a frame
topic_mapping (dict): The mapping topic name to key in the data_dict
"""
def _lazy_initialization(self, prefix: str = ""):
if not self.initialized:
logging.info(f"[RosbagDataset]{prefix}Loading ROSBAG {self.file_path}. May take some time")
self.rosbag = rosbag.Bag(self.file_path, "r")
logging.info(f"Done.")
topic_info = self.rosbag.get_type_and_topic_info()
for topic in self.topic_mapping:
assert_debug(topic in topic_info.topics,
f"{topic} is not a topic of the rosbag "
f"(existing topics : {list(topic_info.topics.keys())}")
self._len = self.rosbag.get_message_count(self.main_topic) // self._frame_size
self.initialized = True
def init(self):
self._lazy_initialization()
def __init__(self, config: RosbagConfig, file_path: str, main_topic: str, frame_size: int,
topic_mapping: Optional[dict] = None):
self.config = config
self.rosbag = None
self.initialized = False
assert_debug(Path(file_path).exists(), f"The path to {file_path} does not exist.")
self.file_path = file_path
self.topic_mapping = topic_mapping if topic_mapping is not None else {}
if main_topic not in self.topic_mapping:
self.topic_mapping[main_topic] = "numpy_pc"
self.main_topic: str = main_topic
self.frame_size = frame_size
self._frame_size: int = frame_size if self.config.accumulate_scans else 1
self._len = -1 #
self._idx = 0
self._topics = list(topic_mapping.keys())
self.__iter = None
def __iter__(self):
self._lazy_initialization("[ITER]")
self.__iter = self.rosbag.read_messages(self._topics)
self._idx = 0
return self
@staticmethod
def decode_pointcloud(msg: pc2.PointCloud2, timestamp, xyz_fieldname: str = "xyz") -> Tuple[
Optional[np.ndarray], Optional[np.ndarray]]:
assert_debug("PointCloud2" in msg._type)
pc = np.array(list(pc2.read_points(msg, field_names=xyz_fieldname)))
timestamps = np.ones((pc.shape[0],),
                                 dtype=np.float64) * (float(timestamp.secs * 1e9) + timestamp.nsecs)
return pc, timestamps
def aggregate_messages(self, data_dict: dict):
"""Aggregates the point clouds of the main topic"""
main_key = self.topic_mapping[self.main_topic]
pcs = data_dict[main_key]
data_dict[main_key] = np.concatenate(pcs, axis=0)
timestamps_topic = f"{main_key}_timestamps"
if timestamps_topic in data_dict:
data_dict[timestamps_topic] = np.concatenate(data_dict[timestamps_topic], axis=0)
return data_dict
def _save_topic(self, data_dict, key, topic, msg, t, **kwargs):
if "PointCloud2" in msg._type:
data, timestamps = self.decode_pointcloud(msg, t)
data_dict[key].append(data)
timestamps_key = f"{key}_timestamps"
if timestamps_key not in data_dict:
data_dict[timestamps_key] = []
data_dict[timestamps_key].append(timestamps)
def __getitem__(self, index) -> dict:
self._lazy_initialization("[GETITEM]")
assert_debug(index == self._idx, "A RosbagDataset does not support Random access")
assert isinstance(self.config, RosbagConfig)
if self.__iter is None:
self.__iter__()
data_dict = {key: [] for key in self.topic_mapping.values()}
main_topic_key = self.topic_mapping[self.main_topic]
# Append Messages until the main topic has the required number of messages
while len(data_dict[main_topic_key]) < self._frame_size:
topic, msg, t = next(self.__iter)
_key = self.topic_mapping[topic]
self._save_topic(data_dict, _key, topic, msg, t, frame_index=index)
self._idx += 1
# Aggregate data
data_dict = self.aggregate_messages(data_dict)
return data_dict
def __next__(self):
return self[self._idx]
def __len__(self):
self._lazy_initialization("[LEN]")
return self._len
def close(self):
if self.initialized:
if self.rosbag is not None:
self.rosbag.close()
del self.rosbag
self.rosbag = None
self.initialized = False
self._len = -1
self._idx = 0
self.__iter = None
def __del__(self):
self.close()
# Hydra -- stores a RosbagConfig `rosbag` in the `dataset` group
cs = ConfigStore.instance()
cs.store(group="dataset", name="rosbag", node=RosbagConfig)
class RosbagDatasetConfiguration(DatasetLoader):
"""Returns the configuration of a Dataset built for ROS"""
def __init__(self, config: RosbagConfig, **kwargs):
if isinstance(config, DictConfig):
config = RosbagConfig(**config)
super().__init__(config)
@classmethod
def max_num_workers(cls):
return 1
def projector(self) -> SphericalProjector:
return SphericalProjector(height=self.config.lidar_height, width=self.config.lidar_width,
up_fov=self.config.up_fov, down_fov=self.config.down_fov)
def sequences(self):
assert isinstance(self.config, RosbagConfig)
file_path = self.config.file_path
dataset = RosbagDataset(self.config, file_path, self.config.main_topic,
self.config.frame_size,
OmegaConf.to_container(self.config.topic_mapping) if isinstance(
self.config.topic_mapping, DictConfig) else self.config.topic_mapping)
return ([dataset], [Path(file_path).stem]), None, None, lambda x: x
def get_ground_truth(self, sequence_name):
"""No ground truth can be read from the ROSBAG"""
return None
| []
| []
| [
"ROSBAG_PATH"
]
| [] | ["ROSBAG_PATH"] | python | 1 | 0 | |
infra/templates/osdu-r3-mvp/monitoring_resources/tests/unit/unit_test.go | // Copyright © Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test
import (
"os"
"testing"
"github.com/gruntwork-io/terratest/modules/terraform"
"github.com/microsoft/cobalt/test-harness/infratests"
)
var tfOptions = &terraform.Options{
TerraformDir: "../../",
Upgrade: true,
Vars: map[string]interface{}{
"resource_group_location": region,
"prefix": prefix,
},
BackendConfig: map[string]interface{}{
"storage_account_name": os.Getenv("TF_VAR_remote_state_account"),
"container_name": os.Getenv("TF_VAR_remote_state_container"),
},
}
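// TestTemplate runs a plan-only unit test against the template via the infratests
// harness and asserts the expected resource count and resource group attributes.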
func TestTemplate(t *testing.T) {
expectedAppDevResourceGroup := asMap(t, `{
"location": "`+region+`"
}`)
resourceDescription := infratests.ResourceDescription{
"azurerm_resource_group.main": expectedAppDevResourceGroup,
}
testFixture := infratests.UnitTestFixture{
GoTest: t,
TfOptions: tfOptions,
Workspace: workspace,
PlanAssertions: nil,
ExpectedResourceCount: 28,
ExpectedResourceAttributeValues: resourceDescription,
}
infratests.RunUnitTests(&testFixture)
}
| [
"\"TF_VAR_remote_state_account\"",
"\"TF_VAR_remote_state_container\""
]
| []
| [
"TF_VAR_remote_state_account",
"TF_VAR_remote_state_container"
]
| [] | ["TF_VAR_remote_state_account", "TF_VAR_remote_state_container"] | go | 2 | 0 | |
resource/config/vpnepaprofile.go | /*
* Copyright (c) 2021 Citrix Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package config
/**
* Configuration for Epa profile resource.
*/
type Vpnepaprofile struct {
/**
* name of device profile
*/
Name string `json:"name,omitempty"`
/**
* filename of the deviceprofile data xml
*/
Filename string `json:"filename,omitempty"`
/**
* deviceprofile data xml
*/
Data string `json:"data,omitempty"`
}
| []
| []
| []
| [] | [] | go | null | null | null |
bin/get_attackdetection_rules.py | from __future__ import absolute_import, division, print_function, unicode_literals
import os,sys
import time
import xml.etree.ElementTree as ET
import json
import requests
splunkhome = os.environ['SPLUNK_HOME']
sys.path.append(os.path.join(splunkhome, 'etc', 'apps', 'DA-ESS-MitreContent', 'lib'))
from seynurlib.validation import *
from splunklib.searchcommands import dispatch, GeneratingCommand, Configuration, Option, validators
from splunklib.six.moves import range
APPCONTEXT='DA-ESS-MitreContent'
APIENDPOINT='https://api.seynur.com/v1/attack-detection'
APILOOKUPFILE='mitre_api_rule_technique_lookup.csv'
@Configuration()
class GetAttackDetectionRulesCommand(GeneratingCommand):
def getApiKey(self):
print("a")
sp = self.service.storage_passwords
print("b")
result_stream = sp.get(name="attackdetection_apikey",app=APPCONTEXT)['body'].read().decode("utf-8")
xmlroot = ET.fromstring(result_stream)
apikey = ''
for elem in xmlroot.findall(".//*[@name='clear_password']"):
apikey=elem.text
return apikey
def getRulesFromApi(self,api_key):
url=APIENDPOINT
headers = {'content-type': "application/x-www-form-urlencoded",
'cache-control': "no-cache",
'apikey' : '{}'.format(api_key)}
response = requests.request("POST", url, headers=headers)
        try:
            rules = json.loads(response.text)
        except:
            if response.status_code == 200:
                raise Exception("GetAttackDetectionRules Exception: Unable to authenticate request. Please verify your API Key.")
            raise Exception("GetAttackDetectionRules Exception: API request failed with status code {}.".format(response.status_code))
        return rules
def addSavedSearch(self, rule_name, description, spl, technique_id, security_domain, severity, fields):
rule_title = rule_label = rule_name.replace("- Rule","").strip()
rule_title += " - For fields:"
for i in fields.split(","):
rule_title += " $" + i + "$"
rule_description = description + " - MITRE ATT&CK Techniques: " + technique_id
kwargs_createsearch = {"description": rule_description,
"disabled": 1,
"action.alert_manager.param.title": "$name$",
"action.correlationsearch.enabled": 1,
"action.correlationsearch.label": rule_label,
"alert.suppress" : 0,
"cron_schedule" : "*/5 * * * *",
"dispatch.earliest_time" : "-24h",
"dispatch.latest_time" : "now",
"action.customsearchbuilder.spec" : "{}",
"action.notable": 1,
"action.notable.param.security_domain": security_domain,
"action.notable.param.severity": severity,
"action.notable.param.nes_fields": fields,
"action.notable.param.rule_title": rule_title,
"action.notable.param.rule_description": rule_description}
self.service.saved_searches.create(rule_name, spl, **kwargs_createsearch)
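    # Builds the SPL search string that regenerates the rule-name/technique-id lookup:
    # makeresults + streamstats enumerate the rules, eval/case map ids to values and
    # outputlookup writes the result to the lookup file.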
def createLookupGenerationString(self, rules):
rule_count = len(rules)
base_case_string = 'count={}, "{}"'
rule_name_case_string = ''
technique_id_case_string = ''
for rule in rules:
rule_name_case_string = rule_name_case_string + "," + base_case_string.format(rule['id'], rule['rule_name'])
technique_id_case_string = technique_id_case_string + "," + base_case_string.format(rule['id'], rule['technique_id'])
rule_name_case_string = rule_name_case_string[1:]
technique_id_case_string = technique_id_case_string[1:]
base_rule_name_string = '| eval rule_name=case({})'
base_technique_id_string = '| eval technique_id=case({})'
rule_name_string = base_rule_name_string.format(rule_name_case_string)
technique_id_string = base_technique_id_string.format(technique_id_case_string)
base_search_string = '| makeresults count={} | streamstats count {} {} | table rule_name, technique_id | outputlookup ' + APILOOKUPFILE
search_string = base_search_string.format(rule_count, rule_name_string, technique_id_string)
return search_string
def createLookup(self, rules):
lookup_gen_search = self.createLookupGenerationString(rules)
kwargs_export = {}
search_results = self.service.jobs.export(lookup_gen_search, **kwargs_export)
return search_results
def generate(self):
try:
api_key = self.getApiKey()
if not is_alphanumeric(api_key, 32):
text = 'GetAttackDetectionRules Error: Unable to send request. The API key must be a 32 character string comprised of alphanumeric characters.'
self.logger.error(text)
else:
api_rules = self.getRulesFromApi(api_key)
splunk_rule_names = []
for search in self.service.saved_searches:
splunk_rule_names.append(search.name)
for api_rule in api_rules:
if api_rule['rule_name'] not in splunk_rule_names :
self.addSavedSearch(api_rule['rule_name'],
api_rule['description'],
api_rule['spl'],
api_rule['technique_id'],
api_rule['security_domain'],
api_rule['severity'],
api_rule['fields'])
self.createLookup(api_rules)
self.logger.info("Attack Detection API: Successfully completed.")
text = 'Attack Detection API: Successfully completed.'
except: # catch *all* exceptions
t, value, tb = sys.exc_info()
self.logger.error( "Attack Detection API: EXCEPTION %s: %s" % (t,value) )
text = 'Attack Detection API: EXCEPTION %s: %s' % (t,value)
yield {'_time': time.time(), '_raw': text}
dispatch(GetAttackDetectionRulesCommand, sys.argv, sys.stdin, sys.stdout, __name__)
| []
| []
| [
"SPLUNK_HOME"
]
| [] | ["SPLUNK_HOME"] | python | 1 | 0 | |
examples/postsFromMarkdown/main.go | // The postFromMarkdown command shows how to use the syncPost command to upload externally-managed
// markdown posts to slab.
// The token is expected to be located in an environment variable called `SLAB_TOKEN`
package main
import (
"fmt"
"io/ioutil"
"net/http"
"os"
"time"
"github.com/VEVO/slab-go/slab"
)
func main() {
topicID := os.Getenv("SLAB_TOPIC_ID")
slabToken := os.Getenv("SLAB_TOKEN")
c := slab.NewClient(&http.Client{Timeout: time.Duration(10 * time.Second)}, slabToken)
// Pulling some content from a url just to have an example
resp, err := http.Get("https://raw.githubusercontent.com/VEVO/slab-go/master/README.md")
if err != nil {
panic(err)
}
defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	p, err := c.Post.Sync("slabgoREADME", string(body), "https://github.com/VEVO/slab-go/blob/master/README.md", "https://github.com/VEVO/slab-go/blob/master/README.md", "MARKDOWN")
if err != nil {
panic(err)
}
fmt.Printf("Post id is: %s\nPost title is: %s\nPost content is:\n%s\nPost version: %d\n", p.ID, p.Title, *p.Content, p.Version)
if topicID != "" {
if err := c.Post.AddTopic(p.ID, topicID); err != nil {
panic(err)
}
}
}
| [
"\"SLAB_TOPIC_ID\"",
"\"SLAB_TOKEN\""
]
| []
| [
"SLAB_TOKEN",
"SLAB_TOPIC_ID"
]
| [] | ["SLAB_TOKEN", "SLAB_TOPIC_ID"] | go | 2 | 0 | |
contrib/gitian-build.py | #!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Copyright (c) 2019-2020 The xnode developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import argparse
import os
import subprocess
import sys
def setup_linux():
global args, workdir
if os.path.isfile('/usr/bin/apt-get'):
programs = ['ruby', 'git', 'make', 'wget', 'curl']
if args.kvm:
programs += ['apt-cacher-ng', 'python-vm-builder', 'qemu-kvm', 'qemu-utils']
elif args.docker:
if not os.path.isfile('/lib/systemd/system/docker.service'):
dockers = ['docker.io', 'docker-ce']
for i in dockers:
return_code = subprocess.call(['sudo', 'apt-get', 'install', '-qq', i])
if return_code == 0:
subprocess.check_call(['sudo', 'usermod', '-aG', 'docker', os.environ['USER']])
print('Docker installed, restart your computer and re-run this script to continue the setup process.')
sys.exit(0)
if return_code != 0:
print('Cannot find any way to install Docker.', file=sys.stderr)
sys.exit(1)
else:
programs += ['apt-cacher-ng', 'lxc', 'debootstrap']
subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs)
setup_repos()
elif args.is_fedora:
pkgmgr = 'dnf'
repourl = 'https://download.docker.com/linux/fedora/docker-ce.repo'
elif args.is_centos:
pkgmgr = 'yum'
repourl = 'https://download.docker.com/linux/centos/docker-ce.repo'
if args.is_fedora or args.is_centos:
programs = ['ruby', 'make', 'wget', 'curl']
if args.kvm:
print('KVM not supported with Fedora/CentOS yet.')
sys.exit(1)
elif args.docker:
if not os.path.isfile('/lib/systemd/system/docker.service'):
user = os.environ['USER']
dockers = ['docker-ce', 'docker-ce-cli', 'containerd.io']
if args.is_fedora:
subprocess.check_call(['sudo', pkgmgr, 'install', '-y', 'dnf-plugins-core'])
subprocess.check_call(['sudo', pkgmgr, 'config-manager', '--add-repo', repourl])
elif args.is_centos:
reqs = ['yum-utils', 'device-mapper-persistent-data', 'lvm2']
subprocess.check_call(['sudo', pkgmgr, 'install', '-y'] + reqs)
subprocess.check_call(['sudo', 'yum-config-manager', '--add-repo', repourl])
subprocess.check_call(['sudo', pkgmgr, 'install', '-y'] + dockers)
subprocess.check_call(['sudo', 'usermod', '-aG', 'docker', user])
subprocess.check_call(['sudo', 'systemctl', 'enable', 'docker'])
print('Docker installed, restart your computer and re-run this script to continue the setup process.')
sys.exit(0)
subprocess.check_call(['sudo', 'systemctl', 'start', 'docker'])
else:
print('LXC not supported with Fedora/CentOS yet.')
sys.exit(1)
if args.is_fedora:
programs += ['git']
if args.is_centos:
# CentOS ships with an insanely outdated version of git that is no longer compatible with gitian builds
# Check current version and update if necessary
oldgit = b'2.' not in subprocess.check_output(['git', '--version'])
if oldgit:
subprocess.check_call(['sudo', pkgmgr, 'remove', '-y', 'git*'])
subprocess.check_call(['sudo', pkgmgr, 'install', '-y', 'https://centos7.iuscommunity.org/ius-release.rpm'])
programs += ['git2u-all']
subprocess.check_call(['sudo', pkgmgr, 'install', '-y'] + programs)
setup_repos()
else:
print('Unsupported system/OS type.')
sys.exit(1)
def setup_darwin():
global args, workdir
programs = []
if not os.path.isfile('/usr/local/bin/wget'):
programs += ['wget']
if not os.path.isfile('/usr/local/bin/git'):
programs += ['git']
if not os.path.isfile('/usr/local/bin/gsha256sum'):
programs += ['coreutils']
if args.docker:
print('Experimental setup for macOS host')
if len(programs) > 0:
subprocess.check_call(['brew', 'install'] + programs)
os.environ['PATH'] = '/usr/local/opt/coreutils/libexec/gnubin' + os.pathsep + os.environ['PATH']
elif args.kvm or not args.docker:
print('KVM and LXC are not supported under macOS at this time.')
sys.exit(0)
setup_repos()
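# Clone the gitian.sigs, detached-sigs, gitian-builder and source repositories, then
# build the base image (KVM, LXC or Docker) used for the deterministic builds.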
def setup_repos():
if not os.path.isdir('gitian.sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/xnodevps-project/gitian.sigs.git'])
if not os.path.isdir('xnode-detached-sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/xnodevps-project/xnode-detached-sigs.git'])
if not os.path.isdir('gitian-builder'):
subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git'])
if not os.path.isdir('xnode'):
subprocess.check_call(['git', 'clone', 'https://github.com/xnodevps-project/xnode.git'])
os.chdir('gitian-builder')
make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64']
if args.docker:
make_image_prog += ['--docker']
elif not args.kvm:
make_image_prog += ['--lxc']
if args.host_os == 'darwin':
subprocess.check_call(['sed', '-i.old', '/50cacher/d', 'bin/make-base-vm'])
if args.host_os == 'linux':
if args.is_fedora or args.is_centos or args.is_wsl:
subprocess.check_call(['sed', '-i', '/50cacher/d', 'bin/make-base-vm'])
subprocess.check_call(make_image_prog)
subprocess.check_call(['git', 'checkout', 'bin/make-base-vm'])
os.chdir(workdir)
if args.host_os == 'linux':
if args.is_bionic and not args.kvm and not args.docker:
subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net'])
print('Reboot is required')
print('Setup complete!')
sys.exit(0)
def build():
global args, workdir
os.makedirs('xnode-binaries/' + args.version, exist_ok=True)
print('\nBuilding Dependencies\n')
os.chdir('gitian-builder')
os.makedirs('inputs', exist_ok=True)
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://downloads.sourceforge.net/project/osslsigncode/osslsigncode/osslsigncode-1.7.1.tar.gz'])
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://bitcoincore.org/cfields/osslsigncode-Backports-to-1.7.1.patch'])
subprocess.check_call(["echo 'a8c4e9cafba922f89de0df1f2152e7be286aba73f78505169bc351a7938dd911 inputs/osslsigncode-Backports-to-1.7.1.patch' | sha256sum -c"], shell=True)
subprocess.check_call(["echo 'f9a8cdb38b9c309326764ebc937cba1523a3a751a7ab05df3ecc99d18ae466c9 inputs/osslsigncode-1.7.1.tar.gz' | sha256sum -c"], shell=True)
subprocess.check_call(['make', '-C', '../xnode/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common'])
if args.linux:
print('\nCompiling ' + args.version + ' Linux')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'xnode='+args.commit, '--url', 'xnode='+args.url, '../xnode/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs/', '../xnode/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call('mv build/out/xnode-*.tar.gz build/out/src/xnode-*.tar.gz ../xnode-binaries/'+args.version, shell=True)
if args.windows:
print('\nCompiling ' + args.version + ' Windows')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'xnode='+args.commit, '--url', 'xnode='+args.url, '../xnode/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-unsigned', '--destination', '../gitian.sigs/', '../xnode/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call('mv build/out/xnode-*-win-unsigned.tar.gz inputs/', shell=True)
subprocess.check_call('mv build/out/xnode-*.zip build/out/xnode-*.exe build/out/src/xnode-*.tar.gz ../xnode-binaries/'+args.version, shell=True)
if args.macos:
print('\nCompiling ' + args.version + ' MacOS')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'xnode='+args.commit, '--url', 'xnode='+args.url, '../xnode/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs/', '../xnode/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call('mv build/out/xnode-*-osx-unsigned.tar.gz inputs/', shell=True)
subprocess.check_call('mv build/out/xnode-*.tar.gz build/out/xnode-*.dmg build/out/src/xnode-*.tar.gz ../xnode-binaries/'+args.version, shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Unsigned Sigs\n')
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer])
subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer])
os.chdir(workdir)
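# Sign the previously built unsigned tarballs (currently MacOS only) and optionally
# commit the resulting signature files.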
def sign():
global args, workdir
os.chdir('gitian-builder')
# TODO: Skip making signed windows sigs until we actually start producing signed windows binaries
#print('\nSigning ' + args.version + ' Windows')
#subprocess.check_call('cp inputs/xnode-' + args.version + '-win-unsigned.tar.gz inputs/xnode-win-unsigned.tar.gz', shell=True)
#subprocess.check_call(['bin/gbuild', '--skip-image', '--upgrade', '--commit', 'signature='+args.commit, '../xnode/contrib/gitian-descriptors/gitian-win-signer.yml'])
#subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-signed', '--destination', '../gitian.sigs/', '../xnode/contrib/gitian-descriptors/gitian-win-signer.yml'])
#subprocess.check_call('mv build/out/xnode-*win64-setup.exe ../xnode-binaries/'+args.version, shell=True)
#subprocess.check_call('mv build/out/xnode-*win32-setup.exe ../xnode-binaries/'+args.version, shell=True)
print('\nSigning ' + args.version + ' MacOS')
subprocess.check_call('cp inputs/xnode-' + args.version + '-osx-unsigned.tar.gz inputs/xnode-osx-unsigned.tar.gz', shell=True)
subprocess.check_call(['bin/gbuild', '--skip-image', '--upgrade', '--commit', 'signature='+args.commit, '../xnode/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-signed', '--destination', '../gitian.sigs/', '../xnode/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call('mv build/out/xnode-osx-signed.dmg ../xnode-binaries/'+args.version+'/xnode-'+args.version+'-osx.dmg', shell=True)
os.chdir(workdir)
if args.commit_files:
os.chdir('gitian.sigs')
commit = False
if os.path.isfile(args.version+'-win-signed/'+args.signer+'/xnode-win-signer-build.assert.sig'):
subprocess.check_call(['git', 'add', args.version+'-win-signed/'+args.signer])
commit = True
if os.path.isfile(args.version+'-osx-signed/'+args.signer+'/xnode-dmg-signer-build.assert.sig'):
subprocess.check_call(['git', 'add', args.version+'-osx-signed/'+args.signer])
commit = True
if commit:
print('\nCommitting '+args.version+' Signed Sigs\n')
subprocess.check_call(['git', 'commit', '-a', '-m', 'Add '+args.version+' signed binary sigs for '+args.signer])
else:
print('\nNothing to commit\n')
os.chdir(workdir)
def verify():
global args, workdir
rc = 0
os.chdir('gitian-builder')
print('\nVerifying v'+args.version+' Linux\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-linux', '../xnode/contrib/gitian-descriptors/gitian-linux.yml']):
print('Verifying v'+args.version+' Linux FAILED\n')
rc = 1
print('\nVerifying v'+args.version+' Windows\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-unsigned', '../xnode/contrib/gitian-descriptors/gitian-win.yml']):
print('Verifying v'+args.version+' Windows FAILED\n')
rc = 1
print('\nVerifying v'+args.version+' MacOS\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-unsigned', '../xnode/contrib/gitian-descriptors/gitian-osx.yml']):
print('Verifying v'+args.version+' MacOS FAILED\n')
rc = 1
# TODO: Skip checking signed windows sigs until we actually start producing signed windows binaries
#print('\nVerifying v'+args.version+' Signed Windows\n')
#if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-signed', '../xnode/contrib/gitian-descriptors/gitian-win-signer.yml']):
# print('Verifying v'+args.version+' Signed Windows FAILED\n')
# rc = 1
print('\nVerifying v'+args.version+' Signed MacOS\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-signed', '../xnode/contrib/gitian-descriptors/gitian-osx-signer.yml']):
print('Verifying v'+args.version+' Signed MacOS FAILED\n')
rc = 1
os.chdir(workdir)
return rc
def main():
global args, workdir
parser = argparse.ArgumentParser(description='Script for running full Gitian builds.')
parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch')
parser.add_argument('-p', '--pull', action='store_true', dest='pull', help='Indicate that the version argument is the number of a github repository pull request')
parser.add_argument('-u', '--url', dest='url', default='https://github.com/xnodevps-project/xnode', help='Specify the URL of the repository. Default is %(default)s')
parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build')
parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build')
parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS')
parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries')
parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS')
parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s')
parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s')
parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC')
parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC')
parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Only works on Debian-based systems (Ubuntu, Debian)')
parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.')
parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git')
parser.add_argument('signer', nargs='?', help='GPG signer to sign each build assert file')
parser.add_argument('version', nargs='?', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified')
args = parser.parse_args()
workdir = os.getcwd()
args.host_os = sys.platform
if args.host_os == 'win32' or args.host_os == 'cygwin':
raise Exception('Error: Native Windows is not supported by this script, use WSL')
if args.host_os == 'linux':
if os.environ['USER'] == 'root':
raise Exception('Error: Do not run this script as the root user')
args.is_bionic = False
args.is_fedora = False
args.is_centos = False
args.is_wsl = False
if os.path.isfile('/usr/bin/lsb_release'):
args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs'])
if os.path.isfile('/etc/fedora-release'):
args.is_fedora = True
if os.path.isfile('/etc/centos-release'):
args.is_centos = True
if os.path.isfile('/proc/version') and open('/proc/version', 'r').read().find('Microsoft'):
args.is_wsl = True
if args.kvm and args.docker:
raise Exception('Error: cannot have both kvm and docker')
# Ensure no more than one environment variable for gitian-builder (USE_LXC, USE_VBOX, USE_DOCKER) is set as they
# can interfere (e.g., USE_LXC being set shadows USE_DOCKER; for details see gitian-builder/libexec/make-clean-vm).
os.environ['USE_LXC'] = ''
os.environ['USE_VBOX'] = ''
os.environ['USE_DOCKER'] = ''
if args.docker:
os.environ['USE_DOCKER'] = '1'
elif not args.kvm:
os.environ['USE_LXC'] = '1'
if 'GITIAN_HOST_IP' not in os.environ.keys():
os.environ['GITIAN_HOST_IP'] = '10.0.3.1'
if 'LXC_GUEST_IP' not in os.environ.keys():
os.environ['LXC_GUEST_IP'] = '10.0.3.5'
if args.setup:
if args.host_os == 'linux':
setup_linux()
elif args.host_os == 'darwin':
setup_darwin()
if args.buildsign:
args.build = True
args.sign = True
if not args.build and not args.sign and not args.verify:
sys.exit(0)
if args.host_os == 'darwin':
os.environ['PATH'] = '/usr/local/opt/coreutils/libexec/gnubin' + os.pathsep + os.environ['PATH']
args.linux = 'l' in args.os
args.windows = 'w' in args.os
args.macos = 'm' in args.os
# Disable for MacOS if no SDK found
if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.11.sdk.tar.gz'):
print('Cannot build for MacOS, SDK does not exist. Will build for other OSes')
args.macos = False
args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign'
if args.detach_sign:
args.commit_files = False
script_name = os.path.basename(sys.argv[0])
if not args.signer:
print(script_name+': Missing signer')
print('Try '+script_name+' --help for more information')
sys.exit(1)
if not args.version:
print(script_name+': Missing version')
print('Try '+script_name+' --help for more information')
sys.exit(1)
# Add leading 'v' for tags
if args.commit and args.pull:
raise Exception('Cannot have both commit and pull')
args.commit = ('' if args.commit else 'v') + args.version
os.chdir('xnode')
if args.pull:
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
if not os.path.isdir('../gitian-builder/inputs/xnode'):
os.makedirs('../gitian-builder/inputs/xnode')
os.chdir('../gitian-builder/inputs/xnode')
if not os.path.isdir('.git'):
subprocess.check_call(['git', 'init'])
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
args.commit = subprocess.check_output(['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True, encoding='utf8').strip()
args.version = 'pull-' + args.version
print(args.commit)
subprocess.check_call(['git', 'fetch'])
subprocess.check_call(['git', 'checkout', args.commit])
os.chdir(workdir)
os.chdir('gitian-builder')
subprocess.check_call(['git', 'pull'])
os.chdir(workdir)
if args.build:
build()
if args.sign:
sign()
if args.verify:
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'pull'])
os.chdir(workdir)
sys.exit(verify())
if __name__ == '__main__':
main()
| []
| []
| [
"LXC_GUEST_IP",
"USE_DOCKER",
"USE_LXC",
"USER",
"USE_VBOX",
"GITIAN_HOST_IP",
"PATH"
]
| [] | ["LXC_GUEST_IP", "USE_DOCKER", "USE_LXC", "USER", "USE_VBOX", "GITIAN_HOST_IP", "PATH"] | python | 7 | 0 | |
logger/default.go | package logger
import (
"context"
"fmt"
"os"
"runtime"
"sort"
"strings"
"sync"
"time"
dlog "github.com/panovateam/go-micro/debug/log"
)
func init() {
lvl, err := GetLevel(os.Getenv("MICRO_LOG_LEVEL"))
if err != nil {
lvl = InfoLevel
}
DefaultLogger = NewHelper(NewLogger(WithLevel(lvl)))
}
type defaultLogger struct {
sync.RWMutex
opts Options
}
// Init(opts...) should only overwrite provided options
func (l *defaultLogger) Init(opts ...Option) error {
for _, o := range opts {
o(&l.opts)
}
return nil
}
func (l *defaultLogger) String() string {
return "default"
}
func (l *defaultLogger) Fields(fields map[string]interface{}) Logger {
l.Lock()
l.opts.Fields = copyFields(fields)
l.Unlock()
return l
}
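// copyFields returns a shallow copy of the fields map so callers can mutate the
// result without racing with the logger's own map.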
func copyFields(src map[string]interface{}) map[string]interface{} {
dst := make(map[string]interface{}, len(src))
for k, v := range src {
dst[k] = v
}
return dst
}
// logCallerfilePath returns a package/file:line description of the caller,
// preserving only the leaf directory name and file name.
func logCallerfilePath(loggingFilePath string) string {
// To make sure we trim the path correctly on Windows too, we
// counter-intuitively need to use '/' and *not* os.PathSeparator here,
// because the path given originates from Go stdlib, specifically
// runtime.Caller() which (as of Mar/17) returns forward slashes even on
// Windows.
//
// See https://github.com/golang/go/issues/3335
// and https://github.com/golang/go/issues/18151
//
// for discussion on the issue on Go side.
idx := strings.LastIndexByte(loggingFilePath, '/')
if idx == -1 {
return loggingFilePath
}
idx = strings.LastIndexByte(loggingFilePath[:idx], '/')
if idx == -1 {
return loggingFilePath
}
return loggingFilePath[idx+1:]
}
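// Log emits a record at the given level when that level is enabled: it copies the
// configured fields, adds the level and caller location, writes the record to the
// debug log and prints it to stdout.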
func (l *defaultLogger) Log(level Level, v ...interface{}) {
// TODO decide does we need to write message if log level not used?
if !l.opts.Level.Enabled(level) {
return
}
l.RLock()
fields := copyFields(l.opts.Fields)
l.RUnlock()
fields["level"] = level.String()
if _, file, line, ok := runtime.Caller(l.opts.CallerSkipCount); ok {
fields["file"] = fmt.Sprintf("%s:%d", logCallerfilePath(file), line)
}
rec := dlog.Record{
Timestamp: time.Now(),
Message: fmt.Sprint(v...),
Metadata: make(map[string]string, len(fields)),
}
keys := make([]string, 0, len(fields))
for k, v := range fields {
keys = append(keys, k)
rec.Metadata[k] = fmt.Sprintf("%v", v)
}
sort.Strings(keys)
metadata := ""
for _, k := range keys {
metadata += fmt.Sprintf(" %s=%v", k, fields[k])
}
dlog.DefaultLog.Write(rec)
t := rec.Timestamp.Format("2006-01-02 15:04:05")
fmt.Printf("%s %s %v\n", t, metadata, rec.Message)
}
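// Logf is the printf-style variant of Log.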
func (l *defaultLogger) Logf(level Level, format string, v ...interface{}) {
// TODO decide does we need to write message if log level not used?
if level < l.opts.Level {
return
}
l.RLock()
fields := copyFields(l.opts.Fields)
l.RUnlock()
fields["level"] = level.String()
if _, file, line, ok := runtime.Caller(l.opts.CallerSkipCount); ok {
fields["file"] = fmt.Sprintf("%s:%d", logCallerfilePath(file), line)
}
rec := dlog.Record{
Timestamp: time.Now(),
Message: fmt.Sprintf(format, v...),
Metadata: make(map[string]string, len(fields)),
}
keys := make([]string, 0, len(fields))
for k, v := range fields {
keys = append(keys, k)
rec.Metadata[k] = fmt.Sprintf("%v", v)
}
sort.Strings(keys)
metadata := ""
for _, k := range keys {
metadata += fmt.Sprintf(" %s=%v", k, fields[k])
}
dlog.DefaultLog.Write(rec)
t := rec.Timestamp.Format("2006-01-02 15:04:05")
fmt.Printf("%s %s %v\n", t, metadata, rec.Message)
}
func (n *defaultLogger) Options() Options {
// not guard against options Context values
n.RLock()
opts := n.opts
opts.Fields = copyFields(n.opts.Fields)
n.RUnlock()
return opts
}
// NewLogger builds a new logger based on options
func NewLogger(opts ...Option) Logger {
// Default options
options := Options{
Level: InfoLevel,
Fields: make(map[string]interface{}),
Out: os.Stderr,
CallerSkipCount: 2,
Context: context.Background(),
}
l := &defaultLogger{opts: options}
if err := l.Init(opts...); err != nil {
l.Log(FatalLevel, err)
}
return l
}
| [
"\"MICRO_LOG_LEVEL\""
]
| []
| [
"MICRO_LOG_LEVEL"
]
| [] | ["MICRO_LOG_LEVEL"] | go | 1 | 0 | |
tools/wasme/cli/pkg/cache/notifying_cache.go | package cache
import (
"crypto/md5"
"fmt"
"os"
"github.com/hashicorp/go-multierror"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
// sends Events to kubernetes when images are added to the cache
type Notifier struct {
kube kubernetes.Interface
wasmeNamespace string
cacheName string
}
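// NewNotifier returns a Notifier that publishes cache events in the given namespace
// against the named cache ConfigMap.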
func NewNotifier(kube kubernetes.Interface, wasmeNamespace string, cacheName string) *Notifier {
return &Notifier{kube: kube, wasmeNamespace: wasmeNamespace, cacheName: cacheName}
}
const (
// marked as "true" always, for searching
CacheGlobalLabel = "cache.wasme.io/cache_event"
// ref to the image
CacheImageRefLabel = "cache.wasme.io/image_ref"
Reason_ImageAdded = "ImageAdded"
Reason_ImageError = "ImageError"
)
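// Notify records a Kubernetes Event on the cache ConfigMap describing whether the
// image was added successfully or failed, and returns the original error (wrapped
// with any event-creation error).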
func (n *Notifier) Notify(err error, image string) error {
var reason, message string
if err != nil {
reason = Reason_ImageError
message = err.Error()
} else {
reason = Reason_ImageAdded
message = fmt.Sprintf("Image %v added successfully", image)
}
_, eventCreateErr := n.kube.CoreV1().Events(n.wasmeNamespace).Create(&v1.Event{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "wasme-cache-event-",
Namespace: n.wasmeNamespace,
Labels: EventLabels(image),
Annotations: EventAnnotations(image),
},
InvolvedObject: v1.ObjectReference{
Kind: "ConfigMap",
Namespace: n.wasmeNamespace,
Name: n.cacheName,
APIVersion: "v1",
},
Reason: reason,
Message: message,
Source: v1.EventSource{
Component: "wasme-cache",
Host: os.Getenv("NODE_HOSTNAME"),
},
})
if eventCreateErr != nil {
return multierror.Append(err, eventCreateErr)
}
return err
}
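// EventLabels returns the labels attached to cache events; the image reference is
// hashed so that it forms a valid label value.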
func EventLabels(image string) map[string]string {
// take hash for valid label name
refLabel := fmt.Sprintf("%x", md5.Sum([]byte(image)))
return map[string]string{
CacheGlobalLabel: "true",
CacheImageRefLabel: refLabel,
}
}
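// EventAnnotations carries the full, unhashed image reference as an annotation.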
func EventAnnotations(image string) map[string]string {
return map[string]string{
CacheImageRefLabel: image,
}
}
| [
"\"NODE_HOSTNAME\""
]
| []
| [
"NODE_HOSTNAME"
]
| [] | ["NODE_HOSTNAME"] | go | 1 | 0 | |
cmd/utils.go | package cmd
import (
"context"
"encoding/csv"
"fmt"
"os"
"os/signal"
"os/user"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/ernoaapa/eliot/pkg/cmd"
ui "github.com/ernoaapa/eliot/pkg/cmd/ui"
"github.com/ernoaapa/eliot/pkg/discovery"
"github.com/ernoaapa/eliot/pkg/printers"
"github.com/ernoaapa/eliot/pkg/sync"
"github.com/ernoaapa/eliot/pkg/utils"
"github.com/sirupsen/logrus"
"github.com/ernoaapa/eliot/pkg/api"
containers "github.com/ernoaapa/eliot/pkg/api/services/containers/v1"
pods "github.com/ernoaapa/eliot/pkg/api/services/pods/v1"
"github.com/ernoaapa/eliot/pkg/config"
"github.com/ernoaapa/eliot/pkg/fs"
"github.com/ernoaapa/eliot/pkg/runtime"
"github.com/urfave/cli"
)
const (
outputHuman = "human"
outputYaml = "yaml"
)
var (
// GlobalFlags are flags what all commands have common
GlobalFlags = []cli.Flag{
cli.BoolFlag{
Name: "debug",
Usage: "enable debug output in logs",
},
cli.BoolFlag{
Name: "quiet",
Usage: "Don't print any progress output",
},
cli.StringFlag{
Name: "output, o",
Usage: fmt.Sprintf("Output format. One of: %s", []string{outputHuman, outputYaml}),
Value: "human",
},
}
)
// GlobalBefore is function what get executed before any commands executes
func GlobalBefore(context *cli.Context) error {
debug := context.GlobalBool("debug")
if debug {
logrus.SetLevel(logrus.DebugLevel)
}
if cmd.IsPipingOut() || context.GlobalBool("quiet") || context.GlobalString("output") != outputHuman {
ui.SetOutput(ui.NewHidden())
} else if debug {
ui.SetOutput(ui.NewDebug())
} else {
ui.SetOutput(ui.NewTerminal())
}
return nil
}
// GetClient creates new cloud API client
func GetClient(config *config.Provider) *api.Client {
uiline := ui.NewLine()
endpoints := config.GetEndpoints()
switch len(endpoints) {
case 0:
uiline.Fatal("No node to connect. You must give node endpoint. E.g. --endpoint=192.168.1.2")
return nil
case 1:
uiline.Loadingf("Connecting to %s (%s)", endpoints[0].Name, endpoints[0].URL)
client := api.NewClient(config.GetNamespace(), endpoints[0])
info, err := client.GetInfo()
if err != nil {
logrus.Debugf("Connection failure: %s", err)
uiline.Fatalf("Failed connect to %s (%s)", endpoints[0].Name, endpoints[0].URL)
}
uiline.Donef("Connected to %s (%s)", info.Hostname, endpoints[0].URL)
return client
default:
uiline.Fatalf("%d node found. You must give target node. E.g. --endpoint=192.168.1.2", len(endpoints))
return nil
}
}
// GetConfig parse yaml config and return the file representation
// In normal cases, you should use GetConfigProvider
func GetConfig(clicontext *cli.Context) *config.Config {
configPath := clicontext.GlobalString("config")
conf, err := config.GetConfig(expandTilde(configPath))
if err != nil {
ui.NewLine().Fatalf("Error while reading configuration file [%s]: %s", configPath, err)
}
return conf
}
// GetConfigProvider return config.Provider to access the current configuration
func GetConfigProvider(clicontext *cli.Context) *config.Provider {
provider := config.NewProvider(GetConfig(clicontext))
if clicontext.GlobalIsSet("namespace") && clicontext.GlobalString("namespace") != "" {
provider.OverrideNamespace(clicontext.GlobalString("namespace"))
}
if clicontext.GlobalIsSet("endpoint") && clicontext.GlobalString("endpoint") != "" {
provider.OverrideEndpoints([]config.Endpoint{{
Name: clicontext.GlobalString("endpoint"),
URL: clicontext.GlobalString("endpoint"),
}})
}
if len(provider.GetEndpoints()) == 0 {
uiline := ui.NewLine().Loading("Discover from network automatically...")
node, err := discovery.Nodes(2 * time.Second)
if err != nil {
uiline.Errorf("Failed to auto-discover node in network: %s", err)
} else {
if len(node) == 0 {
uiline.Warn("No node discovered from network")
} else {
uiline.Donef("Discovered %d node(s) from network", len(node))
}
}
endpoints := []config.Endpoint{}
for _, node := range node {
if len(node.Addresses) > 0 {
endpoints = append(endpoints, config.Endpoint{
Name: node.Hostname,
URL: fmt.Sprintf("%s:%d", utils.GetFirst(node.Addresses, ""), node.GrpcPort),
})
}
}
provider.OverrideEndpoints(endpoints)
}
if clicontext.GlobalIsSet("node") && clicontext.GlobalString("node") != "" {
nodeName := clicontext.GlobalString("node")
endpoint, found := provider.GetEndpointByName(nodeName)
if !found {
ui.NewLine().Errorf("Failed to find node with name %s", nodeName)
}
provider.OverrideEndpoints([]config.Endpoint{endpoint})
}
return provider
}
// UpdateConfig writes config to the config file in yaml format
func UpdateConfig(clicontext *cli.Context, updated *config.Config) error {
configPath := expandTilde(clicontext.GlobalString("config"))
return config.WriteConfig(configPath, updated)
}
// GetLabels return --labels CLI parameter value as string map
func GetLabels(clicontext *cli.Context) map[string]string {
if !clicontext.IsSet("labels") {
return map[string]string{}
}
param := clicontext.String("labels")
values := strings.Split(param, ",")
labels := map[string]string{}
for _, value := range values {
pair := strings.Split(value, "=")
if len(pair) == 2 {
labels[pair[0]] = pair[1]
} else {
ui.NewLine().Fatalf("Invalid --labels parameter [%s]. It must be comma separated key=value list. E.g. '--labels foo=bar,one=two'", param)
}
}
return labels
}
// GetRuntimeClient initialises new runtime client from CLI parameters
func GetRuntimeClient(clicontext *cli.Context, hostname string) runtime.Client {
return runtime.NewContainerdClient(
context.Background(),
clicontext.GlobalDuration("timeout"),
clicontext.GlobalString("containerd"),
clicontext.String("containerd-snapshotter"),
hostname,
)
}
// GetPrinter returns printer for formating resources output
func GetPrinter(clicontext *cli.Context) printers.ResourcePrinter {
switch output := clicontext.GlobalString("output"); output {
case outputHuman:
return printers.NewHumanReadablePrinter()
case outputYaml:
return printers.NewYamlPrinter()
default:
logrus.Fatalf("Unknown output format: %s", output)
return nil
}
}
// MustParseMounts parses a --mount string flags
func MustParseMounts(mounts []string) (result []*containers.Mount) {
for _, str := range mounts {
mount, err := parseMountFlag(str)
if err != nil {
ui.NewLine().Fatalf("Failed to parse --mount flag [%s]: %s", str, err)
}
result = append(result, mount)
}
return result
}
// parseMountFlag parses a mount string in the form "type=foo,source=/path,destination=/target,options=rbind:rw"
func parseMountFlag(m string) (*containers.Mount, error) {
mount := &containers.Mount{}
r := csv.NewReader(strings.NewReader(m))
fields, err := r.Read()
if err != nil {
return mount, err
}
for _, field := range fields {
v := strings.Split(field, "=")
if len(v) != 2 {
return mount, fmt.Errorf("invalid mount specification: expected key=val")
}
key := v[0]
val := v[1]
switch key {
case "type":
mount.Type = val
case "source", "src":
mount.Source = val
case "destination", "dst":
mount.Destination = val
case "options":
mount.Options = strings.Split(val, ":")
default:
return mount, fmt.Errorf("mount option %q not supported", key)
}
}
return mount, nil
}
// MustParseBinds parses a --bind string flags
func MustParseBinds(binds []string) (result []*containers.Mount) {
for _, flag := range binds {
bind, err := ParseBindFlag(flag)
if err != nil {
ui.NewLine().Fatalf("Failed to parse --bind flag: %s", err)
}
result = append(result, bind)
}
return result
}
// MustParseBindFlag is like ParseBindFlag but panics if syntax is invalid
func MustParseBindFlag(b string) *containers.Mount {
m, err := ParseBindFlag(b)
if err != nil {
panic("Invalid mount format: " + b + ". Error: " + err.Error())
}
return m
}
// ParseBindFlag parses a mount string in the form "/var:/var:rshared"
func ParseBindFlag(b string) (*containers.Mount, error) {
parts := strings.Split(b, ":")
if len(parts) < 2 {
return nil, fmt.Errorf("Cannot parse bind, missing ':': %s", b)
}
if len(parts) > 3 {
return nil, fmt.Errorf("Cannot parse bind, too many ':': %s", b)
}
src := parts[0]
dest := parts[1]
opts := []string{"rw", "rbind", "rprivate"}
if len(parts) == 3 {
opts = append(strings.Split(parts[2], ","), "rbind")
}
return &containers.Mount{
Type: "bind",
Destination: dest,
Source: src,
Options: opts,
}, nil
}
// MustParseSyncs parses a sync string in the form "~/local/dir:/data"
func MustParseSyncs(syncs []string) (result []sync.Sync) {
for _, value := range syncs {
sync, err := sync.Parse(value)
if err != nil {
logrus.Fatalf("Error reading sync argument: %s", err)
}
result = append(result, sync)
}
return result
}
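// fileExists reports whether the given path exists on disk.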
func fileExists(path string) bool {
_, err := os.Stat(path)
return !os.IsNotExist(err)
}
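// expandTilde replaces a leading "~/" in the path with the current user's home directory.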
func expandTilde(path string) string {
usr, _ := user.Current()
dir := usr.HomeDir
if len(path) >= 2 && path[:2] == "~/" {
return filepath.Join(dir, path[2:])
}
return path
}
// FilterByPodName return new list of Pods which name matches with given podName
func FilterByPodName(source []*pods.Pod, podName string) []*pods.Pod {
for _, pod := range source {
if pod.Metadata.Name == podName {
return []*pods.Pod{
pod,
}
}
}
return []*pods.Pod{}
}
// ForwardAllSignals will listen all kill signals and pass it to the handler
func ForwardAllSignals(handler func(syscall.Signal) error) chan os.Signal {
sigc := make(chan os.Signal, 128)
signal.Notify(sigc)
go func() {
for s := range sigc {
signal := s.(syscall.Signal)
// Doesn't make sense to forward "child process terminates" because it's about this CLI child process
if signal == syscall.SIGCHLD {
continue
}
if err := handler(signal); err != nil {
logrus.WithError(err).Errorf("forward signal %s", s)
}
}
}()
return sigc
}
// GetCurrentDirectory resolves current directory where the command were executed
// Tries different options until find one or fails
func GetCurrentDirectory() string {
for _, path := range []string{".", os.Args[0], os.Getenv("PWD")} {
dir, err := filepath.Abs(filepath.Dir(path))
if err == nil && fs.DirExist(path) {
return dir
}
}
ui.NewLine().Fatal("Failed to resolve current directory")
return ""
}
// First return first non empty "" string or empty ""
func First(values ...string) string {
for _, str := range values {
if str != "" {
return str
}
}
return ""
}
// DropDoubleDash search for double dash (--) and if found
// return arguments after it, otherwise return all arguments
func DropDoubleDash(args []string) []string {
for index, arg := range args {
if arg == "--" {
return args[index+1:]
}
}
return args
}
// ResolveContainerID resolves ContainerID from list of containers.
// If multiple containers, you must define containerName, otherwise it's optional.
func ResolveContainerID(containers []*containers.ContainerStatus, containerName string) (string, error) {
containerCount := len(containers)
if containerCount == 0 {
return "", fmt.Errorf("Pod don't have any containers")
} else if containerCount == 1 {
return containers[0].ContainerID, nil
} else {
if containerName == "" {
return "", fmt.Errorf("Pod contains %d containers, you must define container name", containerCount)
}
for _, status := range containers {
if status.Name == containerName {
return status.ContainerID, nil
}
}
return "", fmt.Errorf("Pod contains %d containers, you must define container name", containerCount)
}
}
// FindRunningContainerID search from Pod definition a containerID by container name
func FindRunningContainerID(pod *pods.Pod, name string) (string, error) {
if pod.Status != nil && len(pod.Status.ContainerStatuses) > 0 {
for _, status := range pod.Status.ContainerStatuses {
if status.Name == name {
return status.ContainerID, nil
}
}
}
return "", fmt.Errorf("Cannot find ContainerID with name %s", name)
}
// StopCatch will close the given channel when receives Stop signal (^C)
func StopCatch(sigc chan os.Signal) {
signal.Stop(sigc)
close(sigc)
}
| [
"\"PWD\""
]
| []
| [
"PWD"
]
| [] | ["PWD"] | go | 1 | 0 | |
vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/span.go | // Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-2020 Datadog, Inc.
//go:generate msgp -unexported -marshal=false -o=span_msgp.go -tests=false
package tracer
import (
"fmt"
"os"
"reflect"
"runtime"
"runtime/debug"
"strconv"
"strings"
"sync"
"time"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/internal"
"gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig"
"github.com/tinylib/msgp/msgp"
"golang.org/x/xerrors"
)
type (
// spanList implements msgp.Encodable on top of a slice of spans.
spanList []*span
// spanLists implements msgp.Decodable on top of a slice of spanList.
// This type is only used in tests.
spanLists []spanList
)
var (
_ ddtrace.Span = (*span)(nil)
_ msgp.Encodable = (*spanList)(nil)
_ msgp.Decodable = (*spanLists)(nil)
)
// errorConfig holds customization options for setting error tags.
type errorConfig struct {
noDebugStack bool
stackFrames uint
stackSkip uint
}
// span represents a computation. Callers must call Finish when a span is
// complete to ensure it's submitted.
type span struct {
sync.RWMutex `msg:"-"`
Name string `msg:"name"` // operation name
Service string `msg:"service"` // service name (i.e. "grpc.server", "http.request")
Resource string `msg:"resource"` // resource name (i.e. "/user?id=123", "SELECT * FROM users")
Type string `msg:"type"` // protocol associated with the span (i.e. "web", "db", "cache")
Start int64 `msg:"start"` // span start time expressed in nanoseconds since epoch
Duration int64 `msg:"duration"` // duration of the span expressed in nanoseconds
Meta map[string]string `msg:"meta,omitempty"` // arbitrary map of metadata
Metrics map[string]float64 `msg:"metrics,omitempty"` // arbitrary map of numeric metrics
SpanID uint64 `msg:"span_id"` // identifier of this span
TraceID uint64 `msg:"trace_id"` // identifier of the root span
ParentID uint64 `msg:"parent_id"` // identifier of the span's direct parent
Error int32 `msg:"error"` // error status of the span; 0 means no errors
noDebugStack bool `msg:"-"` // disables debug stack traces
finished bool `msg:"-"` // true if the span has been submitted to a tracer.
context *spanContext `msg:"-"` // span propagation context
taskEnd func() // ends execution tracer (runtime/trace) task, if started
}
// Context yields the SpanContext for this Span. Note that the return
// value of Context() is still valid after a call to Finish(). This is
// called the span context and it is different from Go's context.
func (s *span) Context() ddtrace.SpanContext { return s.context }
// SetBaggageItem sets a key/value pair as baggage on the span. Baggage items
// are propagated down to descendant spans and injected cross-process. Use with
// care as it adds extra load onto your tracing layer.
func (s *span) SetBaggageItem(key, val string) {
s.context.setBaggageItem(key, val)
}
// BaggageItem gets the value for a baggage item given its key. Returns the
// empty string if the value isn't found in this Span.
func (s *span) BaggageItem(key string) string {
return s.context.baggageItem(key)
}
// SetTag adds a set of key/value metadata to the span.
func (s *span) SetTag(key string, value interface{}) {
s.Lock()
defer s.Unlock()
// We don't lock spans when flushing, so we could have a data race when
// modifying a span as it's being flushed. This protects us against that
// race, since spans are marked `finished` before we flush them.
if s.finished {
return
}
switch key {
case ext.Error:
s.setTagError(value, &errorConfig{})
return
}
if v, ok := value.(bool); ok {
s.setTagBool(key, v)
return
}
if v, ok := value.(string); ok {
s.setMeta(key, v)
return
}
if v, ok := toFloat64(value); ok {
s.setMetric(key, v)
return
}
// not numeric, not a string, not a bool, and not an error
s.setMeta(key, fmt.Sprint(value))
}
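// Illustrative usage sketch (not part of the library source): tags are normally set
// through the public ddtrace.Span interface, e.g.
//
//	sp, ctx := tracer.StartSpanFromContext(ctx, "http.request")
//	sp.SetTag("http.status_code", 200)  // numeric -> Metrics
//	sp.SetTag("customer.id", "abc-123") // string  -> Meta
//	sp.SetTag(ext.Error, err)           // marks the span as errored
//	defer sp.Finish()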
// setTagError sets the error tag. It accounts for various valid scenarios.
// This method is not safe for concurrent use.
func (s *span) setTagError(value interface{}, cfg *errorConfig) {
if s.finished {
return
}
switch v := value.(type) {
case bool:
// bool value as per Opentracing spec.
if !v {
s.Error = 0
} else {
s.Error = 1
}
case error:
// if anyone sets an error value as the tag, be nice here
// and provide all the benefits.
s.Error = 1
s.setMeta(ext.ErrorMsg, v.Error())
s.setMeta(ext.ErrorType, reflect.TypeOf(v).String())
if !cfg.noDebugStack {
if cfg.stackFrames == 0 {
s.setMeta(ext.ErrorStack, string(debug.Stack()))
} else {
s.setMeta(ext.ErrorStack, takeStacktrace(cfg.stackFrames, cfg.stackSkip))
}
}
switch v.(type) {
case xerrors.Formatter:
s.setMeta(ext.ErrorDetails, fmt.Sprintf("%+v", v))
case fmt.Formatter:
// pkg/errors approach
s.setMeta(ext.ErrorDetails, fmt.Sprintf("%+v", v))
}
case nil:
// no error
s.Error = 0
default:
// in all other cases, let's assume that setting this tag
// is the result of an error.
s.Error = 1
}
}
// takeStacktrace returns a formatted stack trace of at most n frames, skipping skip
// additional frames beyond runtime.Callers and takeStacktrace itself.
func takeStacktrace(n, skip uint) string {
var builder strings.Builder
pcs := make([]uintptr, n)
// +2 to exclude runtime.Callers and takeStacktrace
numFrames := runtime.Callers(2+int(skip), pcs)
if numFrames == 0 {
return ""
}
frames := runtime.CallersFrames(pcs[:numFrames])
for i := 0; ; i++ {
frame, more := frames.Next()
if i != 0 {
builder.WriteByte('\n')
}
builder.WriteString(frame.Function)
builder.WriteByte('\n')
builder.WriteByte('\t')
builder.WriteString(frame.File)
builder.WriteByte(':')
builder.WriteString(strconv.Itoa(frame.Line))
if !more {
break
}
}
return builder.String()
}
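// Illustrative sketch (frame values are made up): each captured frame contributes a
// function line followed by an indented file:line, e.g.
//
//	github.com/acme/app/handlers.Process
//		/go/src/app/handlers/process.go:42
//	main.main
//		/go/src/app/main.go:17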
// setMeta sets a string tag. This method is not safe for concurrent use.
func (s *span) setMeta(key, v string) {
if s.Meta == nil {
s.Meta = make(map[string]string, 1)
}
switch key {
case ext.SpanName:
s.Name = v
case ext.ServiceName:
s.Service = v
case ext.ResourceName:
s.Resource = v
case ext.SpanType:
s.Type = v
default:
s.Meta[key] = v
}
}
// setTagBool sets a boolean tag on the span.
func (s *span) setTagBool(key string, v bool) {
switch key {
case ext.AnalyticsEvent:
if v {
s.setMetric(ext.EventSampleRate, 1.0)
} else {
s.setMetric(ext.EventSampleRate, 0.0)
}
case ext.ManualDrop:
if v {
s.setMetric(ext.SamplingPriority, ext.PriorityUserReject)
}
case ext.ManualKeep:
if v {
s.setMetric(ext.SamplingPriority, ext.PriorityUserKeep)
}
default:
if v {
s.setMeta(key, "true")
} else {
s.setMeta(key, "false")
}
}
}
// setMetric sets a numeric tag, in our case called a metric. This method
// is not safe for concurrent use.
func (s *span) setMetric(key string, v float64) {
if s.Metrics == nil {
s.Metrics = make(map[string]float64, 1)
}
switch key {
case ext.SamplingPriority:
// setting sampling priority per spec
s.Metrics[keySamplingPriority] = v
s.context.setSamplingPriority(int(v))
default:
s.Metrics[key] = v
}
}
// Finish closes this Span (but not its children) providing the duration
// of its part of the tracing session.
func (s *span) Finish(opts ...ddtrace.FinishOption) {
t := now()
if len(opts) > 0 {
cfg := ddtrace.FinishConfig{
NoDebugStack: s.noDebugStack,
}
for _, fn := range opts {
fn(&cfg)
}
if !cfg.FinishTime.IsZero() {
t = cfg.FinishTime.UnixNano()
}
if cfg.Error != nil {
s.Lock()
s.setTagError(cfg.Error, &errorConfig{
noDebugStack: cfg.NoDebugStack,
stackFrames: cfg.StackFrames,
stackSkip: cfg.SkipStackFrames,
})
s.Unlock()
}
}
if s.taskEnd != nil {
s.taskEnd()
}
s.finish(t)
}
// SetOperationName sets or changes the operation name.
func (s *span) SetOperationName(operationName string) {
s.Lock()
defer s.Unlock()
s.Name = operationName
}
func (s *span) finish(finishTime int64) {
s.Lock()
defer s.Unlock()
// We don't lock spans when flushing, so we could have a data race when
// modifying a span as it's being flushed. This protects us against that
// race, since spans are marked `finished` before we flush them.
if s.finished {
// already finished
return
}
if s.Duration == 0 {
s.Duration = finishTime - s.Start
}
s.finished = true
if s.context.drop {
// not sampled by local sampler
return
}
s.context.finish()
}
// String returns a human readable representation of the span. Not for
// production, just debugging.
func (s *span) String() string {
lines := []string{
fmt.Sprintf("Name: %s", s.Name),
fmt.Sprintf("Service: %s", s.Service),
fmt.Sprintf("Resource: %s", s.Resource),
fmt.Sprintf("TraceID: %d", s.TraceID),
fmt.Sprintf("SpanID: %d", s.SpanID),
fmt.Sprintf("ParentID: %d", s.ParentID),
fmt.Sprintf("Start: %s", time.Unix(0, s.Start)),
fmt.Sprintf("Duration: %s", time.Duration(s.Duration)),
fmt.Sprintf("Error: %d", s.Error),
fmt.Sprintf("Type: %s", s.Type),
"Tags:",
}
s.RLock()
for key, val := range s.Meta {
lines = append(lines, fmt.Sprintf("\t%s:%s", key, val))
}
for key, val := range s.Metrics {
lines = append(lines, fmt.Sprintf("\t%s:%f", key, val))
}
s.RUnlock()
return strings.Join(lines, "\n")
}
// Format implements fmt.Formatter.
func (s *span) Format(f fmt.State, c rune) {
switch c {
case 's':
fmt.Fprint(f, s.String())
case 'v':
if svc := globalconfig.ServiceName(); svc != "" {
fmt.Fprintf(f, "dd.service=%s ", svc)
}
if tr, ok := internal.GetGlobalTracer().(*tracer); ok {
if tr.config.env != "" {
fmt.Fprintf(f, "dd.env=%s ", tr.config.env)
}
if tr.config.version != "" {
fmt.Fprintf(f, "dd.version=%s ", tr.config.version)
}
} else {
if env := os.Getenv("DD_ENV"); env != "" {
fmt.Fprintf(f, "dd.env=%s ", env)
}
if v := os.Getenv("DD_VERSION"); v != "" {
fmt.Fprintf(f, "dd.version=%s ", v)
}
}
fmt.Fprintf(f, `dd.trace_id="%d" dd.span_id="%d"`, s.TraceID, s.SpanID)
default:
fmt.Fprintf(f, "%%!%c(ddtrace.Span=%v)", c, s)
}
}
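// Illustrative sketch (output values are assumed): the 'v' verb is intended for log
// correlation, so a statement such as
//
//	log.Printf("%v handling request", span)
//
// would, with service/env/version configured, emit something like
//
//	dd.service=api dd.env=prod dd.version=1.2.3 dd.trace_id="123" dd.span_id="456" handling request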
const (
keySamplingPriority = "_sampling_priority_v1"
keySamplingPriorityRate = "_sampling_priority_rate_v1"
keyOrigin = "_dd.origin"
keyHostname = "_dd.hostname"
keyRulesSamplerAppliedRate = "_dd.rule_psr"
keyRulesSamplerLimiterRate = "_dd.limit_psr"
keyMeasured = "_dd.measured"
// keyTopLevel is the key of top level metric indicating if a span is top level.
// A top level span is a local root (parent span of the local trace) or the first span of each service.
keyTopLevel = "_dd.top_level"
)
| [
"\"DD_ENV\"",
"\"DD_VERSION\""
]
| []
| [
"DD_VERSION",
"DD_ENV"
]
| [] | ["DD_VERSION", "DD_ENV"] | go | 2 | 0 | |
providers.py | """ Third party api wrappers"""
import os
import json
import nexmo
import africastalking
username = os.getenv('africastalking_username')
api_key = os.getenv('africastalking_api_key')
africastalking.initialize(username, api_key)
sms = africastalking.SMS
class ProvidersWrapper:
""" Class with all the thirdy party helper functions"""
def send_message(number, message):
client = nexmo.Client(key=os.getenv('nexmokey'), secret=os.getenv('nexmosecret'))
response = client.send_message({
'from': 'Nexmo',
'to': number,
'text': message,
})
if response["messages"][0]["status"] != "0":
response = sms.send(message, ['+' + number])
return response
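# Illustrative usage sketch (not part of the original module; the number and text are
# placeholders, and credentials are read from the environment variables above):
if __name__ == '__main__':
    print(ProvidersWrapper.send_message('254700000000', 'Hello from providers.py'))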
| []
| []
| [
"nexmosecret",
"nexmokey",
"africastalking_username",
"africastalking_api_key"
]
| [] | ["nexmosecret", "nexmokey", "africastalking_username", "africastalking_api_key"] | python | 4 | 0 | |
vendor/src/github.com/lib/pq/conn.go | package pq
import (
"bufio"
"crypto/md5"
"database/sql"
"database/sql/driver"
"encoding/binary"
"errors"
"fmt"
"io"
"net"
"os"
"os/user"
"path"
"path/filepath"
"strconv"
"strings"
"time"
"unicode"
"github.com/lib/pq/oid"
)
// Common error types
var (
ErrNotSupported = errors.New("pq: Unsupported command")
ErrInFailedTransaction = errors.New("pq: Could not complete operation in a failed transaction")
ErrSSLNotSupported = errors.New("pq: SSL is not enabled on the server")
ErrSSLKeyHasWorldPermissions = errors.New("pq: Private key file has group or world access. Permissions should be u=rw (0600) or less")
ErrCouldNotDetectUsername = errors.New("pq: Could not detect default username. Please provide one explicitly")
errUnexpectedReady = errors.New("unexpected ReadyForQuery")
errNoRowsAffected = errors.New("no RowsAffected available after the empty statement")
errNoLastInsertID = errors.New("no LastInsertId available after the empty statement")
)
type Driver struct{}
func (d *Driver) Open(name string) (driver.Conn, error) {
return Open(name)
}
func init() {
sql.Register("postgres", &Driver{})
}
type parameterStatus struct {
// server version in the same format as server_version_num, or 0 if
// unavailable
serverVersion int
// the current location based on the TimeZone value of the session, if
// available
currentLocation *time.Location
}
type transactionStatus byte
const (
txnStatusIdle transactionStatus = 'I'
txnStatusIdleInTransaction transactionStatus = 'T'
txnStatusInFailedTransaction transactionStatus = 'E'
)
func (s transactionStatus) String() string {
switch s {
case txnStatusIdle:
return "idle"
case txnStatusIdleInTransaction:
return "idle in transaction"
case txnStatusInFailedTransaction:
return "in a failed transaction"
default:
errorf("unknown transactionStatus %d", s)
}
panic("not reached")
}
type Dialer interface {
Dial(network, address string) (net.Conn, error)
DialTimeout(network, address string, timeout time.Duration) (net.Conn, error)
}
type defaultDialer struct{}
func (d defaultDialer) Dial(ntw, addr string) (net.Conn, error) {
return net.Dial(ntw, addr)
}
func (d defaultDialer) DialTimeout(ntw, addr string, timeout time.Duration) (net.Conn, error) {
return net.DialTimeout(ntw, addr, timeout)
}
type conn struct {
c net.Conn
buf *bufio.Reader
namei int
scratch [512]byte
txnStatus transactionStatus
txnFinish func()
// Save connection arguments to use during CancelRequest.
dialer Dialer
opts values
// Cancellation key data for use with CancelRequest messages.
processID int
secretKey int
parameterStatus parameterStatus
saveMessageType byte
saveMessageBuffer []byte
// If true, this connection is bad and all public-facing functions should
// return ErrBadConn.
bad bool
// If set, this connection should never use the binary format when
// receiving query results from prepared statements. Only provided for
// debugging.
disablePreparedBinaryResult bool
// Whether to always send []byte parameters over as binary. Enables single
// round-trip mode for non-prepared Query calls.
binaryParameters bool
// If true this connection is in the middle of a COPY
inCopy bool
}
// Handle driver-side settings in parsed connection string.
func (cn *conn) handleDriverSettings(o values) (err error) {
boolSetting := func(key string, val *bool) error {
if value, ok := o[key]; ok {
if value == "yes" {
*val = true
} else if value == "no" {
*val = false
} else {
return fmt.Errorf("unrecognized value %q for %s", value, key)
}
}
return nil
}
err = boolSetting("disable_prepared_binary_result", &cn.disablePreparedBinaryResult)
if err != nil {
return err
}
err = boolSetting("binary_parameters", &cn.binaryParameters)
if err != nil {
return err
}
return nil
}
func (cn *conn) handlePgpass(o values) {
// if a password was supplied, do not process .pgpass
if _, ok := o["password"]; ok {
return
}
filename := os.Getenv("PGPASSFILE")
if filename == "" {
// XXX this code doesn't work on Windows where the default filename is
// XXX %APPDATA%\postgresql\pgpass.conf
user, err := user.Current()
if err != nil {
return
}
filename = filepath.Join(user.HomeDir, ".pgpass")
}
fileinfo, err := os.Stat(filename)
if err != nil {
return
}
mode := fileinfo.Mode()
if mode&(0x77) != 0 {
// XXX should warn about incorrect .pgpass permissions as psql does
return
}
file, err := os.Open(filename)
if err != nil {
return
}
defer file.Close()
scanner := bufio.NewScanner(io.Reader(file))
hostname := o["host"]
ntw, _ := network(o)
port := o["port"]
db := o["dbname"]
username := o["user"]
// From: https://github.com/tg/pgpass/blob/master/reader.go
getFields := func(s string) []string {
fs := make([]string, 0, 5)
f := make([]rune, 0, len(s))
var esc bool
for _, c := range s {
switch {
case esc:
f = append(f, c)
esc = false
case c == '\\':
esc = true
case c == ':':
fs = append(fs, string(f))
f = f[:0]
default:
f = append(f, c)
}
}
return append(fs, string(f))
}
for scanner.Scan() {
line := scanner.Text()
if len(line) == 0 || line[0] == '#' {
continue
}
split := getFields(line)
if len(split) != 5 {
continue
}
if (split[0] == "*" || split[0] == hostname || (split[0] == "localhost" && (hostname == "" || ntw == "unix"))) && (split[1] == "*" || split[1] == port) && (split[2] == "*" || split[2] == db) && (split[3] == "*" || split[3] == username) {
o["password"] = split[4]
return
}
}
}
func (cn *conn) writeBuf(b byte) *writeBuf {
cn.scratch[0] = b
return &writeBuf{
buf: cn.scratch[:5],
pos: 1,
}
}
func Open(name string) (_ driver.Conn, err error) {
return DialOpen(defaultDialer{}, name)
}
func DialOpen(d Dialer, name string) (_ driver.Conn, err error) {
// Handle any panics during connection initialization. Note that we
// specifically do *not* want to use errRecover(), as that would turn any
// connection errors into ErrBadConns, hiding the real error message from
// the user.
defer errRecoverNoErrBadConn(&err)
o := make(values)
// A number of defaults are applied here, in this order:
//
// * Very low precedence defaults applied in every situation
// * Environment variables
// * Explicitly passed connection information
o["host"] = "localhost"
o["port"] = "5432"
// N.B.: Extra float digits should be set to 3, but that breaks
// Postgres 8.4 and older, where the max is 2.
o["extra_float_digits"] = "2"
for k, v := range parseEnviron(os.Environ()) {
o[k] = v
}
if strings.HasPrefix(name, "postgres://") || strings.HasPrefix(name, "postgresql://") {
name, err = ParseURL(name)
if err != nil {
return nil, err
}
}
if err := parseOpts(name, o); err != nil {
return nil, err
}
// Use the "fallback" application name if necessary
if fallback, ok := o["fallback_application_name"]; ok {
if _, ok := o["application_name"]; !ok {
o["application_name"] = fallback
}
}
// We can't work with any client_encoding other than UTF-8 currently.
// However, we have historically allowed the user to set it to UTF-8
// explicitly, and there's no reason to break such programs, so allow that.
// Note that the "options" setting could also set client_encoding, but
// parsing its value is not worth it. Instead, we always explicitly send
// client_encoding as a separate run-time parameter, which should override
// anything set in options.
if enc, ok := o["client_encoding"]; ok && !isUTF8(enc) {
return nil, errors.New("client_encoding must be absent or 'UTF8'")
}
o["client_encoding"] = "UTF8"
// DateStyle needs a similar treatment.
if datestyle, ok := o["datestyle"]; ok {
if datestyle != "ISO, MDY" {
panic(fmt.Sprintf("setting datestyle must be absent or %v; got %v",
"ISO, MDY", datestyle))
}
} else {
o["datestyle"] = "ISO, MDY"
}
// If a user is not provided by any other means, the last
// resort is to use the current operating system provided user
// name.
if _, ok := o["user"]; !ok {
u, err := userCurrent()
if err != nil {
return nil, err
}
o["user"] = u
}
cn := &conn{
opts: o,
dialer: d,
}
err = cn.handleDriverSettings(o)
if err != nil {
return nil, err
}
cn.handlePgpass(o)
cn.c, err = dial(d, o)
if err != nil {
return nil, err
}
cn.ssl(o)
cn.buf = bufio.NewReader(cn.c)
cn.startup(o)
// reset the deadline, in case one was set (see dial)
if timeout, ok := o["connect_timeout"]; ok && timeout != "0" {
err = cn.c.SetDeadline(time.Time{})
}
return cn, err
}
func dial(d Dialer, o values) (net.Conn, error) {
ntw, addr := network(o)
// SSL is not necessary or supported over UNIX domain sockets
if ntw == "unix" {
o["sslmode"] = "disable"
}
// Zero or not specified means wait indefinitely.
if timeout, ok := o["connect_timeout"]; ok && timeout != "0" {
seconds, err := strconv.ParseInt(timeout, 10, 0)
if err != nil {
return nil, fmt.Errorf("invalid value for parameter connect_timeout: %s", err)
}
duration := time.Duration(seconds) * time.Second
// connect_timeout should apply to the entire connection establishment
// procedure, so we both use a timeout for the TCP connection
// establishment and set a deadline for doing the initial handshake.
// The deadline is then reset after startup() is done.
deadline := time.Now().Add(duration)
conn, err := d.DialTimeout(ntw, addr, duration)
if err != nil {
return nil, err
}
err = conn.SetDeadline(deadline)
return conn, err
}
return d.Dial(ntw, addr)
}
func network(o values) (string, string) {
host := o["host"]
if strings.HasPrefix(host, "/") {
sockPath := path.Join(host, ".s.PGSQL."+o["port"])
return "unix", sockPath
}
return "tcp", net.JoinHostPort(host, o["port"])
}
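// Illustrative sketch (paths assumed): given typical connection values,
//
//	network(values{"host": "/var/run/postgresql", "port": "5432"})
//	// -> "unix", "/var/run/postgresql/.s.PGSQL.5432"
//	network(values{"host": "db.example.com", "port": "5432"})
//	// -> "tcp", "db.example.com:5432"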
type values map[string]string
// scanner implements a tokenizer for libpq-style option strings.
type scanner struct {
s []rune
i int
}
// newScanner returns a new scanner initialized with the option string s.
func newScanner(s string) *scanner {
return &scanner{[]rune(s), 0}
}
// Next returns the next rune.
// It returns 0, false if the end of the text has been reached.
func (s *scanner) Next() (rune, bool) {
if s.i >= len(s.s) {
return 0, false
}
r := s.s[s.i]
s.i++
return r, true
}
// SkipSpaces returns the next non-whitespace rune.
// It returns 0, false if the end of the text has been reached.
func (s *scanner) SkipSpaces() (rune, bool) {
r, ok := s.Next()
for unicode.IsSpace(r) && ok {
r, ok = s.Next()
}
return r, ok
}
// parseOpts parses the options from name and adds them to the values.
//
// The parsing code is based on conninfo_parse from libpq's fe-connect.c
func parseOpts(name string, o values) error {
s := newScanner(name)
for {
var (
keyRunes, valRunes []rune
r rune
ok bool
)
if r, ok = s.SkipSpaces(); !ok {
break
}
// Scan the key
for !unicode.IsSpace(r) && r != '=' {
keyRunes = append(keyRunes, r)
if r, ok = s.Next(); !ok {
break
}
}
// Skip any whitespace if we're not at the = yet
if r != '=' {
r, ok = s.SkipSpaces()
}
// The current character should be =
if r != '=' || !ok {
return fmt.Errorf(`missing "=" after %q in connection info string"`, string(keyRunes))
}
// Skip any whitespace after the =
if r, ok = s.SkipSpaces(); !ok {
// If we reach the end here, the last value is just an empty string as per libpq.
o[string(keyRunes)] = ""
break
}
if r != '\'' {
for !unicode.IsSpace(r) {
if r == '\\' {
if r, ok = s.Next(); !ok {
return fmt.Errorf(`missing character after backslash`)
}
}
valRunes = append(valRunes, r)
if r, ok = s.Next(); !ok {
break
}
}
} else {
quote:
for {
if r, ok = s.Next(); !ok {
return fmt.Errorf(`unterminated quoted string literal in connection string`)
}
switch r {
case '\'':
break quote
case '\\':
r, _ = s.Next()
fallthrough
default:
valRunes = append(valRunes, r)
}
}
}
o[string(keyRunes)] = string(valRunes)
}
return nil
}
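// Illustrative sketch (values assumed): a libpq-style connection string such as
//
//	user=bob password='secret value' dbname=mydb sslmode=disable
//
// populates o with {"user": "bob", "password": "secret value", "dbname": "mydb",
// "sslmode": "disable"}.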
func (cn *conn) isInTransaction() bool {
return cn.txnStatus == txnStatusIdleInTransaction ||
cn.txnStatus == txnStatusInFailedTransaction
}
func (cn *conn) checkIsInTransaction(intxn bool) {
if cn.isInTransaction() != intxn {
cn.bad = true
errorf("unexpected transaction status %v", cn.txnStatus)
}
}
func (cn *conn) Begin() (_ driver.Tx, err error) {
return cn.begin("")
}
func (cn *conn) begin(mode string) (_ driver.Tx, err error) {
if cn.bad {
return nil, driver.ErrBadConn
}
defer cn.errRecover(&err)
cn.checkIsInTransaction(false)
_, commandTag, err := cn.simpleExec("BEGIN" + mode)
if err != nil {
return nil, err
}
if commandTag != "BEGIN" {
cn.bad = true
return nil, fmt.Errorf("unexpected command tag %s", commandTag)
}
if cn.txnStatus != txnStatusIdleInTransaction {
cn.bad = true
return nil, fmt.Errorf("unexpected transaction status %v", cn.txnStatus)
}
return cn, nil
}
func (cn *conn) closeTxn() {
if finish := cn.txnFinish; finish != nil {
finish()
}
}
func (cn *conn) Commit() (err error) {
defer cn.closeTxn()
if cn.bad {
return driver.ErrBadConn
}
defer cn.errRecover(&err)
cn.checkIsInTransaction(true)
// We don't want the client to think that everything is okay if it tries
// to commit a failed transaction. However, no matter what we return,
// database/sql will release this connection back into the free connection
// pool so we have to abort the current transaction here. Note that you
// would get the same behaviour if you issued a COMMIT in a failed
// transaction, so it's also the least surprising thing to do here.
if cn.txnStatus == txnStatusInFailedTransaction {
if err := cn.Rollback(); err != nil {
return err
}
return ErrInFailedTransaction
}
_, commandTag, err := cn.simpleExec("COMMIT")
if err != nil {
if cn.isInTransaction() {
cn.bad = true
}
return err
}
if commandTag != "COMMIT" {
cn.bad = true
return fmt.Errorf("unexpected command tag %s", commandTag)
}
cn.checkIsInTransaction(false)
return nil
}
func (cn *conn) Rollback() (err error) {
defer cn.closeTxn()
if cn.bad {
return driver.ErrBadConn
}
defer cn.errRecover(&err)
cn.checkIsInTransaction(true)
_, commandTag, err := cn.simpleExec("ROLLBACK")
if err != nil {
if cn.isInTransaction() {
cn.bad = true
}
return err
}
if commandTag != "ROLLBACK" {
return fmt.Errorf("unexpected command tag %s", commandTag)
}
cn.checkIsInTransaction(false)
return nil
}
func (cn *conn) gname() string {
cn.namei++
return strconv.FormatInt(int64(cn.namei), 10)
}
func (cn *conn) simpleExec(q string) (res driver.Result, commandTag string, err error) {
b := cn.writeBuf('Q')
b.string(q)
cn.send(b)
for {
t, r := cn.recv1()
switch t {
case 'C':
res, commandTag = cn.parseComplete(r.string())
case 'Z':
cn.processReadyForQuery(r)
if res == nil && err == nil {
err = errUnexpectedReady
}
// done
return
case 'E':
err = parseError(r)
case 'I':
res = emptyRows
case 'T', 'D':
// ignore any results
default:
cn.bad = true
errorf("unknown response for simple query: %q", t)
}
}
}
func (cn *conn) simpleQuery(q string) (res *rows, err error) {
defer cn.errRecover(&err)
b := cn.writeBuf('Q')
b.string(q)
cn.send(b)
for {
t, r := cn.recv1()
switch t {
case 'C', 'I':
// We allow queries which don't return any results through Query as
// well as Exec. We still have to give database/sql a rows object
// the user can close, though, to avoid connections from being
// leaked. A "rows" with done=true works fine for that purpose.
if err != nil {
cn.bad = true
errorf("unexpected message %q in simple query execution", t)
}
if res == nil {
res = &rows{
cn: cn,
}
}
// Set the result and tag to the last command complete if there wasn't a
// query already run. Although queries usually return from here and cede
// control to Next, a query with zero results does not.
if t == 'C' && res.colNames == nil {
res.result, res.tag = cn.parseComplete(r.string())
}
res.done = true
case 'Z':
cn.processReadyForQuery(r)
// done
return
case 'E':
res = nil
err = parseError(r)
case 'D':
if res == nil {
cn.bad = true
errorf("unexpected DataRow in simple query execution")
}
// the query didn't fail; kick off to Next
cn.saveMessage(t, r)
return
case 'T':
// res might be non-nil here if we received a previous
// CommandComplete, but that's fine; just overwrite it
res = &rows{cn: cn}
res.colNames, res.colFmts, res.colTyps = parsePortalRowDescribe(r)
// To work around a bug in QueryRow in Go 1.2 and earlier, wait
// until the first DataRow has been received.
default:
cn.bad = true
errorf("unknown response for simple query: %q", t)
}
}
}
type noRows struct{}
var emptyRows noRows
var _ driver.Result = noRows{}
func (noRows) LastInsertId() (int64, error) {
return 0, errNoLastInsertID
}
func (noRows) RowsAffected() (int64, error) {
return 0, errNoRowsAffected
}
// Decides which column formats to use for a prepared statement. The input is
// an array of type oids, one element per result column.
func decideColumnFormats(colTyps []fieldDesc, forceText bool) (colFmts []format, colFmtData []byte) {
if len(colTyps) == 0 {
return nil, colFmtDataAllText
}
colFmts = make([]format, len(colTyps))
if forceText {
return colFmts, colFmtDataAllText
}
allBinary := true
allText := true
for i, t := range colTyps {
switch t.OID {
// This is the list of types to use binary mode for when receiving them
// through a prepared statement. If a type appears in this list, it
// must also be implemented in binaryDecode in encode.go.
case oid.T_bytea:
fallthrough
case oid.T_int8:
fallthrough
case oid.T_int4:
fallthrough
case oid.T_int2:
fallthrough
case oid.T_uuid:
colFmts[i] = formatBinary
allText = false
default:
allBinary = false
}
}
if allBinary {
return colFmts, colFmtDataAllBinary
} else if allText {
return colFmts, colFmtDataAllText
} else {
colFmtData = make([]byte, 2+len(colFmts)*2)
binary.BigEndian.PutUint16(colFmtData, uint16(len(colFmts)))
for i, v := range colFmts {
binary.BigEndian.PutUint16(colFmtData[2+i*2:], uint16(v))
}
return colFmts, colFmtData
}
}
func (cn *conn) prepareTo(q, stmtName string) *stmt {
st := &stmt{cn: cn, name: stmtName}
b := cn.writeBuf('P')
b.string(st.name)
b.string(q)
b.int16(0)
b.next('D')
b.byte('S')
b.string(st.name)
b.next('S')
cn.send(b)
cn.readParseResponse()
st.paramTyps, st.colNames, st.colTyps = cn.readStatementDescribeResponse()
st.colFmts, st.colFmtData = decideColumnFormats(st.colTyps, cn.disablePreparedBinaryResult)
cn.readReadyForQuery()
return st
}
func (cn *conn) Prepare(q string) (_ driver.Stmt, err error) {
if cn.bad {
return nil, driver.ErrBadConn
}
defer cn.errRecover(&err)
if len(q) >= 4 && strings.EqualFold(q[:4], "COPY") {
s, err := cn.prepareCopyIn(q)
if err == nil {
cn.inCopy = true
}
return s, err
}
return cn.prepareTo(q, cn.gname()), nil
}
func (cn *conn) Close() (err error) {
// Skip cn.bad return here because we always want to close a connection.
defer cn.errRecover(&err)
// Ensure that cn.c.Close is always run. Since error handling is done with
// panics and cn.errRecover, the Close must be in a defer.
defer func() {
cerr := cn.c.Close()
if err == nil {
err = cerr
}
}()
// Don't go through send(); ListenerConn relies on us not scribbling on the
// scratch buffer of this connection.
return cn.sendSimpleMessage('X')
}
// Implement the "Queryer" interface
func (cn *conn) Query(query string, args []driver.Value) (driver.Rows, error) {
return cn.query(query, args)
}
func (cn *conn) query(query string, args []driver.Value) (_ *rows, err error) {
if cn.bad {
return nil, driver.ErrBadConn
}
if cn.inCopy {
return nil, errCopyInProgress
}
defer cn.errRecover(&err)
// Check to see if we can use the "simpleQuery" interface, which is
// *much* faster than going through prepare/exec
if len(args) == 0 {
return cn.simpleQuery(query)
}
if cn.binaryParameters {
cn.sendBinaryModeQuery(query, args)
cn.readParseResponse()
cn.readBindResponse()
rows := &rows{cn: cn}
rows.colNames, rows.colFmts, rows.colTyps = cn.readPortalDescribeResponse()
cn.postExecuteWorkaround()
return rows, nil
}
st := cn.prepareTo(query, "")
st.exec(args)
return &rows{
cn: cn,
colNames: st.colNames,
colTyps: st.colTyps,
colFmts: st.colFmts,
}, nil
}
// Implement the optional "Execer" interface for one-shot queries
func (cn *conn) Exec(query string, args []driver.Value) (res driver.Result, err error) {
if cn.bad {
return nil, driver.ErrBadConn
}
defer cn.errRecover(&err)
// Check to see if we can use the "simpleExec" interface, which is
// *much* faster than going through prepare/exec
if len(args) == 0 {
// ignore commandTag, our caller doesn't care
r, _, err := cn.simpleExec(query)
return r, err
}
if cn.binaryParameters {
cn.sendBinaryModeQuery(query, args)
cn.readParseResponse()
cn.readBindResponse()
cn.readPortalDescribeResponse()
cn.postExecuteWorkaround()
res, _, err = cn.readExecuteResponse("Execute")
return res, err
}
// Use the unnamed statement to defer planning until bind
// time, or else value-based selectivity estimates cannot be
// used.
st := cn.prepareTo(query, "")
r, err := st.Exec(args)
if err != nil {
panic(err)
}
return r, err
}
func (cn *conn) send(m *writeBuf) {
_, err := cn.c.Write(m.wrap())
if err != nil {
panic(err)
}
}
func (cn *conn) sendStartupPacket(m *writeBuf) error {
_, err := cn.c.Write((m.wrap())[1:])
return err
}
// Send a message of type typ to the server on the other end of cn. The
// message should have no payload. This method does not use the scratch
// buffer.
func (cn *conn) sendSimpleMessage(typ byte) (err error) {
_, err = cn.c.Write([]byte{typ, '\x00', '\x00', '\x00', '\x04'})
return err
}
// saveMessage memorizes a message and its buffer in the conn struct.
// recvMessage will then return these values on the next call to it. This
// method is useful in cases where you have to see what the next message is
// going to be (e.g. to see whether it's an error or not) but you can't handle
// the message yourself.
func (cn *conn) saveMessage(typ byte, buf *readBuf) {
if cn.saveMessageType != 0 {
cn.bad = true
errorf("unexpected saveMessageType %d", cn.saveMessageType)
}
cn.saveMessageType = typ
cn.saveMessageBuffer = *buf
}
// recvMessage receives any message from the backend, or returns an error if
// a problem occurred while reading the message.
func (cn *conn) recvMessage(r *readBuf) (byte, error) {
// workaround for a QueryRow bug, see exec
if cn.saveMessageType != 0 {
t := cn.saveMessageType
*r = cn.saveMessageBuffer
cn.saveMessageType = 0
cn.saveMessageBuffer = nil
return t, nil
}
x := cn.scratch[:5]
_, err := io.ReadFull(cn.buf, x)
if err != nil {
return 0, err
}
// read the type and length of the message that follows
t := x[0]
n := int(binary.BigEndian.Uint32(x[1:])) - 4
var y []byte
if n <= len(cn.scratch) {
y = cn.scratch[:n]
} else {
y = make([]byte, n)
}
_, err = io.ReadFull(cn.buf, y)
if err != nil {
return 0, err
}
*r = y
return t, nil
}
// recv receives a message from the backend, but if an error happened while
// reading the message or the received message was an ErrorResponse, it panics.
// NoticeResponses are ignored. This function should generally be used only
// during the startup sequence.
func (cn *conn) recv() (t byte, r *readBuf) {
for {
var err error
r = &readBuf{}
t, err = cn.recvMessage(r)
if err != nil {
panic(err)
}
switch t {
case 'E':
panic(parseError(r))
case 'N':
// ignore
default:
return
}
}
}
// recv1Buf is exactly equivalent to recv1, except it uses a buffer supplied by
// the caller to avoid an allocation.
func (cn *conn) recv1Buf(r *readBuf) byte {
for {
t, err := cn.recvMessage(r)
if err != nil {
panic(err)
}
switch t {
case 'A', 'N':
// ignore
case 'S':
cn.processParameterStatus(r)
default:
return t
}
}
}
// recv1 receives a message from the backend, panicking if an error occurs
// while attempting to read it. All asynchronous messages are ignored, with
// the exception of ErrorResponse.
func (cn *conn) recv1() (t byte, r *readBuf) {
r = &readBuf{}
t = cn.recv1Buf(r)
return t, r
}
func (cn *conn) ssl(o values) {
upgrade := ssl(o)
if upgrade == nil {
// Nothing to do
return
}
w := cn.writeBuf(0)
w.int32(80877103)
if err := cn.sendStartupPacket(w); err != nil {
panic(err)
}
b := cn.scratch[:1]
_, err := io.ReadFull(cn.c, b)
if err != nil {
panic(err)
}
if b[0] != 'S' {
panic(ErrSSLNotSupported)
}
cn.c = upgrade(cn.c)
}
// isDriverSetting returns true iff a setting is purely for configuring the
// driver's options and should not be sent to the server in the connection
// startup packet.
func isDriverSetting(key string) bool {
switch key {
case "host", "port":
return true
case "password":
return true
case "sslmode", "sslcert", "sslkey", "sslrootcert":
return true
case "fallback_application_name":
return true
case "connect_timeout":
return true
case "disable_prepared_binary_result":
return true
case "binary_parameters":
return true
default:
return false
}
}
func (cn *conn) startup(o values) {
w := cn.writeBuf(0)
w.int32(196608)
// Send the backend the name of the database we want to connect to, and the
// user we want to connect as. Additionally, we send over any run-time
// parameters potentially included in the connection string. If the server
// doesn't recognize any of them, it will reply with an error.
for k, v := range o {
if isDriverSetting(k) {
// skip options which can't be run-time parameters
continue
}
// The protocol requires us to supply the database name as "database"
// instead of "dbname".
if k == "dbname" {
k = "database"
}
w.string(k)
w.string(v)
}
w.string("")
if err := cn.sendStartupPacket(w); err != nil {
panic(err)
}
for {
t, r := cn.recv()
switch t {
case 'K':
cn.processBackendKeyData(r)
case 'S':
cn.processParameterStatus(r)
case 'R':
cn.auth(r, o)
case 'Z':
cn.processReadyForQuery(r)
return
default:
errorf("unknown response for startup: %q", t)
}
}
}
func (cn *conn) auth(r *readBuf, o values) {
switch code := r.int32(); code {
case 0:
// OK
case 3:
w := cn.writeBuf('p')
w.string(o["password"])
cn.send(w)
t, r := cn.recv()
if t != 'R' {
errorf("unexpected password response: %q", t)
}
if r.int32() != 0 {
errorf("unexpected authentication response: %q", t)
}
case 5:
s := string(r.next(4))
w := cn.writeBuf('p')
w.string("md5" + md5s(md5s(o["password"]+o["user"])+s))
cn.send(w)
t, r := cn.recv()
if t != 'R' {
errorf("unexpected password response: %q", t)
}
if r.int32() != 0 {
errorf("unexpected authentication response: %q", t)
}
default:
errorf("unknown authentication response: %d", code)
}
}
type format int
const formatText format = 0
const formatBinary format = 1
// One result-column format code with the value 1 (i.e. all binary).
var colFmtDataAllBinary = []byte{0, 1, 0, 1}
// No result-column format codes (i.e. all text).
var colFmtDataAllText = []byte{0, 0}
type stmt struct {
cn *conn
name string
colNames []string
colFmts []format
colFmtData []byte
colTyps []fieldDesc
paramTyps []oid.Oid
closed bool
}
func (st *stmt) Close() (err error) {
if st.closed {
return nil
}
if st.cn.bad {
return driver.ErrBadConn
}
defer st.cn.errRecover(&err)
w := st.cn.writeBuf('C')
w.byte('S')
w.string(st.name)
st.cn.send(w)
st.cn.send(st.cn.writeBuf('S'))
t, _ := st.cn.recv1()
if t != '3' {
st.cn.bad = true
errorf("unexpected close response: %q", t)
}
st.closed = true
t, r := st.cn.recv1()
if t != 'Z' {
st.cn.bad = true
errorf("expected ready for query, but got: %q", t)
}
st.cn.processReadyForQuery(r)
return nil
}
func (st *stmt) Query(v []driver.Value) (r driver.Rows, err error) {
if st.cn.bad {
return nil, driver.ErrBadConn
}
defer st.cn.errRecover(&err)
st.exec(v)
return &rows{
cn: st.cn,
colNames: st.colNames,
colTyps: st.colTyps,
colFmts: st.colFmts,
}, nil
}
func (st *stmt) Exec(v []driver.Value) (res driver.Result, err error) {
if st.cn.bad {
return nil, driver.ErrBadConn
}
defer st.cn.errRecover(&err)
st.exec(v)
res, _, err = st.cn.readExecuteResponse("simple query")
return res, err
}
func (st *stmt) exec(v []driver.Value) {
if len(v) >= 65536 {
errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(v))
}
if len(v) != len(st.paramTyps) {
errorf("got %d parameters but the statement requires %d", len(v), len(st.paramTyps))
}
cn := st.cn
w := cn.writeBuf('B')
w.byte(0) // unnamed portal
w.string(st.name)
if cn.binaryParameters {
cn.sendBinaryParameters(w, v)
} else {
w.int16(0)
w.int16(len(v))
for i, x := range v {
if x == nil {
w.int32(-1)
} else {
b := encode(&cn.parameterStatus, x, st.paramTyps[i])
w.int32(len(b))
w.bytes(b)
}
}
}
w.bytes(st.colFmtData)
w.next('E')
w.byte(0)
w.int32(0)
w.next('S')
cn.send(w)
cn.readBindResponse()
cn.postExecuteWorkaround()
}
func (st *stmt) NumInput() int {
return len(st.paramTyps)
}
// parseComplete parses the "command tag" from a CommandComplete message, and
// returns the number of rows affected (if applicable) and a string
// identifying only the command that was executed, e.g. "ALTER TABLE". If the
// command tag could not be parsed, parseComplete panics.
func (cn *conn) parseComplete(commandTag string) (driver.Result, string) {
commandsWithAffectedRows := []string{
"SELECT ",
// INSERT is handled below
"UPDATE ",
"DELETE ",
"FETCH ",
"MOVE ",
"COPY ",
}
var affectedRows *string
for _, tag := range commandsWithAffectedRows {
if strings.HasPrefix(commandTag, tag) {
t := commandTag[len(tag):]
affectedRows = &t
commandTag = tag[:len(tag)-1]
break
}
}
// INSERT also includes the oid of the inserted row in its command tag.
// Oids in user tables are deprecated, and the oid is only returned when
// exactly one row is inserted, so it's unlikely to be of value to any
// real-world application and we can ignore it.
if affectedRows == nil && strings.HasPrefix(commandTag, "INSERT ") {
parts := strings.Split(commandTag, " ")
if len(parts) != 3 {
cn.bad = true
errorf("unexpected INSERT command tag %s", commandTag)
}
affectedRows = &parts[len(parts)-1]
commandTag = "INSERT"
}
// There should be no affected rows attached to the tag, just return it
if affectedRows == nil {
return driver.RowsAffected(0), commandTag
}
n, err := strconv.ParseInt(*affectedRows, 10, 64)
if err != nil {
cn.bad = true
errorf("could not parse commandTag: %s", err)
}
return driver.RowsAffected(n), commandTag
}
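// Illustrative sketch (tag values assumed): typical CommandComplete tags map as
//
//	"UPDATE 42"    -> driver.RowsAffected(42), "UPDATE"
//	"INSERT 0 1"   -> driver.RowsAffected(1),  "INSERT"
//	"CREATE TABLE" -> driver.RowsAffected(0),  "CREATE TABLE"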
type rows struct {
cn *conn
finish func()
colNames []string
colTyps []fieldDesc
colFmts []format
done bool
rb readBuf
result driver.Result
tag string
}
func (rs *rows) Close() error {
if finish := rs.finish; finish != nil {
defer finish()
}
// no need to look at cn.bad as Next() will
for {
err := rs.Next(nil)
switch err {
case nil:
case io.EOF:
// rs.Next can return io.EOF on both 'Z' (ready for query) and 'T' (row
// description, used with HasNextResultSet). We need to fetch messages until
// we hit a 'Z', which is done by waiting for done to be set.
if rs.done {
return nil
}
default:
return err
}
}
}
func (rs *rows) Columns() []string {
return rs.colNames
}
func (rs *rows) Result() driver.Result {
if rs.result == nil {
return emptyRows
}
return rs.result
}
func (rs *rows) Tag() string {
return rs.tag
}
func (rs *rows) Next(dest []driver.Value) (err error) {
if rs.done {
return io.EOF
}
conn := rs.cn
if conn.bad {
return driver.ErrBadConn
}
defer conn.errRecover(&err)
for {
t := conn.recv1Buf(&rs.rb)
switch t {
case 'E':
err = parseError(&rs.rb)
case 'C', 'I':
if t == 'C' {
rs.result, rs.tag = conn.parseComplete(rs.rb.string())
}
continue
case 'Z':
conn.processReadyForQuery(&rs.rb)
rs.done = true
if err != nil {
return err
}
return io.EOF
case 'D':
n := rs.rb.int16()
if err != nil {
conn.bad = true
errorf("unexpected DataRow after error %s", err)
}
if n < len(dest) {
dest = dest[:n]
}
for i := range dest {
l := rs.rb.int32()
if l == -1 {
dest[i] = nil
continue
}
dest[i] = decode(&conn.parameterStatus, rs.rb.next(l), rs.colTyps[i].OID, rs.colFmts[i])
}
return
case 'T':
rs.colNames, rs.colFmts, rs.colTyps = parsePortalRowDescribe(&rs.rb)
return io.EOF
default:
errorf("unexpected message after execute: %q", t)
}
}
}
func (rs *rows) HasNextResultSet() bool {
return !rs.done
}
func (rs *rows) NextResultSet() error {
return nil
}
// QuoteIdentifier quotes an "identifier" (e.g. a table or a column name) to be
// used as part of an SQL statement. For example:
//
// tblname := "my_table"
// data := "my_data"
// err = db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", pq.QuoteIdentifier(tblname)), data)
//
// Any double quotes in name will be escaped. The quoted identifier will be
// case sensitive when used in a query. If the input string contains a zero
// byte, the result will be truncated immediately before it.
func QuoteIdentifier(name string) string {
end := strings.IndexRune(name, 0)
if end > -1 {
name = name[:end]
}
return `"` + strings.Replace(name, `"`, `""`, -1) + `"`
}
func md5s(s string) string {
h := md5.New()
h.Write([]byte(s))
return fmt.Sprintf("%x", h.Sum(nil))
}
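// Illustrative note: for MD5 authentication (code 5 in auth above) the client response
// is assembled as "md5" + md5s(md5s(password+user)+salt), matching PostgreSQL's
// concat('md5', md5(md5(password || username) || salt)).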
func (cn *conn) sendBinaryParameters(b *writeBuf, args []driver.Value) {
// Do one pass over the parameters to see if we're going to send any of
// them over in binary. If we are, create a paramFormats array at the
// same time.
var paramFormats []int
for i, x := range args {
_, ok := x.([]byte)
if ok {
if paramFormats == nil {
paramFormats = make([]int, len(args))
}
paramFormats[i] = 1
}
}
if paramFormats == nil {
b.int16(0)
} else {
b.int16(len(paramFormats))
for _, x := range paramFormats {
b.int16(x)
}
}
b.int16(len(args))
for _, x := range args {
if x == nil {
b.int32(-1)
} else {
datum := binaryEncode(&cn.parameterStatus, x)
b.int32(len(datum))
b.bytes(datum)
}
}
}
func (cn *conn) sendBinaryModeQuery(query string, args []driver.Value) {
if len(args) >= 65536 {
errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(args))
}
b := cn.writeBuf('P')
b.byte(0) // unnamed statement
b.string(query)
b.int16(0)
b.next('B')
b.int16(0) // unnamed portal and statement
cn.sendBinaryParameters(b, args)
b.bytes(colFmtDataAllText)
b.next('D')
b.byte('P')
b.byte(0) // unnamed portal
b.next('E')
b.byte(0)
b.int32(0)
b.next('S')
cn.send(b)
}
func (cn *conn) processParameterStatus(r *readBuf) {
var err error
param := r.string()
switch param {
case "server_version":
var major1 int
var major2 int
var minor int
_, err = fmt.Sscanf(r.string(), "%d.%d.%d", &major1, &major2, &minor)
if err == nil {
cn.parameterStatus.serverVersion = major1*10000 + major2*100 + minor
}
case "TimeZone":
cn.parameterStatus.currentLocation, err = time.LoadLocation(r.string())
if err != nil {
cn.parameterStatus.currentLocation = nil
}
default:
// ignore
}
}
func (cn *conn) processReadyForQuery(r *readBuf) {
cn.txnStatus = transactionStatus(r.byte())
}
func (cn *conn) readReadyForQuery() {
t, r := cn.recv1()
switch t {
case 'Z':
cn.processReadyForQuery(r)
return
default:
cn.bad = true
errorf("unexpected message %q; expected ReadyForQuery", t)
}
}
func (cn *conn) processBackendKeyData(r *readBuf) {
cn.processID = r.int32()
cn.secretKey = r.int32()
}
func (cn *conn) readParseResponse() {
t, r := cn.recv1()
switch t {
case '1':
return
case 'E':
err := parseError(r)
cn.readReadyForQuery()
panic(err)
default:
cn.bad = true
errorf("unexpected Parse response %q", t)
}
}
func (cn *conn) readStatementDescribeResponse() (paramTyps []oid.Oid, colNames []string, colTyps []fieldDesc) {
for {
t, r := cn.recv1()
switch t {
case 't':
nparams := r.int16()
paramTyps = make([]oid.Oid, nparams)
for i := range paramTyps {
paramTyps[i] = r.oid()
}
case 'n':
return paramTyps, nil, nil
case 'T':
colNames, colTyps = parseStatementRowDescribe(r)
return paramTyps, colNames, colTyps
case 'E':
err := parseError(r)
cn.readReadyForQuery()
panic(err)
default:
cn.bad = true
errorf("unexpected Describe statement response %q", t)
}
}
}
func (cn *conn) readPortalDescribeResponse() (colNames []string, colFmts []format, colTyps []fieldDesc) {
t, r := cn.recv1()
switch t {
case 'T':
return parsePortalRowDescribe(r)
case 'n':
return nil, nil, nil
case 'E':
err := parseError(r)
cn.readReadyForQuery()
panic(err)
default:
cn.bad = true
errorf("unexpected Describe response %q", t)
}
panic("not reached")
}
func (cn *conn) readBindResponse() {
t, r := cn.recv1()
switch t {
case '2':
return
case 'E':
err := parseError(r)
cn.readReadyForQuery()
panic(err)
default:
cn.bad = true
errorf("unexpected Bind response %q", t)
}
}
func (cn *conn) postExecuteWorkaround() {
// Work around a bug in sql.DB.QueryRow: in Go 1.2 and earlier it ignores
// any errors from rows.Next, which masks errors that happened during the
// execution of the query. To avoid the problem in common cases, we wait
// here for one more message from the database. If it's not an error the
// query will likely succeed (or perhaps has already, if it's a
// CommandComplete), so we push the message into the conn struct; recv1
// will return it as the next message for rows.Next or rows.Close.
// However, if it's an error, we wait until ReadyForQuery and then return
// the error to our caller.
for {
t, r := cn.recv1()
switch t {
case 'E':
err := parseError(r)
cn.readReadyForQuery()
panic(err)
case 'C', 'D', 'I':
// the query didn't fail, but we can't process this message
cn.saveMessage(t, r)
return
default:
cn.bad = true
errorf("unexpected message during extended query execution: %q", t)
}
}
}
// Only for Exec(), since we ignore the returned data
func (cn *conn) readExecuteResponse(protocolState string) (res driver.Result, commandTag string, err error) {
for {
t, r := cn.recv1()
switch t {
case 'C':
if err != nil {
cn.bad = true
errorf("unexpected CommandComplete after error %s", err)
}
res, commandTag = cn.parseComplete(r.string())
case 'Z':
cn.processReadyForQuery(r)
if res == nil && err == nil {
err = errUnexpectedReady
}
return res, commandTag, err
case 'E':
err = parseError(r)
case 'T', 'D', 'I':
if err != nil {
cn.bad = true
errorf("unexpected %q after error %s", t, err)
}
if t == 'I' {
res = emptyRows
}
// ignore any results
default:
cn.bad = true
errorf("unknown %s response: %q", protocolState, t)
}
}
}
func parseStatementRowDescribe(r *readBuf) (colNames []string, colTyps []fieldDesc) {
n := r.int16()
colNames = make([]string, n)
colTyps = make([]fieldDesc, n)
for i := range colNames {
colNames[i] = r.string()
r.next(6)
colTyps[i].OID = r.oid()
colTyps[i].Len = r.int16()
colTyps[i].Mod = r.int32()
// format code not known when describing a statement; always 0
r.next(2)
}
return
}
func parsePortalRowDescribe(r *readBuf) (colNames []string, colFmts []format, colTyps []fieldDesc) {
n := r.int16()
colNames = make([]string, n)
colFmts = make([]format, n)
colTyps = make([]fieldDesc, n)
for i := range colNames {
colNames[i] = r.string()
r.next(6)
colTyps[i].OID = r.oid()
colTyps[i].Len = r.int16()
colTyps[i].Mod = r.int32()
colFmts[i] = format(r.int16())
}
return
}
// parseEnviron tries to mimic some of libpq's environment handling
//
// To ease testing, it does not directly reference os.Environ, but is
// designed to accept its output.
//
// Environment-set connection information is intended to have a higher
// precedence than a library default but lower than any explicitly
// passed information (such as in the URL or connection string).
func parseEnviron(env []string) (out map[string]string) {
out = make(map[string]string)
for _, v := range env {
parts := strings.SplitN(v, "=", 2)
accrue := func(keyname string) {
out[keyname] = parts[1]
}
unsupported := func() {
panic(fmt.Sprintf("setting %v not supported", parts[0]))
}
// The order of these is the same as is seen in the
// PostgreSQL 9.1 manual. Unsupported but well-defined
// keys cause a panic; these should be unset prior to
// execution. Options which pq expects to be set to a
// certain value are allowed, but must be set to that
// value if present (they can, of course, be absent).
switch parts[0] {
case "PGHOST":
accrue("host")
case "PGHOSTADDR":
unsupported()
case "PGPORT":
accrue("port")
case "PGDATABASE":
accrue("dbname")
case "PGUSER":
accrue("user")
case "PGPASSWORD":
accrue("password")
case "PGSERVICE", "PGSERVICEFILE", "PGREALM":
unsupported()
case "PGOPTIONS":
accrue("options")
case "PGAPPNAME":
accrue("application_name")
case "PGSSLMODE":
accrue("sslmode")
case "PGSSLCERT":
accrue("sslcert")
case "PGSSLKEY":
accrue("sslkey")
case "PGSSLROOTCERT":
accrue("sslrootcert")
case "PGREQUIRESSL", "PGSSLCRL":
unsupported()
case "PGREQUIREPEER":
unsupported()
case "PGKRBSRVNAME", "PGGSSLIB":
unsupported()
case "PGCONNECT_TIMEOUT":
accrue("connect_timeout")
case "PGCLIENTENCODING":
accrue("client_encoding")
case "PGDATESTYLE":
accrue("datestyle")
case "PGTZ":
accrue("timezone")
case "PGGEQO":
accrue("geqo")
case "PGSYSCONFDIR", "PGLOCALEDIR":
unsupported()
}
}
return out
}
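// Illustrative sketch (values assumed): an environment containing
//
//	PGHOST=db.example.com PGUSER=bob PGDATABASE=mydb
//
// yields {"host": "db.example.com", "user": "bob", "dbname": "mydb"}, while an
// unsupported-but-recognized variable such as PGHOSTADDR causes a panic.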
// isUTF8 returns whether name is a fuzzy variation of the string "UTF-8".
func isUTF8(name string) bool {
// Recognize all sorts of silly things as "UTF-8", like Postgres does
s := strings.Map(alnumLowerASCII, name)
return s == "utf8" || s == "unicode"
}
func alnumLowerASCII(ch rune) rune {
if 'A' <= ch && ch <= 'Z' {
return ch + ('a' - 'A')
}
if 'a' <= ch && ch <= 'z' || '0' <= ch && ch <= '9' {
return ch
}
return -1 // discard
}
| [
"\"PGPASSFILE\""
]
| []
| [
"PGPASSFILE"
]
| [] | ["PGPASSFILE"] | go | 1 | 0 | |
examples/documentation_examples/examples_test.go | // Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
// NOTE: Any time this file is modified, a WEBSITE ticket should be opened to sync the changes with
// the "What is MongoDB" webpage, which the example was originally added to as part of WEBSITE-5148.
package documentation_examples_test
import (
"context"
"os"
"testing"
"time"
"github.com/stretchr/testify/require"
"go.mongodb.org/mongo-driver/examples/documentation_examples"
"go.mongodb.org/mongo-driver/internal/testutil"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/description"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/x/bsonx"
"go.mongodb.org/mongo-driver/x/mongo/driver/connstring"
"go.mongodb.org/mongo-driver/x/mongo/driver/topology"
)
func TestDocumentationExamples(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
cs := testutil.ConnString(t)
client, err := mongo.Connect(context.Background(), options.Client().ApplyURI(cs.String()))
require.NoError(t, err)
defer client.Disconnect(ctx)
db := client.Database("documentation_examples")
documentation_examples.InsertExamples(t, db)
documentation_examples.QueryToplevelFieldsExamples(t, db)
documentation_examples.QueryEmbeddedDocumentsExamples(t, db)
documentation_examples.QueryArraysExamples(t, db)
documentation_examples.QueryArrayEmbeddedDocumentsExamples(t, db)
documentation_examples.QueryNullMissingFieldsExamples(t, db)
documentation_examples.ProjectionExamples(t, db)
documentation_examples.UpdateExamples(t, db)
documentation_examples.DeleteExamples(t, db)
documentation_examples.RunCommandExamples(t, db)
documentation_examples.IndexExamples(t, db)
documentation_examples.VersionedAPIExamples()
// Because it uses RunCommand with an apiVersion, the strict count example can only be
// run on 5.0+ without auth.
ver, err := getServerVersion(ctx, client)
require.NoError(t, err, "getServerVersion error: %v", err)
auth := os.Getenv("AUTH") == "auth"
if testutil.CompareVersions(t, ver, "5.0") >= 0 && !auth {
documentation_examples.VersionedAPIStrictCountExample(t)
} else {
t.Log("skipping versioned API strict count example")
}
}
func TestAggregationExamples(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
cs := testutil.ConnString(t)
client, err := mongo.Connect(context.Background(), options.Client().ApplyURI(cs.String()))
require.NoError(t, err)
defer client.Disconnect(ctx)
db := client.Database("documentation_examples")
ver, err := getServerVersion(ctx, client)
if err != nil || testutil.CompareVersions(t, ver, "3.6") < 0 {
t.Skip("server does not support let in $lookup in aggregations")
}
documentation_examples.AggregationExamples(t, db)
}
func TestTransactionExamples(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
topo := createTopology(t)
client, err := mongo.Connect(context.Background(), &options.ClientOptions{Deployment: topo})
require.NoError(t, err)
defer client.Disconnect(ctx)
ver, err := getServerVersion(ctx, client)
if err != nil || testutil.CompareVersions(t, ver, "4.0") < 0 || topo.Kind() != description.ReplicaSet {
t.Skip("server does not support transactions")
}
err = documentation_examples.TransactionsExamples(ctx, client)
require.NoError(t, err)
}
func TestChangeStreamExamples(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
topo := createTopology(t)
client, err := mongo.Connect(context.Background(), &options.ClientOptions{Deployment: topo})
require.NoError(t, err)
defer client.Disconnect(ctx)
db := client.Database("changestream_examples")
ver, err := getServerVersion(ctx, client)
if err != nil || testutil.CompareVersions(t, ver, "3.6") < 0 || topo.Kind() != description.ReplicaSet {
t.Skip("server does not support changestreams")
}
documentation_examples.ChangeStreamExamples(t, db)
}
func getServerVersion(ctx context.Context, client *mongo.Client) (string, error) {
serverStatus, err := client.Database("admin").RunCommand(
ctx,
bsonx.Doc{{"serverStatus", bsonx.Int32(1)}},
).DecodeBytes()
if err != nil {
return "", err
}
version, err := serverStatus.LookupErr("version")
if err != nil {
return "", err
}
return version.StringValue(), nil
}
func createTopology(t *testing.T) *topology.Topology {
topo, err := topology.New(topology.WithConnString(func(connstring.ConnString) connstring.ConnString {
return testutil.ConnString(t)
}))
if err != nil {
t.Fatalf("topology.New error: %v", err)
}
return topo
}
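// Illustrative note (command and variable names are assumptions, not from this file):
// these tests pick up the standard driver test environment, e.g.
//
//	MONGODB_URI="mongodb://localhost:27017" AUTH=noauth go test ./examples/documentation_examples/...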
| [
"\"AUTH\""
]
| []
| [
"AUTH"
]
| [] | ["AUTH"] | go | 1 | 0 | |
clinica/pipelines/dwi_connectome/dwi_connectome_utils.py | # coding: utf8
def get_luts():
import os
from clinica.utils.exceptions import ClinicaException
try:
# For aparc+aseg.mgz file:
default = os.path.join(os.environ['FREESURFER_HOME'],
'FreeSurferColorLUT.txt')
# For aparc.a2009s+aseg.mgz file:
a2009s = os.path.join(os.environ['FREESURFER_HOME'],
'FreeSurferColorLUT.txt')
# TODO: Add custom Lausanne2008 LUTs here.
except KeyError:
raise ClinicaException('Could not find FREESURFER_HOME environment variable.')
return [default, a2009s]
def get_conversion_luts():
import os
from clinica.utils.exceptions import ClinicaException
try:
# For aparc+aseg.mgz file:
default = os.path.join(os.environ['MRTRIX_HOME'],
'share/mrtrix3/labelconvert/fs_default.txt')
# For aparc.a2009s+aseg.mgz file:
a2009s = os.path.join(os.environ['MRTRIX_HOME'],
'share/mrtrix3/labelconvert/fs_a2009s.txt')
# TODO: Add custom Lausanne2008 conversion LUTs here.
except KeyError:
raise ClinicaException('Could not find MRTRIX_HOME environment variable.')
return [default, a2009s]
def get_containers(subjects, sessions):
return [
'subjects/' + subjects[i] + '/' + sessions[i] + '/dwi'
for i in range(len(subjects))
]
def get_caps_filenames(dwi_file: str):
import re
m = re.search(r"/(sub-[a-zA-Z0-9]+_ses-[a-zA-Z0-9]+.*)_preproc", dwi_file)
if not m:
raise ValueError(f"Input filename {dwi_file} is not in a CAPS compliant format.")
source_file_caps = m.group(1)
m = re.search(r"/(sub-[a-zA-Z0-9]+_ses-[a-zA-Z0-9]+.*)_space-[a-zA-Z0-9]+_preproc", dwi_file)
if not m:
raise ValueError(f"Input filename {dwi_file} is not in a CAPS compliant format.")
source_file_bids = m.group(1)
response = source_file_caps + '_model-CSD_responseFunction.txt'
fod = source_file_caps + '_model-CSD_diffmodel.nii.gz'
tracts = source_file_caps + '_model-CSD_tractography.tck'
nodes = [source_file_caps + '_atlas-desikan_parcellation.nii.gz',
source_file_caps + '_atlas-destrieux_parcellation.nii.gz']
# TODO: Add custom Lausanne2008 node files here.
connectomes = [source_file_bids + '_model-CSD_atlas-desikan_connectivity.tsv',
source_file_bids + '_model-CSD_atlas-destrieux_connectivity.tsv']
# TODO: Add custom Lausanne2008 connectome files here.
return response, fod, tracts, nodes, connectomes
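# Illustrative sketch (the filename below is a made-up example): an input such as
#   '/caps/sub-CLNC01_ses-M00_space-T1w_preproc.nii.gz'
# yields response 'sub-CLNC01_ses-M00_space-T1w_model-CSD_responseFunction.txt' and
# connectomes[0] 'sub-CLNC01_ses-M00_model-CSD_atlas-desikan_connectivity.tsv'.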
def print_begin_pipeline(in_bids_or_caps_file: str) -> None:
"""
"""
import re
from clinica.utils.stream import cprint
m = re.search(r'(sub-[a-zA-Z0-9]+)_(ses-[a-zA-Z0-9]+)',
in_bids_or_caps_file)
if not m:
raise ValueError(
f"Input filename {in_bids_or_caps_file} is not in a BIDS or CAPS compliant format."
)
cprint(f"Running pipeline for {m.group(0)}")
def print_end_pipeline(in_bids_or_caps_file: str, final_file: str) -> None:
"""
"""
import re
from clinica.utils.stream import cprint
m = re.search(r'(sub-[a-zA-Z0-9]+)_(ses-[a-zA-Z0-9]+)',
in_bids_or_caps_file)
if not m:
raise ValueError(
f"Input filename {in_bids_or_caps_file} is not in a BIDS or CAPS compliant format."
)
cprint(f"...{m.group(0)} has completed.")
| []
| []
| [
"MRTRIX_HOME",
"FREESURFER_HOME"
]
| [] | ["MRTRIX_HOME", "FREESURFER_HOME"] | python | 2 | 0 | |
backend/app/cmd/server.go | package cmd
import (
"context"
"fmt"
"net/http"
"net/url"
"os"
"os/signal"
"path"
"strings"
"syscall"
"time"
bolt "github.com/coreos/bbolt"
"github.com/go-pkgz/lgr"
log "github.com/go-pkgz/lgr"
"github.com/pkg/errors"
"github.com/go-pkgz/auth"
"github.com/go-pkgz/auth/avatar"
"github.com/go-pkgz/auth/provider"
"github.com/go-pkgz/auth/token"
"github.com/go-pkgz/mongo"
"github.com/go-pkgz/rest/cache"
"github.com/umputun/remark/backend/app/migrator"
"github.com/umputun/remark/backend/app/notify"
"github.com/umputun/remark/backend/app/rest/api"
"github.com/umputun/remark/backend/app/rest/proxy"
"github.com/umputun/remark/backend/app/store"
"github.com/umputun/remark/backend/app/store/admin"
"github.com/umputun/remark/backend/app/store/engine"
"github.com/umputun/remark/backend/app/store/service"
)
// ServerCommand with command line flags and env
type ServerCommand struct {
Store StoreGroup `group:"store" namespace:"store" env-namespace:"STORE"`
Avatar AvatarGroup `group:"avatar" namespace:"avatar" env-namespace:"AVATAR"`
Cache CacheGroup `group:"cache" namespace:"cache" env-namespace:"CACHE"`
Mongo MongoGroup `group:"mongo" namespace:"mongo" env-namespace:"MONGO"`
Admin AdminGroup `group:"admin" namespace:"admin" env-namespace:"ADMIN"`
Notify NotifyGroup `group:"notify" namespace:"notify" env-namespace:"NOTIFY"`
SSL SSLGroup `group:"ssl" namespace:"ssl" env-namespace:"SSL"`
Sites []string `long:"site" env:"SITE" default:"remark" description:"site names" env-delim:","`
AdminPasswd string `long:"admin-passwd" env:"ADMIN_PASSWD" default:"" description:"admin basic auth password"`
BackupLocation string `long:"backup" env:"BACKUP_PATH" default:"./var/backup" description:"backups location"`
MaxBackupFiles int `long:"max-back" env:"MAX_BACKUP_FILES" default:"10" description:"max backups to keep"`
ImageProxy bool `long:"img-proxy" env:"IMG_PROXY" description:"enable image proxy"`
MaxCommentSize int `long:"max-comment" env:"MAX_COMMENT_SIZE" default:"2048" description:"max comment size"`
MaxVotes int `long:"max-votes" env:"MAX_VOTES" default:"-1" description:"maximum number of votes per comment"`
LowScore int `long:"low-score" env:"LOW_SCORE" default:"-5" description:"low score threshold"`
CriticalScore int `long:"critical-score" env:"CRITICAL_SCORE" default:"-10" description:"critical score threshold"`
ReadOnlyAge int `long:"read-age" env:"READONLY_AGE" default:"0" description:"read-only age of comments, days"`
EditDuration time.Duration `long:"edit-time" env:"EDIT_TIME" default:"5m" description:"edit window"`
Port int `long:"port" env:"REMARK_PORT" default:"8080" description:"port"`
WebRoot string `long:"web-root" env:"REMARK_WEB_ROOT" default:"./web" description:"web root directory"`
Auth struct {
TTL struct {
JWT time.Duration `long:"jwt" env:"JWT" default:"5m" description:"jwt TTL"`
Cookie time.Duration `long:"cookie" env:"COOKIE" default:"200h" description:"auth cookie TTL"`
} `group:"ttl" namespace:"ttl" env-namespace:"TTL"`
Google AuthGroup `group:"google" namespace:"google" env-namespace:"GOOGLE" description:"Google OAuth"`
Github AuthGroup `group:"github" namespace:"github" env-namespace:"GITHUB" description:"Github OAuth"`
Facebook AuthGroup `group:"facebook" namespace:"facebook" env-namespace:"FACEBOOK" description:"Facebook OAuth"`
Yandex AuthGroup `group:"yandex" namespace:"yandex" env-namespace:"YANDEX" description:"Yandex OAuth"`
Dev bool `long:"dev" env:"DEV" description:"enable dev (local) oauth2"`
} `group:"auth" namespace:"auth" env-namespace:"AUTH"`
CommonOpts
}
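// Note (added for clarity, not in the original source): options can be set from CLI
// flags or environment variables, and the group namespaces compose. For example, the
// nested field Auth.Google.CID can be set with the flag --auth.google.cid or the env
// var AUTH_GOOGLE_CID, and Store.Bolt.Path with --store.bolt.path / STORE_BOLT_PATH.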
// AuthGroup defines options group for auth params
type AuthGroup struct {
CID string `long:"cid" env:"CID" description:"OAuth client ID"`
CSEC string `long:"csec" env:"CSEC" description:"OAuth client secret"`
}
// StoreGroup defines options group for store params
type StoreGroup struct {
Type string `long:"type" env:"TYPE" description:"type of storage" choice:"bolt" choice:"mongo" default:"bolt"`
Bolt struct {
Path string `long:"path" env:"PATH" default:"./var" description:"parent dir for bolt files"`
Timeout time.Duration `long:"timeout" env:"TIMEOUT" default:"30s" description:"bolt timeout"`
} `group:"bolt" namespace:"bolt" env-namespace:"BOLT"`
}
// AvatarGroup defines options group for avatar params
type AvatarGroup struct {
Type string `long:"type" env:"TYPE" description:"type of avatar storage" choice:"fs" choice:"bolt" choice:"mongo" default:"fs"`
FS struct {
Path string `long:"path" env:"PATH" default:"./var/avatars" description:"avatars location"`
} `group:"fs" namespace:"fs" env-namespace:"FS"`
Bolt struct {
File string `long:"file" env:"FILE" default:"./var/avatars.db" description:"avatars bolt file location"`
} `group:"bolt" namespace:"bolt" env-namespace:"bolt"`
RszLmt int `long:"rsz-lmt" env:"RESIZE" default:"0" description:"max image size for resizing avatars on save"`
}
// CacheGroup defines options group for cache params
type CacheGroup struct {
Type string `long:"type" env:"TYPE" description:"type of cache" choice:"mem" choice:"mongo" choice:"none" default:"mem"`
Max struct {
Items int `long:"items" env:"ITEMS" default:"1000" description:"max cached items"`
Value int `long:"value" env:"VALUE" default:"65536" description:"max size of cached value"`
Size int64 `long:"size" env:"SIZE" default:"50000000" description:"max size of total cache"`
} `group:"max" namespace:"max" env-namespace:"MAX"`
}
// MongoGroup holds all mongo params, used by store, avatar and cache
type MongoGroup struct {
URL string `long:"url" env:"URL" description:"mongo url"`
DB string `long:"db" env:"DB" default:"remark42" description:"mongo database"`
}
// AdminGroup defines options group for admin params
type AdminGroup struct {
Type string `long:"type" env:"TYPE" description:"type of admin store" choice:"shared" choice:"mongo" default:"shared"`
Shared struct {
Admins []string `long:"id" env:"ID" description:"admin(s) ids" env-delim:","`
Email string `long:"email" env:"EMAIL" default:"" description:"admin email"`
} `group:"shared" namespace:"shared" env-namespace:"SHARED"`
}
// NotifyGroup defines options for notification
type NotifyGroup struct {
Type string `long:"type" env:"TYPE" description:"type of notification" choice:"none" choice:"telegram" default:"none"`
QueueSize int `long:"queue" env:"QUEUE" description:"size of notification queue" default:"100"`
Telegram struct {
Token string `long:"token" env:"TOKEN" description:"telegram token"`
Channel string `long:"chan" env:"CHAN" description:"telegram channel"`
Timeout time.Duration `long:"timeout" env:"TIMEOUT" default:"5s" description:"telegram timeout"`
API string `long:"api" env:"API" default:"https://api.telegram.org/bot" description:"telegram api prefix"`
} `group:"telegram" namespace:"telegram" env-namespace:"TELEGRAM"`
}
// SSLGroup defines options group for server ssl params
type SSLGroup struct {
Type string `long:"type" env:"TYPE" description:"ssl (auto)support" choice:"none" choice:"static" choice:"auto" default:"none"`
Port int `long:"port" env:"PORT" description:"port number for https server" default:"8443"`
Cert string `long:"cert" env:"CERT" description:"path to cert.pem file"`
Key string `long:"key" env:"KEY" description:"path to key.pem file"`
ACMELocation string `long:"acme-location" env:"ACME_LOCATION" description:"dir where certificates will be stored by autocert manager" default:"./var/acme"`
ACMEEmail string `long:"acme-email" env:"ACME_EMAIL" description:"admin email for certificate notifications"`
}
// serverApp holds all active objects
type serverApp struct {
*ServerCommand
restSrv *api.Rest
migratorSrv *api.Migrator
exporter migrator.Exporter
devAuth *provider.DevAuthServer
dataService *service.DataStore
avatarStore avatar.Store
notifyService *notify.Service
terminated chan struct{}
}
// Execute is the entry point for "server" command, called by flag parser
func (s *ServerCommand) Execute(args []string) error {
log.Printf("[INFO] start server on port %d", s.Port)
resetEnv("SECRET", "AUTH_GOOGLE_CSEC", "AUTH_GITHUB_CSEC", "AUTH_FACEBOOK_CSEC", "AUTH_YANDEX_CSEC", "ADMIN_PASSWD")
ctx, cancel := context.WithCancel(context.Background())
go func() { // catch signal and invoke graceful termination
stop := make(chan os.Signal, 1)
signal.Notify(stop, os.Interrupt, syscall.SIGTERM)
<-stop
log.Printf("[WARN] interrupt signal")
cancel()
}()
app, err := s.newServerApp()
	if err != nil {
		log.Printf("[PANIC] failed to setup application, %+v", err)
		return err
	}
if err = app.run(ctx); err != nil {
log.Printf("[ERROR] remark terminated with error %+v", err)
return err
}
log.Printf("[INFO] remark terminated")
return nil
}
// newServerApp prepares the application and returns it with all active parts.
// It doesn't start anything.
func (s *ServerCommand) newServerApp() (*serverApp, error) {
if err := makeDirs(s.BackupLocation); err != nil {
return nil, err
}
if !strings.HasPrefix(s.RemarkURL, "http://") && !strings.HasPrefix(s.RemarkURL, "https://") {
return nil, errors.Errorf("invalid remark42 url %s", s.RemarkURL)
}
log.Printf("[INFO] root url=%s", s.RemarkURL)
storeEngine, err := s.makeDataStore()
if err != nil {
return nil, errors.Wrap(err, "failed to make data store engine")
}
adminStore, err := s.makeAdminStore()
if err != nil {
return nil, errors.Wrap(err, "failed to make admin store")
}
dataService := &service.DataStore{
Interface: storeEngine,
EditDuration: s.EditDuration,
AdminStore: adminStore,
MaxCommentSize: s.MaxCommentSize,
MaxVotes: s.MaxVotes,
TitleExtractor: service.NewTitleExtractor(http.Client{Timeout: time.Second * 5}),
}
loadingCache, err := s.makeCache()
if err != nil {
return nil, errors.Wrap(err, "failed to make cache")
}
avatarStore, err := s.makeAvatarStore()
if err != nil {
return nil, errors.Wrap(err, "failed to make avatar store")
}
authenticator := s.makeAuthenticator(dataService, avatarStore, adminStore)
exporter := &migrator.Native{DataStore: dataService}
migr := &api.Migrator{
Cache: loadingCache,
NativeImporter: &migrator.Native{DataStore: dataService},
DisqusImporter: &migrator.Disqus{DataStore: dataService},
WordPressImporter: &migrator.WordPress{DataStore: dataService},
NativeExporter: &migrator.Native{DataStore: dataService},
KeyStore: adminStore,
}
notifyService, err := s.makeNotify(dataService)
if err != nil {
log.Printf("[WARN] failed to make notify service, %s", err)
notifyService = notify.NopService // disable notifier
}
imgProxy := &proxy.Image{Enabled: s.ImageProxy, RoutePath: "/api/v1/img", RemarkURL: s.RemarkURL}
commentFormatter := store.NewCommentFormatter(imgProxy)
sslConfig, err := s.makeSSLConfig()
if err != nil {
return nil, errors.Wrap(err, "failed to make config of ssl server params")
}
srv := &api.Rest{
Version: s.Revision,
DataService: dataService,
WebRoot: s.WebRoot,
RemarkURL: s.RemarkURL,
ImageProxy: imgProxy,
CommentFormatter: commentFormatter,
Migrator: migr,
ReadOnlyAge: s.ReadOnlyAge,
SharedSecret: s.SharedSecret,
Authenticator: authenticator,
Cache: loadingCache,
NotifyService: notifyService,
SSLConfig: sslConfig,
}
srv.ScoreThresholds.Low, srv.ScoreThresholds.Critical = s.LowScore, s.CriticalScore
var devAuth *provider.DevAuthServer
if s.Auth.Dev {
da, err := authenticator.DevAuth()
if err != nil {
return nil, errors.Wrap(err, "can't make dev oauth2 server")
}
devAuth = da
}
return &serverApp{
ServerCommand: s,
restSrv: srv,
migratorSrv: migr,
exporter: exporter,
devAuth: devAuth,
dataService: dataService,
avatarStore: avatarStore,
notifyService: notifyService,
terminated: make(chan struct{}),
}, nil
}
// Run all application objects
func (a *serverApp) run(ctx context.Context) error {
if a.AdminPasswd != "" {
log.Printf("[WARN] admin basic auth enabled")
}
go func() {
// shutdown on context cancellation
<-ctx.Done()
log.Print("[INFO] shutdown initiated")
a.restSrv.Shutdown()
if a.devAuth != nil {
a.devAuth.Shutdown()
}
if e := a.dataService.Close(); e != nil {
log.Printf("[WARN] failed to close data store, %s", e)
}
if e := a.avatarStore.Close(); e != nil {
log.Printf("[WARN] failed to close avatar store, %s", e)
}
a.notifyService.Close()
log.Print("[INFO] shutdown completed")
}()
a.activateBackup(ctx) // runs in goroutine for each site
if a.Auth.Dev {
go a.devAuth.Run(context.Background()) // dev oauth2 server on :8084
}
a.restSrv.Run(a.Port)
close(a.terminated)
return nil
}
// Wait for application completion (termination)
func (a *serverApp) Wait() {
<-a.terminated
}
// activateBackup runs background backups for each site
func (a *serverApp) activateBackup(ctx context.Context) {
for _, siteID := range a.Sites {
backup := migrator.AutoBackup{
Exporter: a.exporter,
BackupLocation: a.BackupLocation,
SiteID: siteID,
KeepMax: a.MaxBackupFiles,
Duration: 24 * time.Hour,
}
go backup.Do(ctx)
}
}
// makeDataStore creates store for all sites
func (s *ServerCommand) makeDataStore() (result engine.Interface, err error) {
log.Printf("[INFO] make data store, type=%s", s.Store.Type)
switch s.Store.Type {
case "bolt":
if err = makeDirs(s.Store.Bolt.Path); err != nil {
return nil, errors.Wrap(err, "failed to create bolt store")
}
sites := []engine.BoltSite{}
for _, site := range s.Sites {
sites = append(sites, engine.BoltSite{SiteID: site, FileName: fmt.Sprintf("%s/%s.db", s.Store.Bolt.Path, site)})
}
result, err = engine.NewBoltDB(bolt.Options{Timeout: s.Store.Bolt.Timeout}, sites...)
case "mongo":
mgServer, e := s.makeMongo()
if e != nil {
return result, errors.Wrap(e, "failed to create mongo server")
}
conn := mongo.NewConnection(mgServer, s.Mongo.DB, "")
result, err = engine.NewMongo(conn, 500, 100*time.Millisecond)
default:
return nil, errors.Errorf("unsupported store type %s", s.Store.Type)
}
return result, errors.Wrap(err, "can't initialize data store")
}
func (s *ServerCommand) makeAvatarStore() (avatar.Store, error) {
log.Printf("[INFO] make avatar store, type=%s", s.Avatar.Type)
switch s.Avatar.Type {
case "fs":
if err := makeDirs(s.Avatar.FS.Path); err != nil {
return nil, err
}
return avatar.NewLocalFS(s.Avatar.FS.Path), nil
case "mongo":
mgServer, err := s.makeMongo()
if err != nil {
return nil, errors.Wrap(err, "failed to create mongo server")
}
conn := mongo.NewConnection(mgServer, s.Mongo.DB, "")
return avatar.NewGridFS(conn), nil
case "bolt":
if err := makeDirs(path.Dir(s.Avatar.Bolt.File)); err != nil {
return nil, err
}
return avatar.NewBoltDB(s.Avatar.Bolt.File, bolt.Options{})
}
return nil, errors.Errorf("unsupported avatar store type %s", s.Avatar.Type)
}
func (s *ServerCommand) makeAdminStore() (admin.Store, error) {
log.Printf("[INFO] make admin store, type=%s", s.Admin.Type)
switch s.Admin.Type {
case "shared":
if s.Admin.Shared.Email == "" { // no admin email, use admin@domain
if u, err := url.Parse(s.RemarkURL); err == nil {
s.Admin.Shared.Email = "admin@" + u.Host
}
}
return admin.NewStaticStore(s.SharedSecret, s.Admin.Shared.Admins, s.Admin.Shared.Email), nil
case "mongo":
mgServer, e := s.makeMongo()
if e != nil {
return nil, errors.Wrap(e, "failed to create mongo server")
}
conn := mongo.NewConnection(mgServer, s.Mongo.DB, "admin")
return admin.NewMongoStore(conn, s.SharedSecret), nil
default:
return nil, errors.Errorf("unsupported admin store type %s", s.Admin.Type)
}
}
func (s *ServerCommand) makeCache() (cache.LoadingCache, error) {
log.Printf("[INFO] make cache, type=%s", s.Cache.Type)
switch s.Cache.Type {
case "mem":
return cache.NewMemoryCache(cache.MaxCacheSize(s.Cache.Max.Size), cache.MaxValSize(s.Cache.Max.Value),
cache.MaxKeys(s.Cache.Max.Items))
// case "mongo":
// mgServer, err := s.makeMongo()
// if err != nil {
// return nil, errors.Wrap(err, "failed to create mongo server")
// }
// conn := mongo.NewConnection(mgServer, s.Mongo.DB, "cache")
// return cache.NewMongoCache(conn, cache.MaxCacheSize(s.Cache.Max.Size), cache.MaxValSize(s.Cache.Max.Value),
// cache.MaxKeys(s.Cache.Max.Items))
case "none":
return &cache.Nop{}, nil
}
return nil, errors.Errorf("unsupported cache type %s", s.Cache.Type)
}
func (s *ServerCommand) makeMongo() (result *mongo.Server, err error) {
if s.Mongo.URL == "" {
return nil, errors.New("no mongo URL provided")
}
return mongo.NewServerWithURL(s.Mongo.URL, 10*time.Second)
}
func (s *ServerCommand) addAuthProviders(authenticator *auth.Service) {
providers := 0
if s.Auth.Google.CID != "" && s.Auth.Google.CSEC != "" {
authenticator.AddProvider("google", s.Auth.Google.CID, s.Auth.Google.CSEC)
providers++
}
if s.Auth.Github.CID != "" && s.Auth.Github.CSEC != "" {
authenticator.AddProvider("github", s.Auth.Github.CID, s.Auth.Github.CSEC)
providers++
}
if s.Auth.Facebook.CID != "" && s.Auth.Facebook.CSEC != "" {
authenticator.AddProvider("facebook", s.Auth.Facebook.CID, s.Auth.Facebook.CSEC)
providers++
}
if s.Auth.Yandex.CID != "" && s.Auth.Yandex.CSEC != "" {
authenticator.AddProvider("yandex", s.Auth.Yandex.CID, s.Auth.Yandex.CSEC)
providers++
}
if s.Auth.Dev {
authenticator.AddProvider("dev", "", "")
providers++
}
if providers == 0 {
log.Printf("[WARN] no auth providers defined")
}
}
func (s *ServerCommand) makeNotify(dataStore *service.DataStore) (*notify.Service, error) {
log.Printf("[INFO] make notify, type=%s", s.Notify.Type)
switch s.Notify.Type {
case "telegram":
tg, err := notify.NewTelegram(s.Notify.Telegram.Token, s.Notify.Telegram.Channel,
s.Notify.Telegram.Timeout, s.Notify.Telegram.API)
if err != nil {
return nil, errors.Wrap(err, "failed to create telegram notification destination")
}
return notify.NewService(dataStore, s.Notify.QueueSize, tg), nil
case "none":
return notify.NopService, nil
}
return nil, errors.Errorf("unsupported notification type %q", s.Notify.Type)
}
func (s *ServerCommand) makeSSLConfig() (config api.SSLConfig, err error) {
switch s.SSL.Type {
case "none":
config.SSLMode = api.None
case "static":
if s.SSL.Cert == "" {
return config, errors.New("path to cert.pem is required")
}
if s.SSL.Key == "" {
return config, errors.New("path to key.pem is required")
}
config.SSLMode = api.Static
config.Port = s.SSL.Port
config.Cert = s.SSL.Cert
config.Key = s.SSL.Key
case "auto":
config.SSLMode = api.Auto
config.Port = s.SSL.Port
config.ACMELocation = s.SSL.ACMELocation
if s.SSL.ACMEEmail != "" {
config.ACMEEmail = s.SSL.ACMEEmail
} else if s.Admin.Type == "shared" && s.Admin.Shared.Email != "" {
config.ACMEEmail = s.Admin.Shared.Email
} else if u, e := url.Parse(s.RemarkURL); e == nil {
config.ACMEEmail = "admin@" + u.Hostname()
}
}
return config, err
}
func (s *ServerCommand) makeAuthenticator(ds *service.DataStore, avas avatar.Store, admns admin.Store) *auth.Service {
authenticator := auth.NewService(auth.Opts{
URL: strings.TrimSuffix(s.RemarkURL, "/"),
Issuer: "remark42",
TokenDuration: s.Auth.TTL.JWT,
CookieDuration: s.Auth.TTL.Cookie,
SecureCookies: strings.HasPrefix(s.RemarkURL, "https://"),
SecretReader: token.SecretFunc(func() (string, error) { // get secret per site
return admns.Key()
}),
ClaimsUpd: token.ClaimsUpdFunc(func(c token.Claims) token.Claims { // set attributes, on new token or refresh
if c.User == nil {
return c
}
c.User.SetAdmin(ds.IsAdmin(c.Audience, c.User.ID))
c.User.SetBoolAttr("blocked", ds.IsBlocked(c.Audience, c.User.ID))
return c
}),
AdminPasswd: s.AdminPasswd,
Validator: token.ValidatorFunc(func(token string, claims token.Claims) bool { // check on each auth call (in middleware)
if claims.User == nil {
return false
}
return !claims.User.BoolAttr("blocked")
}),
AvatarStore: avas,
AvatarResizeLimit: s.Avatar.RszLmt,
AvatarRoutePath: "/api/v1/avatar",
Logger: lgr.Default(),
})
s.addAuthProviders(authenticator)
return authenticator
}
| [] | [] | [] | [] | [] | go | null | null |
database/driver.go | // Package database provides the Database interface.
// All database drivers must implement this interface, register themselves,
// optionally provide a `WithInstance` function and pass the tests
// in package database/testing.
package database
import (
"fmt"
"io"
"sync"
iurl "github.com/brettbuddin/migrate/v4/internal/url"
)
var (
ErrLocked = fmt.Errorf("can't acquire lock")
ErrNotLocked = fmt.Errorf("can't unlock, as not currently locked")
)
const NilVersion int = -1
var driversMu sync.RWMutex
var drivers = make(map[string]Driver)
// Driver is the interface every database driver must implement.
//
// How to implement a database driver?
// 1. Implement this interface.
// 2. Optionally, add a function named `WithInstance`.
// This function should accept an existing DB instance and a Config{} struct
// and return a driver instance.
// 3. Add a test that calls database/testing.go:Test()
// 4. Add own tests for Open(), WithInstance() (when provided) and Close().
// All other functions are tested by tests in database/testing.
// Saves you some time and makes sure all database drivers behave the same way.
// 5. Call Register in init().
// 6. Create an internal/cli/build_<driver-name>.go file
// 7. Add driver name in 'DATABASE' variable in Makefile
//
// Guidelines:
// * Don't try to correct user input. Don't assume things.
// When in doubt, return an error and explain the situation to the user.
// * All configuration input must come from the URL string in func Open()
// or the Config{} struct in WithInstance. Don't os.Getenv().
type Driver interface {
// Open returns a new driver instance configured with parameters
// coming from the URL string. Migrate will call this function
// only once per instance.
Open(url string) (Driver, error)
// Close closes the underlying database instance managed by the driver.
// Migrate will call this function only once per instance.
Close() error
// Lock should acquire a database lock so that only one migration process
// can run at a time. Migrate will call this function before Run is called.
// If the implementation can't provide this functionality, return nil.
// Return database.ErrLocked if database is already locked.
Lock() error
// Unlock should release the lock. Migrate will call this function after
// all migrations have been run.
Unlock() error
// Run applies a migration to the database. migration is guaranteed to be not nil.
Run(migration io.Reader) error
// SetVersion saves version and dirty state.
// Migrate will call this function before and after each call to Run.
// version must be >= -1. -1 means NilVersion.
SetVersion(version int, dirty bool) error
// Version returns the currently active version and if the database is dirty.
// When no migration has been applied, it must return version -1.
// Dirty means a previous migration failed and user interaction is required.
Version() (version int, dirty bool, err error)
// Drop deletes everything in the database.
// Note that this is a breaking action, a new call to Open() is necessary to
// ensure subsequent calls work as expected.
Drop() error
}
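// Illustrative sketch (added for clarity, not part of the original package): a minimal,
// hypothetical no-op driver showing the shape of an implementation that follows the
// steps above. A real driver would talk to an actual database, keep the version/dirty
// state there, and register itself from init(), e.g.
//	func init() { Register("stub", &stubDriver{version: NilVersion}) }
type stubDriver struct {
	version int
	dirty   bool
}
func (d *stubDriver) Open(url string) (Driver, error) { return d, nil } // configuration would come from the URL
func (d *stubDriver) Close() error                    { return nil }
func (d *stubDriver) Lock() error                     { return nil } // return ErrLocked if the lock is already held
func (d *stubDriver) Unlock() error                   { return nil }
func (d *stubDriver) Run(migration io.Reader) error   { return nil } // a real driver applies the statements read from migration
func (d *stubDriver) SetVersion(version int, dirty bool) error {
	d.version, d.dirty = version, dirty
	return nil
}
func (d *stubDriver) Version() (int, bool, error) { return d.version, d.dirty, nil }
func (d *stubDriver) Drop() error                 { return nil }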
// Open returns a new driver instance.
func Open(url string) (Driver, error) {
scheme, err := iurl.SchemeFromURL(url)
if err != nil {
return nil, err
}
driversMu.RLock()
d, ok := drivers[scheme]
driversMu.RUnlock()
if !ok {
return nil, fmt.Errorf("database driver: unknown driver %v (forgotten import?)", scheme)
}
return d.Open(url)
}
// Register globally registers a driver.
func Register(name string, driver Driver) {
driversMu.Lock()
defer driversMu.Unlock()
if driver == nil {
panic("Register driver is nil")
}
if _, dup := drivers[name]; dup {
panic("Register called twice for driver " + name)
}
drivers[name] = driver
}
// List lists the registered drivers
func List() []string {
driversMu.RLock()
defer driversMu.RUnlock()
names := make([]string, 0, len(drivers))
for n := range drivers {
names = append(names, n)
}
return names
}
| [] | [] | [] | [] | [] | go | 0 | 0 |
nilearn/plotting/__init__.py | """
Plotting code for nilearn
"""
# Original Authors: Chris Filo Gorgolewski, Gael Varoquaux
import os
import sys
import importlib
###############################################################################
# Make sure that we don't get DISPLAY problems when running without X on
# unices
def _set_mpl_backend():
# We are doing local imports here to avoid polluting our namespace
try:
import matplotlib
except ImportError:
if importlib.util.find_spec("pytest") is not None:
from .._utils.testing import skip_if_running_tests
# No need to fail when running tests
skip_if_running_tests('matplotlib not installed')
raise
else:
from ..version import (_import_module_with_version_check,
OPTIONAL_MATPLOTLIB_MIN_VERSION)
# When matplotlib was successfully imported we need to check
# that the version is greater that the minimum required one
_import_module_with_version_check('matplotlib',
OPTIONAL_MATPLOTLIB_MIN_VERSION)
current_backend = matplotlib.get_backend().lower()
if 'inline' in current_backend or 'nbagg' in current_backend:
return
# Set the backend to a non-interactive one for unices without X
# (see gh-2560)
if (sys.platform not in ('darwin', 'win32') and
'DISPLAY' not in os.environ):
matplotlib.use('Agg')
_set_mpl_backend()
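# Example (illustrative): on a headless Linux machine with no DISPLAY variable set,
# importing nilearn.plotting switches matplotlib to the non-interactive "Agg" backend
# so figures can still be rendered and saved to disk; notebook backends such as
# inline/nbagg are left untouched.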
###############################################################################
from . import cm
from .img_plotting import (
plot_img, plot_anat, plot_epi, plot_roi, plot_stat_map,
plot_glass_brain, plot_connectome, plot_connectome_strength,
plot_markers, plot_prob_atlas, plot_carpet, plot_img_comparison, show)
from .find_cuts import find_xyz_cut_coords, find_cut_slices, \
find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords
from .matrix_plotting import (plot_matrix, plot_contrast_matrix,
plot_design_matrix, plot_event)
from .html_surface import view_surf, view_img_on_surf
from .html_stat_map import view_img
from .html_connectome import view_connectome, view_markers
from .surf_plotting import (plot_surf, plot_surf_stat_map, plot_surf_roi,
plot_img_on_surf, plot_surf_contours)
__all__ = ['cm', 'plot_img', 'plot_anat', 'plot_epi',
'plot_roi', 'plot_stat_map', 'plot_glass_brain',
'plot_markers', 'plot_connectome', 'plot_prob_atlas',
'find_xyz_cut_coords', 'find_cut_slices',
'plot_img_comparison',
'show', 'plot_matrix',
'plot_design_matrix', 'plot_contrast_matrix', 'plot_event',
'view_surf', 'view_img_on_surf',
'view_img', 'view_connectome', 'view_markers',
'find_parcellation_cut_coords',
'find_probabilistic_atlas_cut_coords',
'plot_surf', 'plot_surf_stat_map', 'plot_surf_roi',
'plot_img_on_surf', 'plot_connectome_strength', 'plot_carpet',
'plot_surf_contours']
| [] | [] | [] | [] | [] | python | 0 | 0 |
scripts/install-agent.py | import argparse
import json
import logging
import os
import subprocess
from subprocess import Popen
import tempfile
from time import sleep
import sys
logging.basicConfig(level=logging.WARN)
log = logging.getLogger(os.path.basename(__file__))
# Determine whether the script is being run from an activated (virtual) environment.
# If it is not, the script re-invokes itself with the correct python interpreter.
if not hasattr(sys, 'real_prefix'):
inenv = False
else:
inenv = True
if os.environ.get('WAS_CORRECTED'):
corrected = True
else:
corrected = False
# Call the script with the correct environment if we aren't activated yet.
if not inenv and not corrected:
mypath = os.path.dirname(__file__)
# Travis-CI puts the python in a little bit different location than
# we do.
if os.environ.get('CI') is not None:
        correct_python = subprocess.check_output(['which', 'python']).strip()
else:
correct_python = os.path.abspath(
os.path.join(mypath, '../env/bin/python'))
if not os.path.exists(correct_python):
log.error("Invalid location for the script {}".format(correct_python))
sys.exit(-10)
# Call this script in a subprocess with the correct python interpreter.
cmds = [correct_python, __file__]
cmds.extend(sys.argv[1:])
process = subprocess.Popen(cmds, env=os.environ)
process.wait()
sys.exit(process.returncode)
from zmq.utils import jsonapi
from volttron.platform import get_address, get_home, get_volttron_root, \
is_instance_running
from volttron.platform.packaging import create_package, add_files_to_package
__version__ = '0.2'
def _build_copy_env(opts):
env = os.environ.copy()
env['VOLTTRON_HOME'] = opts.volttron_home
env['VIP_ADDRESS'] = opts.vip_address
return env
def identity_exists(opts, identity):
env = _build_copy_env(opts)
cmds = [opts.volttron_control, "status"]
process = subprocess.Popen(cmds, env=env, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdoutdata, stderrdata) = process.communicate()
for x in stdoutdata.split("\n"):
if x:
line_split = x.split()
if identity == line_split[2]:
return line_split[0]
return False
def remove_agent(opts, agent_uuid):
env = _build_copy_env(opts)
cmds = [opts.volttron_control, "remove", agent_uuid]
process = subprocess.Popen(cmds, env=env, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
process.wait()
def install_agent(opts, package, config):
"""
The main installation method for installing the agent on the correct local
platform instance.
:param opts:
:param package:
:param config:
:return:
"""
if config is None:
config = {}
# if not a dict then config should be a filename
if not isinstance(config, dict):
config_file = config
else:
cfg = tempfile.NamedTemporaryFile()
with open(cfg.name, 'w') as fout:
fout.write(jsonapi.dumps(config))
config_file = cfg.name
try:
with open(config_file) as fp:
data = json.load(fp)
except:
log.error("Invalid json config file.")
sys.exit(-10)
# Configure the whl file before installing.
add_files_to_package(opts.package, {'config_file': config_file})
env = _build_copy_env(opts)
if opts.vip_identity:
cmds = [opts.volttron_control, "upgrade", opts.vip_identity, package]
else:
cmds = [opts.volttron_control, "install", package]
if opts.tag:
cmds.extend(["--tag", opts.tag])
process = Popen(cmds, env=env, stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
(output, errorout) = process.communicate()
parsed = output.split("\n")
# If there is not an agent with that identity:
# 'Could not find agent with VIP IDENTITY "BOO". Installing as new agent
# Installed /home/volttron/.volttron/packaged/listeneragent-3.2-py2-none-any.whl as 6ccbf8dc-4929-4794-9c8e-3d8c6a121776 listeneragent-3.2'
# The following is standard output of an agent that was previously installed
# If the agent was not previously installed then only the second line
# would have been output to standard out.
#
# Removing previous version of agent "foo"
# Installed /home/volttron/.volttron/packaged/listeneragent-3.2-py2-none-any.whl as 81b811ff-02b5-482e-af01-63d2fd95195a listeneragent-3.2
if 'Could not' in parsed[0]:
agent_uuid = parsed[1].split()[-2]
elif 'Removing' in parsed[0]:
agent_uuid = parsed[1].split()[-2]
else:
agent_uuid = parsed[0].split()[-2]
output_dict = dict(agent_uuid=agent_uuid)
if opts.start:
cmds = [opts.volttron_control, "start", agent_uuid]
process = Popen(cmds, env=env, stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
(outputdata, errordata) = process.communicate()
# Expected output on standard out
# Starting 83856b74-76dc-4bd9-8480-f62bd508aa9c listeneragent-3.2
if 'Starting' in outputdata:
output_dict['starting'] = True
if opts.enable:
cmds = [opts.volttron_control, "enable", agent_uuid]
if opts.priority != -1:
cmds.extend(["--priority", str(opts.priority)])
process = Popen(cmds, env=env, stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
(outputdata, errordata) = process.communicate()
# Expected output from standard out
# Enabling 6bcee29b-7af3-4361-a67f-7d3c9e986419 listeneragent-3.2 with priority 50
if "Enabling" in outputdata:
output_dict['enabling'] = True
output_dict['priority'] = outputdata.split("\n")[0].split()[-1]
if opts.start:
        # Pause briefly (originally agent_start_time seconds) before verifying that the agent has started.
#sleep(opts.agent_start_time)
sleep(0.5)
cmds = [opts.volttron_control, "status", agent_uuid]
process = Popen(cmds, env=env, stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
(outputdata, errordata) = process.communicate()
# 5 listeneragent-3.2 foo running [10737]
output_dict["started"] = "running" in outputdata
if output_dict["started"]:
pidpos = outputdata.index('[') + 1
pidend = outputdata.index(']')
output_dict['agent_pid'] = int(outputdata[pidpos: pidend])
if opts.json:
sys.stdout.write("%s\n" % json.dumps(output_dict, indent=4))
if opts.csv:
keylen = len(output_dict.keys())
keyline = ''
valueline = ''
keys = output_dict.keys()
for k in range(keylen):
if k < keylen - 1:
keyline += "%s," % keys[k]
valueline += "%s," % output_dict[keys[k]]
else:
keyline += "%s" % keys[k]
valueline += "%s" % output_dict[keys[k]]
sys.stdout.write("%s\n%s\n" % (keyline, valueline))
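# Example invocation (illustrative only; the agent path, config file and identity below
# are hypothetical and depend on your checkout):
#   python scripts/install-agent.py -s examples/ListenerAgent -c examples/ListenerAgent/config \
#       -i listener.primary --tag listener --start --enable --json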
if __name__ == '__main__':
parser = argparse.ArgumentParser(version=__version__)
parser.add_argument("-a", "--vip-address", default=get_address(),
help="vip-address to connect to.")
parser.add_argument("-vh", "--volttron-home", default=get_home(),
help="local volttron-home for the instance.")
parser.add_argument("-vr", "--volttron-root", default=get_volttron_root(),
help="location of the volttron root on the filesystem.")
parser.add_argument("-s", "--agent-source", required=True,
help="source directory of the agent which is to be installed.")
parser.add_argument("-i", "--vip-identity", default=None,
help="identity of the agent to be installed (unique per instance)")
parser.add_argument("-c", "--config", default=None, type=file,
help="agent configuration file that will be packaged with the agent.")
parser.add_argument("-wh", "--wheelhouse", default=None,
help="location of agents after they have been built")
parser.add_argument("-t", "--tag", default=None,
help="a tag is a means of identifying an agent.")
parser.add_argument("-f", "--force", action='store_true',
help="agents are uninstalled by tag so force allows multiple agents to be removed at one go.")
parser.add_argument("--priority", default=-1, type=int,
help="priority of startup during instance startup")
parser.add_argument("--start", action='store_true',
help="start the agent during the script execution")
parser.add_argument("--enable", action='store_true',
help="enable the agent with default 50 priority unless --priority set")
parser.add_argument("-st", "--agent-start-time", default=5, type=int,
help="the amount of time to wait and verify that the agent has started up.")
parser.add_argument("--csv", action='store_true',
help="format the standard out output to csv")
parser.add_argument("--json", action="store_true",
                        help="format the standard out output to json")
opts = parser.parse_args()
agent_source = opts.agent_source
if not os.path.isdir(agent_source):
if os.path.isdir(os.path.join(opts.volttron_root, agent_source)):
agent_source = os.path.join(opts.volttron_root, agent_source)
else:
log.error("Invalid agent source directory specified.")
sys.exit(-10)
opts.agent_source = agent_source
if not os.path.isfile(os.path.join(agent_source, "setup.py")):
log.error("Agent source must contain a setup.py file.")
sys.exit(-10)
if opts.volttron_home.endswith('/'):
log.warn("VOLTTRON_HOME should not have / on the end trimming it.")
opts.volttron_home = opts.volttron_home[:-1]
if not is_instance_running(opts.volttron_home):
log.error("The instance at {} is not running".format(
opts.volttron_home))
sys.exit(-10)
wheelhouse = opts.wheelhouse
if not wheelhouse:
wheelhouse = os.path.join(opts.volttron_home, "packaged")
opts.wheelhouse = wheelhouse
if opts.priority != -1:
if opts.priority < 0 or opts.priority >= 100:
log.error("Invalid priority specified must be between 1, 100")
sys.exit(-10)
opts.enable = True
if opts.json and opts.csv:
opts.csv = False
elif not opts.json and not opts.csv:
opts.json = True
if os.environ.get('CI') is not None:
opts.volttron_control = "volttron-ctl"
else:
opts.volttron_control = os.path.join(opts.volttron_root,
"env/bin/volttron-ctl")
if opts.vip_identity is not None:
# if the identity exists the variable will have the agent uuid in it.
exists = identity_exists(opts, opts.vip_identity)
if exists:
if not opts.force:
log.error(
"identity already exists, but force wasn't specified.")
sys.exit(-10)
# Note we don't remove the agent here because if we do that will
# not allow us to update without losing the keys. The
# install_agent method either installs or upgrades the agent.
if opts.force and opts.vip_identity is None:
# If force is specified then identity must be specified to indicate the target of the force
log.error(
"Force option specified without a target identity to force.")
sys.exit(-10)
opts.package = create_package(agent_source, wheelhouse, opts.vip_identity)
if not os.path.isfile(opts.package):
log.error("The wheel file for the agent was unable to be created.")
sys.exit(-10)
jsonobj = None
if opts.config:
tmpconfigfile = tempfile.NamedTemporaryFile()
with open(tmpconfigfile.name, 'w') as fout:
for line in opts.config:
line = line.partition('#')[0]
if line.rstrip():
fout.write(line.rstrip())
config_file = tmpconfigfile.name
try:
with open(tmpconfigfile.name) as f:
opts.config = jsonapi.loads(f.read())
finally:
tmpconfigfile.close()
if opts.config:
install_agent(opts, opts.package, opts.config)
else:
install_agent(opts, opts.package, {})
| [] | [] | ["WAS_CORRECTED", "CI"] | [] | ["WAS_CORRECTED", "CI"] | python | 2 | 0 |
_archived/3rdpkg/go-ipc/fifo/example_process.go | package main
import (
"log"
"os"
"os/exec"
"bitbucket.org/avd/go-ipc/fifo"
)
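// Flow sketch (comment added for clarity, not in the original file): the parent process
// re-executes its own binary with _WORKER=on; the child opens the FIFO for writing and
// sends testData, while the parent opens it for reading and verifies that the same
// bytes arrive.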
func _main() {
testData := []byte{1, 2, 3, 4, 5, 6, 7, 8}
if os.Getenv("_WORKER") == "on" {
log.Printf("pid %v: start worker", os.Getpid())
wfifo, err := fifo.New("fifo", os.O_CREATE|os.O_WRONLY, 0666)
panicIf(err)
defer wfifo.Close()
if written, err := wfifo.Write(testData); err != nil || written != len(testData) {
panic("write")
}
return
}
log.Printf("pid %v: start master", os.Getpid())
launchWorker()
buff := make([]byte, len(testData))
rfifo, err := fifo.New("fifo", os.O_CREATE|os.O_RDONLY, 0666)
panicIf(err)
defer rfifo.Close()
if read, err := rfifo.Read(buff); err != nil || read != len(testData) {
panic("read")
}
// ensure we've received valid data
for i, b := range buff {
println(b)
if b != testData[i] {
panic("wrong data")
}
}
}
func panicIf(err error) {
if err != nil {
panic(err)
}
}
func launchWorker() (*os.Process, error) {
// launch worker
bin, err := os.Executable()
if err != nil {
return nil, err
}
cmd := exec.Command(bin)
cmd.Env = append(cmd.Env, "_WORKER=on")
cmd.Env = append(cmd.Env, "TMPDIR="+os.TempDir())
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Start(); err != nil {
return nil, err
}
return cmd.Process, nil
}
| ["\"_WORKER\""] | [] | ["_WORKER"] | [] | ["_WORKER"] | go | 1 | 0 |
pkg/logger/logger.go | package logger
import (
"os"
"strconv"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
var logger = NewLogger()
// NewLogger returns a new instance of zap sugar logger
func NewLogger() *zap.SugaredLogger {
logger := newZap()
defer logger.Sync()
return logger.Sugar()
}
// Debug aliases zap.Debugw to be able to log a message
// with optional context
func Debug(msg string, args ...interface{}) {
logger.Debugw(msg, args...)
}
// Info aliases zap.Infow to be able to log a message
// with optional context
func Info(msg string, args ...interface{}) {
logger.Infow(msg, args...)
}
// Warn aliases zap.Warnw to be able to log a message
// with optional context
func Warn(msg string, args ...interface{}) {
logger.Warnw(msg, args...)
}
// Error aliases zap.Errorw to be able to log a message
// with optional context
func Error(msg string, args ...interface{}) {
logger.Errorw(msg, args...)
}
// Fatal aliases zap.Fatalw to be able to log a message
// with optional context
func Fatal(msg string, args ...interface{}) {
logger.Fatalw(msg, args...)
}
// Debugf aliases zap.Debugf
func Debugf(msg string, args ...interface{}) {
logger.Debugf(msg, args...)
}
// Infof aliases zap.Infof
func Infof(msg string, args ...interface{}) {
logger.Infof(msg, args...)
}
// Warnf aliases zap.Warnf
func Warnf(msg string, args ...interface{}) {
logger.Warnf(msg, args...)
}
// Errorf aliases zap.Errorf
func Errorf(msg string, args ...interface{}) {
logger.Errorf(msg, args...)
}
// Fatalf aliases zap.Fatalf
func Fatalf(msg string, args ...interface{}) {
	logger.Fatalf(msg, args...)
}
func newZap() *zap.Logger {
// send anything above or equal to error level to stderr
highPriority := zap.LevelEnablerFunc(func(loggingLvl zapcore.Level) bool {
return loggingLvl >= zapcore.ErrorLevel
})
// send everything less than error level to stdout
// except debug level when debugging is turned off and vice versa
lowPriority := zap.LevelEnablerFunc(func(loggingLvl zapcore.Level) bool {
isLessThanErr := loggingLvl < zapcore.ErrorLevel
if isDebugMode() {
return isLessThanErr
}
return isLessThanErr && loggingLvl != zapcore.DebugLevel
})
consoleDebugging := zapcore.Lock(os.Stdout)
consoleErrors := zapcore.Lock(os.Stderr)
consoleEncoder := zapcore.NewJSONEncoder(getEncoderCfg())
core := zapcore.NewTee(
zapcore.NewCore(consoleEncoder, consoleErrors, highPriority),
zapcore.NewCore(consoleEncoder, consoleDebugging, lowPriority),
)
return zap.New(core,
zap.AddCallerSkip(1),
zap.AddCaller(),
zap.AddStacktrace(highPriority), // add stack traces for levels above >=error only
)
}
func getEncoderCfg() zapcore.EncoderConfig {
return zapcore.EncoderConfig{
TimeKey: "timestamp",
LevelKey: "level",
NameKey: "logger",
CallerKey: "caller",
MessageKey: "message",
StacktraceKey: "stacktrace",
LineEnding: zapcore.DefaultLineEnding,
EncodeLevel: zapcore.CapitalLevelEncoder,
EncodeTime: zapcore.ISO8601TimeEncoder,
EncodeDuration: zapcore.SecondsDurationEncoder,
EncodeCaller: zapcore.ShortCallerEncoder,
}
}
func isDebugMode() bool {
mode := os.Getenv("DEBUG")
modeBool, _ := strconv.ParseBool(mode)
return modeBool
}
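// Usage sketch (illustrative, not part of the original file): callers use the
// package-level helpers with a message plus alternating key/value context, e.g.
//	logger.Info("user created", "id", 42)
//	logger.Debugf("retrying in %s", delay) // debug lines are emitted only when DEBUG=true
// Messages at error level and above go to stderr with stack traces; everything else
// goes to stdout as JSON.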
| ["\"DEBUG\""] | [] | ["DEBUG"] | [] | ["DEBUG"] | go | 1 | 0 |
spotify/player_test.go | // +build integration
package spotify
import (
"context"
"os"
"testing"
"time"
"github.com/camphor-/relaym-server/domain/service"
"golang.org/x/oauth2"
"github.com/camphor-/relaym-server/config"
)
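// How to run (illustrative note, not in the original file): these integration tests
// only build with the "integration" tag and need a real refresh token, e.g.
//	SPOTIFY_REFRESH_TOKEN_FOR_TEST=<token> go test -tags integration ./spotify/...
// The Play/Pause/Enqueue tests additionally assume an active Spotify playback device.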
func TestClient_CurrentlyPlaying(t *testing.T) {
tests := []struct {
name string
want bool
wantErr bool
}{
{
			name:    "returns false when nothing is playing",
want: false,
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewClient(config.NewSpotify())
token := &oauth2.Token{
AccessToken: "",
TokenType: "Bearer",
RefreshToken: os.Getenv("SPOTIFY_REFRESH_TOKEN_FOR_TEST"),
Expiry: time.Now(),
}
token, err := c.Refresh(token)
if err != nil {
t.Fatal(err)
}
ctx := context.Background()
ctx = service.SetTokenToContext(ctx, token)
got, err := c.CurrentlyPlaying(ctx)
if (err != nil) != tt.wantErr {
t.Errorf("CurrentlyPlaying() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got.Playing != tt.want {
t.Errorf("CurrentlyPlaying() got = %v, want %v", got, tt.want)
}
})
}
}
func TestClient_Play(t *testing.T) {
tests := []struct {
name string
wantErr bool
}{
{
			name:    "the current track starts playing",
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewClient(config.NewSpotify())
token := &oauth2.Token{
AccessToken: "",
TokenType: "Bearer",
RefreshToken: os.Getenv("SPOTIFY_REFRESH_TOKEN_FOR_TEST"),
Expiry: time.Now(),
}
token, err := c.Refresh(token)
if err != nil {
t.Fatal(err)
}
ctx := context.Background()
ctx = service.SetTokenToContext(ctx, token)
if err := c.Play(ctx, ""); (err != nil) != tt.wantErr {
t.Errorf("Play() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestClient_Pause(t *testing.T) {
tests := []struct {
name string
wantErr bool
}{
{
			name:    "the track is paused",
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewClient(config.NewSpotify())
token := &oauth2.Token{
AccessToken: "",
TokenType: "Bearer",
RefreshToken: os.Getenv("SPOTIFY_REFRESH_TOKEN_FOR_TEST"),
Expiry: time.Now(),
}
token, err := c.Refresh(token)
if err != nil {
t.Fatal(err)
}
ctx := context.Background()
ctx = service.SetTokenToContext(ctx, token)
if err := c.Pause(ctx, ""); (err != nil) != tt.wantErr {
t.Errorf("Pause() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestClient_Enqueue(t *testing.T) {
tests := []struct {
name string
trackID string
wantErr bool
}{
{
			name:    "a track can be added to the queue",
trackID: "spotify:track:49BRCNV7E94s7Q2FUhhT3w",
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewClient(config.NewSpotify())
token := &oauth2.Token{
AccessToken: "",
TokenType: "Bearer",
RefreshToken: os.Getenv("SPOTIFY_REFRESH_TOKEN_FOR_TEST"),
Expiry: time.Now(),
}
token, err := c.Refresh(token)
if err != nil {
t.Fatal(err)
}
ctx := context.Background()
ctx = service.SetTokenToContext(ctx, token)
if err := c.Enqueue(ctx, tt.trackID, ""); (err != nil) != tt.wantErr {
t.Errorf("Enqueue() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestClient_SetRepeatMode(t *testing.T) {
tests := []struct {
name string
on bool
wantErr bool
}{
{
			name:    "repeat mode can be turned off",
on: false,
wantErr: false,
},
{
			name:    "repeat mode can be turned on",
on: true,
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewClient(config.NewSpotify())
token := &oauth2.Token{
AccessToken: "",
TokenType: "Bearer",
RefreshToken: os.Getenv("SPOTIFY_REFRESH_TOKEN_FOR_TEST"),
Expiry: time.Now(),
}
token, err := c.Refresh(token)
if err != nil {
t.Fatal(err)
}
ctx := context.Background()
ctx = service.SetTokenToContext(ctx, token)
if err := c.SetRepeatMode(ctx, tt.on, ""); (err != nil) != tt.wantErr {
t.Errorf("SetRepeatMode() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
// Before running this test, queue up a "next track to be played" on the Spotify side.
func TestClient_DeleteAllTracksInQueueTracks(t *testing.T) {
tests := []struct {
name string
wantErr string
}{
{
			name:    "works as expected",
wantErr: "",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewClient(config.NewSpotify())
token := &oauth2.Token{
AccessToken: "",
TokenType: "Bearer",
RefreshToken: os.Getenv("SPOTIFY_REFRESH_TOKEN_FOR_TEST"),
Expiry: time.Now(),
}
token, err := c.Refresh(token)
if err != nil {
t.Fatal(err)
}
ctx := context.Background()
ctx = service.SetTokenToContext(ctx, token)
if err := c.DeleteAllTracksInQueue(ctx, "", "spotify:track:5uQ0vKy2973Y9IUCd1wMEF"); err.Error() != tt.wantErr {
t.Errorf("DeleteAllTracksInQueue() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
| ["\"SPOTIFY_REFRESH_TOKEN_FOR_TEST\"", "\"SPOTIFY_REFRESH_TOKEN_FOR_TEST\"", "\"SPOTIFY_REFRESH_TOKEN_FOR_TEST\"", "\"SPOTIFY_REFRESH_TOKEN_FOR_TEST\"", "\"SPOTIFY_REFRESH_TOKEN_FOR_TEST\"", "\"SPOTIFY_REFRESH_TOKEN_FOR_TEST\""] | [] | ["SPOTIFY_REFRESH_TOKEN_FOR_TEST"] | [] | ["SPOTIFY_REFRESH_TOKEN_FOR_TEST"] | go | 1 | 0 |
sourcecode7/src/sun/font/SunFontManager.java | /*
* Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package sun.font;
import java.awt.Font;
import java.awt.FontFormatException;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.InputStreamReader;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.Locale;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.StringTokenizer;
import java.util.TreeMap;
import java.util.Vector;
import java.util.concurrent.ConcurrentHashMap;
import javax.swing.plaf.FontUIResource;
import sun.awt.AppContext;
import sun.awt.FontConfiguration;
import sun.awt.SunToolkit;
import sun.java2d.FontSupport;
import sun.util.logging.PlatformLogger;
/**
* The base implementation of the {@link FontManager} interface. It implements
* the platform independent, shared parts of OpenJDK's FontManager
* implementations. The platform specific parts are declared as abstract
* methods that have to be implemented by specific implementations.
*/
public abstract class SunFontManager implements FontSupport, FontManagerForSGE {
private static class TTFilter implements FilenameFilter {
public boolean accept(File dir, String name) {
/* all conveniently have the same suffix length */
int offset = name.length() - 4;
if (offset <= 0) { /* must be at least A.ttf */
return false;
} else {
return (name.startsWith(".ttf", offset) || name.startsWith(".TTF", offset) ||
name.startsWith(".ttc", offset) || name.startsWith(".TTC", offset) ||
name.startsWith(".otf", offset) || name.startsWith(".OTF", offset));
}
}
}
private static class T1Filter implements FilenameFilter {
public boolean accept(File dir, String name) {
if (noType1Font) {
return false;
}
/* all conveniently have the same suffix length */
int offset = name.length() - 4;
if (offset <= 0) { /* must be at least A.pfa */
return false;
} else {
return (name.startsWith(".pfa", offset) || name.startsWith(".pfb", offset) ||
name.startsWith(".PFA", offset) || name.startsWith(".PFB", offset));
}
}
}
private static class TTorT1Filter implements FilenameFilter {
public boolean accept(File dir, String name) {
/* all conveniently have the same suffix length */
int offset = name.length() - 4;
if (offset <= 0) { /* must be at least A.ttf or A.pfa */
return false;
} else {
boolean isTT = name.startsWith(".ttf", offset) || name.startsWith(".TTF", offset) ||
name.startsWith(".ttc", offset) || name.startsWith(".TTC", offset) ||
name.startsWith(".otf", offset) || name.startsWith(".OTF", offset);
if (isTT) {
return true;
} else if (noType1Font) {
return false;
} else {
return (name.startsWith(".pfa", offset) || name.startsWith(".pfb", offset) ||
name.startsWith(".PFA", offset) || name.startsWith(".PFB", offset));
}
}
}
}
public static final int FONTFORMAT_NONE = -1;
public static final int FONTFORMAT_TRUETYPE = 0;
public static final int FONTFORMAT_TYPE1 = 1;
public static final int FONTFORMAT_T2K = 2;
public static final int FONTFORMAT_TTC = 3;
public static final int FONTFORMAT_COMPOSITE = 4;
public static final int FONTFORMAT_NATIVE = 5;
/* Pool of 20 font file channels chosen because some UTF-8 locale
* composite fonts can use up to 16 platform fonts (including the
* Lucida fall back). This should prevent channel thrashing when
* dealing with one of these fonts.
* The pool array stores the fonts, rather than directly referencing
* the channels, as the font needs to do the open/close work.
*/
private static final int CHANNELPOOLSIZE = 20;
private int lastPoolIndex = 0;
private FileFont fontFileCache[] = new FileFont[CHANNELPOOLSIZE];
/* Need to implement a simple linked list scheme for fast
* traversal and lookup.
* Also want to "fast path" dialog so there's minimal overhead.
*/
    /* There are exactly 20 composite fonts: 5 faces (but some are not
* usually different), in 4 styles. The array may be auto-expanded
* later if more are needed, eg for user-defined composites or locale
* variants.
*/
private int maxCompFont = 0;
private CompositeFont[] compFonts = new CompositeFont[20];
private ConcurrentHashMap<String, CompositeFont> compositeFonts = new ConcurrentHashMap<String, CompositeFont>();
private ConcurrentHashMap<String, PhysicalFont> physicalFonts = new ConcurrentHashMap<String, PhysicalFont>();
private ConcurrentHashMap<String, PhysicalFont> registeredFonts = new ConcurrentHashMap<String, PhysicalFont>();
/* given a full name find the Font. Remind: there's duplication
* here in that this contains the content of compositeFonts +
* physicalFonts.
*/
private ConcurrentHashMap<String, Font2D> fullNameToFont = new ConcurrentHashMap<String, Font2D>();
/* TrueType fonts have localised names. Support searching all
* of these before giving up on a name.
*/
private HashMap<String, TrueTypeFont> localeFullNamesToFont;
private PhysicalFont defaultPhysicalFont;
static boolean longAddresses;
private boolean loaded1dot0Fonts = false;
boolean loadedAllFonts = false;
boolean loadedAllFontFiles = false;
HashMap<String, String> jreFontMap;
HashSet<String> jreLucidaFontFiles;
String[] jreOtherFontFiles;
boolean noOtherJREFontFiles = false; // initial assumption.
public static final String lucidaFontName = "Lucida Sans Regular";
public static String jreLibDirName;
public static String jreFontDirName;
private static HashSet<String> missingFontFiles = null;
private String defaultFontName;
private String defaultFontFileName;
protected HashSet registeredFontFiles = new HashSet();
private ArrayList badFonts;
/* fontPath is the location of all fonts on the system, excluding the
* JRE's own font directory but including any path specified using the
* sun.java2d.fontpath property. Together with that property, it is
* initialised by the getPlatformFontPath() method
* This call must be followed by a call to registerFontDirs(fontPath)
* once any extra debugging path has been appended.
*/
protected String fontPath;
private FontConfiguration fontConfig;
/* discoveredAllFonts is set to true when all fonts on the font path are
* discovered. This usually also implies opening, validating and
     * registering, but an implementation may be optimized to avoid this.
* So see also "loadedAllFontFiles"
*/
private boolean discoveredAllFonts = false;
/* No need to keep consing up new instances - reuse a singleton.
* The trade-off is that these objects don't get GC'd.
*/
private static final FilenameFilter ttFilter = new TTFilter();
private static final FilenameFilter t1Filter = new T1Filter();
private Font[] allFonts;
private String[] allFamilies; // cache for default locale only
private Locale lastDefaultLocale;
public static boolean noType1Font;
/* Used to indicate required return type from toArray(..); */
private static String[] STR_ARRAY = new String[0];
/**
* Deprecated, unsupported hack - actually invokes a bug!
* Left in for a customer, don't remove.
*/
private boolean usePlatformFontMetrics = false;
/**
* Returns the global SunFontManager instance. This is similar to
* {@link FontManagerFactory#getInstance()} but it returns a
* SunFontManager instance instead. This is only used in internal classes
* where we can safely assume that a SunFontManager is to be used.
*
* @return the global SunFontManager instance
*/
public static SunFontManager getInstance() {
FontManager fm = FontManagerFactory.getInstance();
return (SunFontManager) fm;
}
public FilenameFilter getTrueTypeFilter() {
return ttFilter;
}
public FilenameFilter getType1Filter() {
return t1Filter;
}
@Override
public boolean usingPerAppContextComposites() {
return _usingPerAppContextComposites;
}
private void initJREFontMap() {
/* Key is familyname+style value as an int.
* Value is filename containing the font.
* If no mapping exists, it means there is no font file for the style
* If the mapping exists but the file doesn't exist in the deferred
* list then it means its not installed.
* This looks like a lot of code and strings but if it saves even
* a single file being opened at JRE start-up there's a big payoff.
* Lucida Sans is probably the only important case as the others
* are rarely used. Consider removing the other mappings if there's
* no evidence they are useful in practice.
*/
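        // Example (added for clarity): a request for family "Lucida Sans" in style
        // Font.BOLD (value 1) corresponds to the key "lucida sans1", which the map
        // below resolves to "LucidaSansDemiBold.ttf".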
jreFontMap = new HashMap<String, String>();
jreLucidaFontFiles = new HashSet<String>();
if (isOpenJDK()) {
return;
}
/* Lucida Sans Family */
jreFontMap.put("lucida sans0", "LucidaSansRegular.ttf");
jreFontMap.put("lucida sans1", "LucidaSansDemiBold.ttf");
/* Lucida Sans full names (map Bold and DemiBold to same file) */
jreFontMap.put("lucida sans regular0", "LucidaSansRegular.ttf");
jreFontMap.put("lucida sans regular1", "LucidaSansDemiBold.ttf");
jreFontMap.put("lucida sans bold1", "LucidaSansDemiBold.ttf");
jreFontMap.put("lucida sans demibold1", "LucidaSansDemiBold.ttf");
/* Lucida Sans Typewriter Family */
jreFontMap.put("lucida sans typewriter0", "LucidaTypewriterRegular.ttf");
jreFontMap.put("lucida sans typewriter1", "LucidaTypewriterBold.ttf");
/* Typewriter full names (map Bold and DemiBold to same file) */
jreFontMap.put("lucida sans typewriter regular0", "LucidaTypewriter.ttf");
jreFontMap.put("lucida sans typewriter regular1", "LucidaTypewriterBold.ttf");
jreFontMap.put("lucida sans typewriter bold1", "LucidaTypewriterBold.ttf");
jreFontMap.put("lucida sans typewriter demibold1", "LucidaTypewriterBold.ttf");
/* Lucida Bright Family */
jreFontMap.put("lucida bright0", "LucidaBrightRegular.ttf");
jreFontMap.put("lucida bright1", "LucidaBrightDemiBold.ttf");
jreFontMap.put("lucida bright2", "LucidaBrightItalic.ttf");
jreFontMap.put("lucida bright3", "LucidaBrightDemiItalic.ttf");
/* Lucida Bright full names (map Bold and DemiBold to same file) */
jreFontMap.put("lucida bright regular0", "LucidaBrightRegular.ttf");
jreFontMap.put("lucida bright regular1", "LucidaBrightDemiBold.ttf");
jreFontMap.put("lucida bright regular2", "LucidaBrightItalic.ttf");
jreFontMap.put("lucida bright regular3", "LucidaBrightDemiItalic.ttf");
jreFontMap.put("lucida bright bold1", "LucidaBrightDemiBold.ttf");
jreFontMap.put("lucida bright bold3", "LucidaBrightDemiItalic.ttf");
jreFontMap.put("lucida bright demibold1", "LucidaBrightDemiBold.ttf");
jreFontMap.put("lucida bright demibold3", "LucidaBrightDemiItalic.ttf");
jreFontMap.put("lucida bright italic2", "LucidaBrightItalic.ttf");
jreFontMap.put("lucida bright italic3", "LucidaBrightDemiItalic.ttf");
jreFontMap.put("lucida bright bold italic3", "LucidaBrightDemiItalic.ttf");
jreFontMap.put("lucida bright demibold italic3", "LucidaBrightDemiItalic.ttf");
for (String ffile : jreFontMap.values()) {
jreLucidaFontFiles.add(ffile);
}
}
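/* For illustration: the key format is the lower-cased family or full
* name with the integer style appended, exactly as built in
* findJREDeferredFont(). For example:
*
*   String key = "Lucida Sans Typewriter".toLowerCase(Locale.ENGLISH)
*                + Font.BOLD;             // -> "lucida sans typewriter1"
*   String file = jreFontMap.get(key);    // -> "LucidaTypewriterBold.ttf"
*/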
static {
java.security.AccessController.doPrivileged(new java.security.PrivilegedAction() {
public Object run() {
FontManagerNativeLibrary.load();
// JNI throws an exception if a class/method/field is not found,
// so there's no need to do anything explicit here.
initIDs();
switch (StrikeCache.nativeAddressSize) {
case 8:
longAddresses = true;
break;
case 4:
longAddresses = false;
break;
default:
throw new RuntimeException("Unexpected address size");
}
noType1Font = "true".equals(System.getProperty("sun.java2d.noType1Font"));
jreLibDirName = System.getProperty("java.home", "") + File.separator + "lib";
jreFontDirName = jreLibDirName + File.separator + "fonts";
File lucidaFile = new File(jreFontDirName + File.separator + FontUtilities.LUCIDA_FILE_NAME);
return null;
}
});
}
public TrueTypeFont getEUDCFont() {
// Overridden in Windows.
return null;
}
/* Initialise ptrs used by JNI methods */
private static native void initIDs();
@SuppressWarnings("unchecked")
protected SunFontManager() {
initJREFontMap();
java.security.AccessController.doPrivileged(new java.security.PrivilegedAction() {
public Object run() {
File badFontFile = new File(jreFontDirName + File.separator + "badfonts.txt");
if (badFontFile.exists()) {
FileInputStream fis = null;
try {
badFonts = new ArrayList();
fis = new FileInputStream(badFontFile);
InputStreamReader isr = new InputStreamReader(fis);
BufferedReader br = new BufferedReader(isr);
while (true) {
String name = br.readLine();
if (name == null) {
break;
} else {
if (FontUtilities.debugFonts()) {
FontUtilities.getLogger().warning("read bad font: " + name);
}
badFonts.add(name);
}
}
} catch (IOException e) {
} finally {
try {
if (fis != null) {
fis.close();
}
} catch (IOException ioe) {
}
}
}
/* Here we get the fonts in jre/lib/fonts and register
* them so they are always available and preferred over
* other fonts. They need to be registered before the
* composite fonts, as otherwise a corresponding native
* font may be found first; since we don't have a way to
* handle two fonts of the same name, the JRE one
* must be the first one registered. Pass "true" to the
* registerFonts method as on-screen these JRE fonts
* always go through the T2K rasteriser.
*/
if (FontUtilities.isLinux) {
/* Linux font configuration uses these fonts */
registerFontDir(jreFontDirName);
}
registerFontsInDir(jreFontDirName, true, Font2D.JRE_RANK, true, false);
/* Create the font configuration and get any font path
* that might be specified.
*/
fontConfig = createFontConfiguration();
if (isOpenJDK()) {
String[] fontInfo = getDefaultPlatformFont();
defaultFontName = fontInfo[0];
defaultFontFileName = fontInfo[1];
}
String extraFontPath = fontConfig.getExtraFontPath();
/* In prior releases the debugging font path replaced
* all normally located font directories except for the
* JRE fonts dir. This directory is still always located
* and placed at the head of the path, but as an
* augmentation to the previous behaviour the
* changes below allow you to additionally append to
* the font path by starting with "append:" or to prepend
* by starting with "prepend:". Eg: to append
* -Dsun.java2d.fontpath=append:/usr/local/myfonts
* and to prepend
* -Dsun.java2d.fontpath=prepend:/usr/local/myfonts
*
* If there is an appendedfontpath entry in the font
* configuration it is used instead of searching the
* system for dirs.
* The behaviour of append and prepend is then similar
* to the normal case, ie the located path goes after what
* you prepend and before what you append. If the
* sun.java2d.fontpath property is used but neither
* the append nor the prepend syntax is used, then except
* for the JRE dir the path is replaced and it
* is up to you to make sure that all the right
* directories are located. This is platform and
* locale-specific so it's almost impossible to get
* right, so it should be used with caution.
*/
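/* For illustration of the code below: with "prepend:" the resulting path is
*   <prepended dirs> : <extraFontPath> : <platform font dirs>
* with "append:" it is
*   <extraFontPath> : <platform font dirs> : <appended dirs>
* and with neither prefix the property value simply becomes the path
* (the JRE fonts dir having already been registered above).
*/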
boolean prependToPath = false;
boolean appendToPath = false;
String dbgFontPath = System.getProperty("sun.java2d.fontpath");
if (dbgFontPath != null) {
if (dbgFontPath.startsWith("prepend:")) {
prependToPath = true;
dbgFontPath = dbgFontPath.substring("prepend:".length());
} else if (dbgFontPath.startsWith("append:")) {
appendToPath = true;
dbgFontPath = dbgFontPath.substring("append:".length());
}
}
if (FontUtilities.debugFonts()) {
PlatformLogger logger = FontUtilities.getLogger();
logger.info("JRE font directory: " + jreFontDirName);
logger.info("Extra font path: " + extraFontPath);
logger.info("Debug font path: " + dbgFontPath);
}
if (dbgFontPath != null) {
/* In debugging mode we register all the paths
* Caution: this is a very expensive call on Solaris:-
*/
fontPath = getPlatformFontPath(noType1Font);
if (extraFontPath != null) {
fontPath = extraFontPath + File.pathSeparator + fontPath;
}
if (appendToPath) {
fontPath = fontPath + File.pathSeparator + dbgFontPath;
} else if (prependToPath) {
fontPath = dbgFontPath + File.pathSeparator + fontPath;
} else {
fontPath = dbgFontPath;
}
registerFontDirs(fontPath);
} else if (extraFontPath != null) {
/* If the font configuration contains an
* "appendedfontpath" entry, it is interpreted as a
* set of locations that should always be registered.
* It may be in addition to locations normally found
* for that platform, or it may be locations that need
* to have all their paths registered to locate all
* the needed platform names.
* This is typically the case when the same .TTF file is
* referenced from multiple font.dir files and all
* of these must be read to find all the native
* (XLFD) names for the font, so that X11 font APIs
* can be used for as many code points as possible.
*/
registerFontDirs(extraFontPath);
}
/* On Solaris, we need to register the Japanese TrueType
* directory so that we can find the corresponding
* bitmap fonts. This could be done by listing the
* directory in the font configuration file, but we
* don't want to confuse users with this quirk. There
* are no bitmap fonts for other writing systems that
* correspond to TrueType fonts and have matching XLFDs.
* We need to register the bitmap fonts only in
* environments where they're on the X font path, i.e.,
* in the Japanese locale. Note that if the X Toolkit
* is in use the font path isn't set up by JDK, but
* users of a JA locale should have it
* set up already by their login environment.
*/
if (FontUtilities.isSolaris && Locale.JAPAN.equals(Locale.getDefault())) {
registerFontDir("/usr/openwin/lib/locale/ja/X11/fonts/TT");
}
initCompositeFonts(fontConfig, null);
return null;
}
});
boolean platformFont = AccessController.doPrivileged(new PrivilegedAction<Boolean>() {
public Boolean run() {
String prop = System.getProperty("java2d.font.usePlatformFont");
String env = System.getenv("JAVA2D_USEPLATFORMFONT");
return "true".equals(prop) || env != null;
}
});
if (platformFont) {
usePlatformFontMetrics = true;
System.out.println("Enabling platform font metrics for win32. This is an unsupported option.");
System.out.println("This yields incorrect composite font metrics as reported by 1.1.x releases.");
System.out.println("It is appropriate only for use by applications which do not use any Java 2");
System.out.println("functionality. This property will be removed in a later release.");
}
}
/**
* This method is provided for internal and exclusive use by Swing.
*
* @param font representing a physical font.
* @return true if the underlying font is a TrueType or OpenType font
* that claims to support the Microsoft Windows encoding corresponding to
* the default file.encoding property of this JRE instance.
* This narrow value is useful for Swing to decide if the font is useful
* for the Windows Look and Feel, or, if a composite font should be
* used instead.
* The information used to make the decision is obtained from
* the ulCodePageRange fields in the font.
* A caller can use isLogicalFont(Font) in this class before calling
* this method and would not need to call this method if that
* returns true.
*/
// static boolean fontSupportsDefaultEncoding(Font font) {
// String encoding =
// (String) java.security.AccessController.doPrivileged(
// new sun.security.action.GetPropertyAction("file.encoding"));
// if (encoding == null || font == null) {
// return false;
// }
// encoding = encoding.toLowerCase(Locale.ENGLISH);
// return FontManager.fontSupportsEncoding(font, encoding);
// }
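/* getNewComposite: builds a composite whose primary slot is the requested
* physical family/style, backed by the components of the "dialog"
* composite of the same style, so the returned handle keeps composite
* fallback coverage while using the requested physical font for metrics.
*/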
public Font2DHandle getNewComposite(String family, int style, Font2DHandle handle) {
if (!(handle.font2D instanceof CompositeFont)) {
return handle;
}
CompositeFont oldComp = (CompositeFont) handle.font2D;
PhysicalFont oldFont = oldComp.getSlotFont(0);
if (family == null) {
family = oldFont.getFamilyName(null);
}
if (style == -1) {
style = oldComp.getStyle();
}
Font2D newFont = findFont2D(family, style, NO_FALLBACK);
if (!(newFont instanceof PhysicalFont)) {
newFont = oldFont;
}
PhysicalFont physicalFont = (PhysicalFont) newFont;
CompositeFont dialog2D = (CompositeFont) findFont2D("dialog", style, NO_FALLBACK);
if (dialog2D == null) { /* shouldn't happen */
return handle;
}
CompositeFont compFont = new CompositeFont(physicalFont, dialog2D);
Font2DHandle newHandle = new Font2DHandle(compFont);
return newHandle;
}
protected void registerCompositeFont(String compositeName, String[] componentFileNames, String[] componentNames,
int numMetricsSlots, int[] exclusionRanges, int[] exclusionMaxIndex, boolean defer) {
CompositeFont cf = new CompositeFont(compositeName, componentFileNames, componentNames, numMetricsSlots,
exclusionRanges, exclusionMaxIndex, defer, this);
addCompositeToFontList(cf, Font2D.FONT_CONFIG_RANK);
synchronized (compFonts) {
compFonts[maxCompFont++] = cf;
}
}
/* This variant is used only when the application specifies
* a variant of composite fonts which prefers locale specific or
* proportional fonts.
*/
protected static void registerCompositeFont(String compositeName, String[] componentFileNames,
String[] componentNames, int numMetricsSlots, int[] exclusionRanges, int[] exclusionMaxIndex, boolean defer,
ConcurrentHashMap<String, Font2D> altNameCache) {
CompositeFont cf = new CompositeFont(compositeName, componentFileNames, componentNames, numMetricsSlots,
exclusionRanges, exclusionMaxIndex, defer, SunFontManager.getInstance());
/* if the cache has an existing composite for this case, make
* its handle point to this new font.
* This ensures that when the altNameCache that is passed in
* is the global mapNameCache - ie we are running as an application -
* that any statically created java.awt.Font instances which already
* have a Font2D instance will have that re-directed to the new Font
* on subsequent uses. This is particularly important for "the"
* default font instance, or similar cases where a UI toolkit (eg
* Swing) has cached a java.awt.Font. Note that if Swing is using
* a custom composite, the APIs which update the standard composites
* have no effect - this is typically the case only when using the Windows
* L&F where these APIs would conflict with that L&F anyway.
*/
Font2D oldFont = (Font2D) altNameCache.get(compositeName.toLowerCase(Locale.ENGLISH));
if (oldFont instanceof CompositeFont) {
oldFont.handle.font2D = cf;
}
altNameCache.put(compositeName.toLowerCase(Locale.ENGLISH), cf);
}
private void addCompositeToFontList(CompositeFont f, int rank) {
if (FontUtilities.isLogging()) {
FontUtilities.getLogger().info("Add to Family " + f.familyName + ", Font " + f.fullName + " rank=" + rank);
}
f.setRank(rank);
compositeFonts.put(f.fullName, f);
fullNameToFont.put(f.fullName.toLowerCase(Locale.ENGLISH), f);
FontFamily family = FontFamily.getFamily(f.familyName);
if (family == null) {
family = new FontFamily(f.familyName, true, rank);
}
family.setFont(f, f.style);
}
/*
* Systems may have fonts with the same name.
* We want to register only one of such fonts (at least until
* such time as there might be APIs which can accommodate > 1).
* Rank is 1) font configuration fonts, 2) JRE fonts, 3) OT/TT fonts,
* 4) Type1 fonts, 5) native fonts.
*
* If the new font has the same name as the old font, the higher
* ranked font gets added, replacing the lower ranked one.
* If the fonts are of equal rank, then make a special case of
* font configuration rank fonts, which are on closer inspection,
* OT/TT fonts such that the larger font is registered. This is
* a heuristic since a font may be "larger" in the sense of more
* code points, or be a larger "file" because it has more bitmaps.
* So it is possible that using filesize may lead to fewer glyphs, and
* using glyphs may lead to lower quality display. Probably number
* of glyphs is the ideal, but filesize is information we already
* have and is good enough for the known cases.
* Also don't want to register fonts that match JRE font families
* but are coming from a source other than the JRE.
* This will ensure that we will algorithmically style the JRE
* plain font and get the same set of glyphs for all styles.
*
* Note the meaning of this method's return value:
* if it returns the same object as its argument, that means this
* font was newly registered.
* If it returns a different object, it means this font already exists,
* and you should use that one.
* If it returns null, it means this font was not registered and no font
* of that name is registered. The caller must find a substitute.
*/
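/* For illustration, the contract amounts to:
*   PhysicalFont pf = addToFontList(f, rank);
*   pf == null : rejected, the caller must find a substitute;
*   pf == f    : f was newly registered;
*   otherwise  : an existing font won, use pf instead of f.
*/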
private PhysicalFont addToFontList(PhysicalFont f, int rank) {
String fontName = f.fullName;
String familyName = f.familyName;
if (fontName == null || "".equals(fontName)) {
return null;
}
if (compositeFonts.containsKey(fontName)) {
/* Don't register any font that has the same name as a composite */
return null;
}
f.setRank(rank);
if (!physicalFonts.containsKey(fontName)) {
if (FontUtilities.isLogging()) {
FontUtilities.getLogger().info("Add to Family " + familyName + ", Font " + fontName + " rank=" + rank);
}
physicalFonts.put(fontName, f);
FontFamily family = FontFamily.getFamily(familyName);
if (family == null) {
family = new FontFamily(familyName, false, rank);
family.setFont(f, f.style);
} else if (family.getRank() >= rank) {
family.setFont(f, f.style);
}
fullNameToFont.put(fontName.toLowerCase(Locale.ENGLISH), f);
return f;
} else {
PhysicalFont newFont = f;
PhysicalFont oldFont = physicalFonts.get(fontName);
if (oldFont == null) {
return null;
}
/* If the new font is of an equal or higher rank, it is a
* candidate to replace the current one, subject to further tests.
*/
if (oldFont.getRank() >= rank) {
/* All fonts initialise their mapper when first
* used. If the mapper is non-null then this font
* has been accessed at least once. In that case
* do not replace it. This may be overly stringent,
* but it's probably better not to replace a font that
* someone is already using without a compelling reason.
* Additionally the primary case where it is known
* this behaviour is important is in certain composite
* fonts, and since all the components of a given
* composite are usually initialised together this
* is unlikely. For this to be a problem, there would
* have to be a case where two different composites used
* different versions of the same-named font, and they
* were initialised and used at separate times.
* In that case we continue on and allow the new font to
* be installed, but replaceFont will continue to allow
* the original font to be used in Composite fonts.
*/
if (oldFont.mapper != null && rank > Font2D.FONT_CONFIG_RANK) {
return oldFont;
}
/* Normally we require a higher rank to replace a font,
* but as a special case, if the two fonts are the same rank,
* and are instances of TrueTypeFont we want the
* more complete (larger) one.
*/
if (oldFont.getRank() == rank) {
if (oldFont instanceof TrueTypeFont && newFont instanceof TrueTypeFont) {
TrueTypeFont oldTTFont = (TrueTypeFont) oldFont;
TrueTypeFont newTTFont = (TrueTypeFont) newFont;
if (oldTTFont.fileSize >= newTTFont.fileSize) {
return oldFont;
}
} else {
return oldFont;
}
}
/* Don't ever replace JRE fonts.
* This test is in case a font configuration references
* a Lucida font, which has been mapped to a Lucida
* from the host O/S. The assumption here is that any
* such font configuration file is probably incorrect, or
* the host O/S version is for the use of AWT.
* In other words if we reach here, there's a possible
* problem with our choice of font configuration fonts.
*/
if (oldFont.platName.startsWith(jreFontDirName)) {
if (FontUtilities.isLogging()) {
FontUtilities.getLogger().warning(
"Unexpected attempt to replace a JRE " + " font " + fontName + " from " + oldFont.platName +
" with " + newFont.platName);
}
return oldFont;
}
if (FontUtilities.isLogging()) {
FontUtilities.getLogger().info(
"Replace in Family " + familyName + ",Font " + fontName + " new rank=" + rank + " from " +
oldFont.platName + " with " + newFont.platName);
}
replaceFont(oldFont, newFont);
physicalFonts.put(fontName, newFont);
fullNameToFont.put(fontName.toLowerCase(Locale.ENGLISH), newFont);
FontFamily family = FontFamily.getFamily(familyName);
if (family == null) {
family = new FontFamily(familyName, false, rank);
family.setFont(newFont, newFont.style);
} else if (family.getRank() >= rank) {
family.setFont(newFont, newFont.style);
}
return newFont;
} else {
return oldFont;
}
}
}
public Font2D[] getRegisteredFonts() {
PhysicalFont[] physFonts = getPhysicalFonts();
int mcf = maxCompFont; /* for MT-safety */
Font2D[] regFonts = new Font2D[physFonts.length + mcf];
System.arraycopy(compFonts, 0, regFonts, 0, mcf);
System.arraycopy(physFonts, 0, regFonts, mcf, physFonts.length);
return regFonts;
}
protected PhysicalFont[] getPhysicalFonts() {
return physicalFonts.values().toArray(new PhysicalFont[0]);
}
/* The class FontRegistrationInfo is used when a client says not
* to register a font immediately. This mechanism is used to defer
* initialisation of all the components of composite fonts at JRE
* start-up. The CompositeFont class is "aware" of this and when it
* is first used it asks for the registration of its components.
* Also in the event that any physical font is requested the
* deferred fonts are initialised before triggering a search of the
* system.
* Two maps are used. One to track the deferred fonts. The
* other to track the fonts that have been initialised through this
* mechanism.
*/
private static final class FontRegistrationInfo {
String fontFilePath;
String[] nativeNames;
int fontFormat;
boolean javaRasterizer;
int fontRank;
FontRegistrationInfo(String fontPath, String[] names, int format, boolean useJavaRasterizer, int rank) {
this.fontFilePath = fontPath;
this.nativeNames = names;
this.fontFormat = format;
this.javaRasterizer = useJavaRasterizer;
this.fontRank = rank;
}
}
private final ConcurrentHashMap<String, FontRegistrationInfo> deferredFontFiles
= new ConcurrentHashMap<String, FontRegistrationInfo>();
private final ConcurrentHashMap<String, Font2DHandle> initialisedFonts
= new ConcurrentHashMap<String, Font2DHandle>();
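/* For illustration, the deferred registration lifecycle implemented
* below is, in outline:
*   registerDeferredFont(key, path, ...)  - record the file, don't open it:
*       deferredFontFiles.put(key, regInfo);
*   initialiseDeferredFont(key)           - on first real use:
*       deferredFontFiles.remove(key);
*       registerFontFile(...);
*       initialisedFonts.put(key, font.handle);
* so each deferred file is opened at most once and later lookups are
* served from initialisedFonts.
*/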
/* Remind: possibly enhance initialiseDeferredFonts() to be
* optionally given a name and a style and it could stop when it
* finds that font - but this would be a problem if two of the
* fonts reference the same font face name (cf the Solaris
* euro fonts).
*/
protected synchronized void initialiseDeferredFonts() {
for (String fileName : deferredFontFiles.keySet()) {
initialiseDeferredFont(fileName);
}
}
protected synchronized void registerDeferredJREFonts(String jreDir) {
for (FontRegistrationInfo info : deferredFontFiles.values()) {
if (info.fontFilePath != null && info.fontFilePath.startsWith(jreDir)) {
initialiseDeferredFont(info.fontFilePath);
}
}
}
public boolean isDeferredFont(String fileName) {
return deferredFontFiles.containsKey(fileName);
}
/* We keep a map of the files which contain the Lucida fonts so we
* don't need to search for them.
* But since we know what fonts these files contain, we can also avoid
* opening them to look for a font name we don't recognise - see
* findDeferredFont().
* For typical cases where the font isn't a JRE one the overhead is
* this method call, HashMap.get() and null reference test, then
* a boolean test of noOtherJREFontFiles.
*/
public /*private*/ PhysicalFont findJREDeferredFont(String name, int style) {
PhysicalFont physicalFont;
String nameAndStyle = name.toLowerCase(Locale.ENGLISH) + style;
String fileName = jreFontMap.get(nameAndStyle);
if (fileName != null) {
fileName = jreFontDirName + File.separator + fileName;
if (deferredFontFiles.get(fileName) != null) {
physicalFont = initialiseDeferredFont(fileName);
if (physicalFont != null && (physicalFont.getFontName(null).equalsIgnoreCase(name) ||
physicalFont.getFamilyName(null).equalsIgnoreCase(name)) && physicalFont.style == style) {
return physicalFont;
}
}
}
/* Iterate over the deferred font files looking for any in the
* jre directory that we didn't recognise, open each of these.
* In almost all installations this will quickly fall through
* because only the Lucidas will be present and jreOtherFontFiles
* will be empty.
* noOtherJREFontFiles is used so we can skip this block as soon
* as it's determined that it's not needed - almost always after the
* very first time through.
*/
if (noOtherJREFontFiles) {
return null;
}
synchronized (jreLucidaFontFiles) {
if (jreOtherFontFiles == null) {
HashSet<String> otherFontFiles = new HashSet<String>();
for (String deferredFile : deferredFontFiles.keySet()) {
File file = new File(deferredFile);
String dir = file.getParent();
String fname = file.getName();
/* skip names which aren't absolute, aren't in the JRE
* directory, or are known Lucida fonts.
*/
if (dir == null || !dir.equals(jreFontDirName) || jreLucidaFontFiles.contains(fname)) {
continue;
}
otherFontFiles.add(deferredFile);
}
jreOtherFontFiles = otherFontFiles.toArray(STR_ARRAY);
if (jreOtherFontFiles.length == 0) {
noOtherJREFontFiles = true;
}
}
for (int i = 0; i < jreOtherFontFiles.length; i++) {
fileName = jreOtherFontFiles[i];
if (fileName == null) {
continue;
}
jreOtherFontFiles[i] = null;
physicalFont = initialiseDeferredFont(fileName);
if (physicalFont != null && (physicalFont.getFontName(null).equalsIgnoreCase(name) ||
physicalFont.getFamilyName(null).equalsIgnoreCase(name)) && physicalFont.style == style) {
return physicalFont;
}
}
}
return null;
}
/* This skips JRE installed fonts. */
private PhysicalFont findOtherDeferredFont(String name, int style) {
for (String fileName : deferredFontFiles.keySet()) {
File file = new File(fileName);
String dir = file.getParent();
String fname = file.getName();
if (dir != null && dir.equals(jreFontDirName) && jreLucidaFontFiles.contains(fname)) {
continue;
}
PhysicalFont physicalFont = initialiseDeferredFont(fileName);
if (physicalFont != null && (physicalFont.getFontName(null).equalsIgnoreCase(name) ||
physicalFont.getFamilyName(null).equalsIgnoreCase(name)) && physicalFont.style == style) {
return physicalFont;
}
}
return null;
}
private PhysicalFont findDeferredFont(String name, int style) {
PhysicalFont physicalFont = findJREDeferredFont(name, style);
if (physicalFont != null) {
return physicalFont;
} else {
return findOtherDeferredFont(name, style);
}
}
public void registerDeferredFont(String fileNameKey, String fullPathName, String[] nativeNames, int fontFormat,
boolean useJavaRasterizer, int fontRank) {
FontRegistrationInfo regInfo = new FontRegistrationInfo(fullPathName, nativeNames, fontFormat,
useJavaRasterizer, fontRank);
deferredFontFiles.put(fileNameKey, regInfo);
}
public synchronized PhysicalFont initialiseDeferredFont(String fileNameKey) {
if (fileNameKey == null) {
return null;
}
if (FontUtilities.isLogging()) {
FontUtilities.getLogger().info("Opening deferred font file " + fileNameKey);
}
PhysicalFont physicalFont;
FontRegistrationInfo regInfo = deferredFontFiles.get(fileNameKey);
if (regInfo != null) {
deferredFontFiles.remove(fileNameKey);
physicalFont = registerFontFile(regInfo.fontFilePath, regInfo.nativeNames, regInfo.fontFormat,
regInfo.javaRasterizer, regInfo.fontRank);
if (physicalFont != null) {
/* Store the handle, so that if a font is bad, we
* can retrieve the substituted font.
*/
initialisedFonts.put(fileNameKey, physicalFont.handle);
} else {
initialisedFonts.put(fileNameKey, getDefaultPhysicalFont().handle);
}
} else {
Font2DHandle handle = initialisedFonts.get(fileNameKey);
if (handle == null) {
/* Probably shouldn't happen, but just in case */
physicalFont = getDefaultPhysicalFont();
} else {
physicalFont = (PhysicalFont) (handle.font2D);
}
}
return physicalFont;
}
public boolean isRegisteredFontFile(String name) {
return registeredFonts.containsKey(name);
}
public PhysicalFont getRegisteredFontFile(String name) {
return registeredFonts.get(name);
}
/* Note that the return value from this method is not always
* derived from this file, and may be null. See addToFontList for
* some explanation of this.
*/
public PhysicalFont registerFontFile(String fileName, String[] nativeNames, int fontFormat,
boolean useJavaRasterizer, int fontRank) {
PhysicalFont regFont = registeredFonts.get(fileName);
if (regFont != null) {
return regFont;
}
PhysicalFont physicalFont = null;
try {
String name;
switch (fontFormat) {
case FONTFORMAT_TRUETYPE:
int fn = 0;
TrueTypeFont ttf;
do {
ttf = new TrueTypeFont(fileName, nativeNames, fn++, useJavaRasterizer);
PhysicalFont pf = addToFontList(ttf, fontRank);
if (physicalFont == null) {
physicalFont = pf;
}
} while (fn < ttf.getFontCount());
break;
case FONTFORMAT_TYPE1:
Type1Font t1f = new Type1Font(fileName, nativeNames);
physicalFont = addToFontList(t1f, fontRank);
break;
case FONTFORMAT_NATIVE:
NativeFont nf = new NativeFont(fileName, false);
physicalFont = addToFontList(nf, fontRank);
default:
}
if (FontUtilities.isLogging()) {
FontUtilities.getLogger()
.info("Registered file " + fileName + " as font " + physicalFont + " rank=" + fontRank);
}
} catch (FontFormatException ffe) {
if (FontUtilities.isLogging()) {
FontUtilities.getLogger().warning("Unusable font: " + fileName + " " + ffe.toString());
}
}
if (physicalFont != null && fontFormat != FONTFORMAT_NATIVE) {
registeredFonts.put(fileName, physicalFont);
}
return physicalFont;
}
public void registerFonts(String[] fileNames, String[][] nativeNames, int fontCount, int fontFormat,
boolean useJavaRasterizer, int fontRank, boolean defer) {
for (int i = 0; i < fontCount; i++) {
if (defer) {
registerDeferredFont(fileNames[i], fileNames[i], nativeNames[i], fontFormat, useJavaRasterizer,
fontRank);
} else {
registerFontFile(fileNames[i], nativeNames[i], fontFormat, useJavaRasterizer, fontRank);
}
}
}
/*
* This is the Physical font used when some other font on the system
* can't be located. There has to be at least one font or the font
* system is not useful and the graphics environment cannot sustain
* the Java platform.
*/
public PhysicalFont getDefaultPhysicalFont() {
if (defaultPhysicalFont == null) {
/* findFont2D will load all fonts before giving up the search.
* If the JRE Lucida isn't found (eg because the JRE fonts
* directory is missing), it could find another version of Lucida
* from the host system. This is OK because at that point we are
* trying to gracefully handle/recover from a system
* misconfiguration and this is probably a reasonable substitution.
*/
defaultPhysicalFont = (PhysicalFont) findFont2D("Lucida Sans Regular", Font.PLAIN, NO_FALLBACK);
if (defaultPhysicalFont == null) {
defaultPhysicalFont = (PhysicalFont) findFont2D("Arial", Font.PLAIN, NO_FALLBACK);
}
if (defaultPhysicalFont == null) {
/* Because of the findFont2D call above, if we reach here, we
* know all fonts have already been loaded, just accept any
* match at this point. If this fails we are in real trouble
* and I don't know how to recover from there being absolutely
* no fonts anywhere on the system.
*/
Iterator i = physicalFonts.values().iterator();
if (i.hasNext()) {
defaultPhysicalFont = (PhysicalFont) i.next();
} else {
throw new Error("Probable fatal error: No fonts found.");
}
}
}
return defaultPhysicalFont;
}
public CompositeFont getDefaultLogicalFont(int style) {
return (CompositeFont) findFont2D("dialog", style, NO_FALLBACK);
}
/*
* Return the String representation of a style, prefixed with ".".
* Returning constants here avoids unnecessary string operations.
*/
private static String dotStyleStr(int num) {
switch (num) {
case Font.BOLD:
return ".bold";
case Font.ITALIC:
return ".italic";
case Font.ITALIC | Font.BOLD:
return ".bolditalic";
default:
return ".plain";
}
}
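/* For illustration, dotStyleStr is used to build the name cache key in
* findFont2D, e.g.
*   "lucida sans" + dotStyleStr(Font.BOLD | Font.ITALIC)
* yields the key "lucida sans.bolditalic".
*/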
/* This is implemented only on windows and is called from code that
* executes only on windows. This isn't pretty but it's not without
* precedent in this file. This very probably should be cleaned up at some point.
*/
protected void populateFontFileNameMap(HashMap<String, String> fontToFileMap,
HashMap<String, String> fontToFamilyNameMap, HashMap<String, ArrayList<String>> familyToFontListMap,
Locale locale) {
}
/* Obtained from Platform APIs (windows only)
* Map from lower-case font full name to basename of font file.
* Eg "arial bold" -> ARIALBD.TTF.
* For TTC files, there is a mapping for each font in the file.
*/
private HashMap<String, String> fontToFileMap = null;
/* Obtained from Platform APIs (windows only)
* Map from lower-case font full name to the name of its font family
* Eg "arial bold" -> "Arial"
*/
private HashMap<String, String> fontToFamilyNameMap = null;
/* Obtained from Platform APIs (windows only)
* Map from a lower-case family name to a list of full names of
* the member fonts, eg:
* "arial" -> ["Arial", "Arial Bold", "Arial Italic","Arial Bold Italic"]
*/
private HashMap<String, ArrayList<String>> familyToFontListMap = null;
/* The directories which contain platform fonts */
private String[] pathDirs = null;
private boolean haveCheckedUnreferencedFontFiles;
private String[] getFontFilesFromPath(boolean noType1) {
final FilenameFilter filter;
if (noType1) {
filter = ttFilter;
} else {
filter = new TTorT1Filter();
}
return (String[]) AccessController.doPrivileged(new PrivilegedAction() {
public Object run() {
if (pathDirs.length == 1) {
File dir = new File(pathDirs[0]);
String[] files = dir.list(filter);
if (files == null) {
return new String[0];
}
for (int f = 0; f < files.length; f++) {
files[f] = files[f].toLowerCase();
}
return files;
} else {
ArrayList<String> fileList = new ArrayList<String>();
for (int i = 0; i < pathDirs.length; i++) {
File dir = new File(pathDirs[i]);
String[] files = dir.list(filter);
if (files == null) {
continue;
}
for (int f = 0; f < files.length; f++) {
fileList.add(files[f].toLowerCase());
}
}
return fileList.toArray(STR_ARRAY);
}
}
});
}
/* This is needed since some windows registry names don't match
* the font names.
* - UPC styled font names have a double space, but the
* registry entry mapping to a file doesn't.
* - Marlett is in a hidden file not listed in the registry
* - The registry advertises that the file david.ttf contains a
* font with the full name "David Regular" when in fact it's
* just "David".
* Directly fix up these known cases as this is faster.
* If a font which doesn't match these known cases has no file,
* it may be a font that has been temporarily added to the known set
* or it may be an installed font with a missing registry entry.
* Installed fonts are those in the windows font directories.
* Make a best effort attempt to locate these.
* We obtain the list of TrueType fonts in these directories and
* filter out all the font files we already know about from the registry.
* What remains may be "bad" fonts, duplicate fonts, or perhaps the
* missing font(s) we are looking for.
* Open each of these files to find out.
*/
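/* For illustration (the names here are hypothetical): GDI may report a
* UPC-style full name such as "browalliaupc  bold" (double space) while
* the registry key is "browalliaupc bold"; provided the single-space name
* isn't itself a known font, the loop below re-keys the file mapping
* under the GDI-reported name so the two agree.
*/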
private void resolveWindowsFonts() {
ArrayList<String> unmappedFontNames = null;
for (String font : fontToFamilyNameMap.keySet()) {
String file = fontToFileMap.get(font);
if (file == null) {
if (font.indexOf(" ") > 0) {
String newName = font.replaceFirst(" ", " ");
file = fontToFileMap.get(newName);
/* If the single-space name maps to a file but isn't itself
* a known font name, transfer the file mapping to this font.
*/
if (file != null && !fontToFamilyNameMap.containsKey(newName)) {
fontToFileMap.remove(newName);
fontToFileMap.put(font, file);
}
} else if (font.equals("marlett")) {
fontToFileMap.put(font, "marlett.ttf");
} else if (font.equals("david")) {
file = fontToFileMap.get("david regular");
if (file != null) {
fontToFileMap.remove("david regular");
fontToFileMap.put("david", file);
}
} else {
if (unmappedFontNames == null) {
unmappedFontNames = new ArrayList<String>();
}
unmappedFontNames.add(font);
}
}
}
if (unmappedFontNames != null) {
HashSet<String> unmappedFontFiles = new HashSet<String>();
/* Every font key in fontToFileMap ought to correspond to a
* font key in fontToFamilyNameMap. Entries that don't seem
* to correspond are likely fonts that were named differently
* by GDI than in the registry. One known cause of this is when
* Windows has had its regional settings changed so that from
* GDI we get a localised (eg Chinese or Japanese) name for the
* font, but the registry retains the English version of the name
* that corresponded to the "install" locale for windows.
* Since we are in this code block because there are unmapped
* font names, we can look to find unused font->file mappings
* and then open the files to read the names. We don't generally
* want to open font files, as it's a performance hit, but this
* occurs only for a small number of fonts on specific system
* configs - ie it is believed that a "true" Japanese windows would
* have JA names in the registry too.
* Clone fontToFileMap and remove from the clone all keys which
* match a fontToFamilyNameMap key. What remains maps to the
* files we want to open to find the fonts GDI returned.
* A font in such a file is added to the fontToFileMap after
* checking it's one of the unmappedFontNames we are looking for.
* The original name that didn't map is removed from fontToFileMap
* so essentially this "fixes up" fontToFileMap to use the same
* name as GDI.
* Also note that typically the fonts for which this occurs in
* CJK locales are TTC fonts and not all fonts in a TTC may have
* localised names. Eg MSGOTHIC.TTC contains 3 fonts and one of
* them "MS UI Gothic" has no JA name whereas the other two do.
* So not every font in these files is unmapped or new.
*/
HashMap<String, String> ffmapCopy = (HashMap<String, String>) (fontToFileMap.clone());
for (String key : fontToFamilyNameMap.keySet()) {
ffmapCopy.remove(key);
}
for (String key : ffmapCopy.keySet()) {
unmappedFontFiles.add(ffmapCopy.get(key));
fontToFileMap.remove(key);
}
resolveFontFiles(unmappedFontFiles, unmappedFontNames);
/* If there are still unmapped font names, this means there's
* something that wasn't in the registry. We need to get all
* the font files directly and look at the ones that weren't
* found in the registry.
*/
if (unmappedFontNames.size() > 0) {
/* getFontFilesFromPath() returns all lower case names.
* To compare we also need lower case
* versions of the names from the registry.
*/
ArrayList<String> registryFiles = new ArrayList<String>();
for (String regFile : fontToFileMap.values()) {
registryFiles.add(regFile.toLowerCase());
}
/* We don't look for Type1 files here as windows will
* not enumerate these, so they aren't useful in reconciling
* GDI's unmapped files. We do find these later when
* we enumerate all fonts.
*/
for (String pathFile : getFontFilesFromPath(true)) {
if (!registryFiles.contains(pathFile)) {
unmappedFontFiles.add(pathFile);
}
}
resolveFontFiles(unmappedFontFiles, unmappedFontNames);
}
/* remove from the set of names that will be returned to the
* user any fonts that can't be mapped to files.
*/
if (unmappedFontNames.size() > 0) {
int sz = unmappedFontNames.size();
for (int i = 0; i < sz; i++) {
String name = unmappedFontNames.get(i);
String familyName = fontToFamilyNameMap.get(name);
if (familyName != null) {
ArrayList family = familyToFontListMap.get(familyName);
if (family != null) {
if (family.size() <= 1) {
familyToFontListMap.remove(familyName);
}
}
}
fontToFamilyNameMap.remove(name);
if (FontUtilities.isLogging()) {
FontUtilities.getLogger().info("No file for font:" + name);
}
}
}
}
}
/**
* In some cases windows may have fonts in the fonts folder that
* don't show up in the registry or in the GDI calls to enumerate fonts.
* The only way to find these is to list the directory. We invoke this
* only in getAllFonts/Families, so most searches for a specific
* font that is satisfied by the GDI/registry calls don't take the
* additional hit of listing the directory. This hit is small enough
* that it's not significant in these 'enumerate all the fonts' cases.
* The basic approach is to cross-reference the files windows found
* with the ones in the directory listing approach, and for each
* in the latter list that is missing from the former list, register it.
*/
private synchronized void checkForUnreferencedFontFiles() {
if (haveCheckedUnreferencedFontFiles) {
return;
}
haveCheckedUnreferencedFontFiles = true;
if (!FontUtilities.isWindows) {
return;
}
/* getFontFilesFromPath() returns all lower case names.
* To compare we also need lower case
* versions of the names from the registry.
*/
ArrayList<String> registryFiles = new ArrayList<String>();
for (String regFile : fontToFileMap.values()) {
registryFiles.add(regFile.toLowerCase());
}
/* To avoid any issues with concurrent modification, create
* copies of the existing maps, add the new fonts into these
* and then replace the references to the old ones with the
* new maps. ConcurrentHashMap is another option but it's a lot
* more change, and with this exception these maps are intended
* to be static.
*/
HashMap<String, String> fontToFileMap2 = null;
HashMap<String, String> fontToFamilyNameMap2 = null;
HashMap<String, ArrayList<String>> familyToFontListMap2 = null;
for (String pathFile : getFontFilesFromPath(false)) {
if (!registryFiles.contains(pathFile)) {
if (FontUtilities.isLogging()) {
FontUtilities.getLogger().info("Found non-registry file : " + pathFile);
}
PhysicalFont f = registerFontFile(getPathName(pathFile));
if (f == null) {
continue;
}
if (fontToFileMap2 == null) {
fontToFileMap2 = new HashMap<String, String>(fontToFileMap);
fontToFamilyNameMap2 = new HashMap<String, String>(fontToFamilyNameMap);
familyToFontListMap2 = new HashMap<String, ArrayList<String>>(familyToFontListMap);
}
String fontName = f.getFontName(null);
String family = f.getFamilyName(null);
String familyLC = family.toLowerCase();
fontToFamilyNameMap2.put(fontName, family);
fontToFileMap2.put(fontName, pathFile);
ArrayList<String> fonts = familyToFontListMap2.get(familyLC);
if (fonts == null) {
fonts = new ArrayList<String>();
} else {
fonts = new ArrayList<String>(fonts);
}
fonts.add(fontName);
familyToFontListMap2.put(familyLC, fonts);
}
}
if (fontToFileMap2 != null) {
fontToFileMap = fontToFileMap2;
familyToFontListMap = familyToFontListMap2;
fontToFamilyNameMap = fontToFamilyNameMap2;
}
}
private void resolveFontFiles(HashSet<String> unmappedFiles, ArrayList<String> unmappedFonts) {
Locale l = SunToolkit.getStartupLocale();
for (String file : unmappedFiles) {
try {
int fn = 0;
TrueTypeFont ttf;
String fullPath = getPathName(file);
if (FontUtilities.isLogging()) {
FontUtilities.getLogger().info("Trying to resolve file " + fullPath);
}
do {
ttf = new TrueTypeFont(fullPath, null, fn++, false);
// prefer the font's locale name.
String fontName = ttf.getFontName(l).toLowerCase();
if (unmappedFonts.contains(fontName)) {
fontToFileMap.put(fontName, file);
unmappedFonts.remove(fontName);
if (FontUtilities.isLogging()) {
FontUtilities.getLogger()
.info("Resolved absent registry entry for " + fontName + " located in " + fullPath);
}
}
} while (fn < ttf.getFontCount());
} catch (Exception e) {
}
}
}
/* Hardwire the English names and expected file names of fonts
* commonly used at start up. Avoiding even the small cost of calling
* platform APIs to locate these until later can help.
* The code that registers these fonts needs to "bail" if any
* of the files do not exist, so it will verify the existence of
* all non-null file names first.
* They are added in to a map with nominally the first
* word in the name of the family as the key. In all the cases
* we are using, the family name is a single word, and as is
* more or less required, the family name is the initial sequence
* in a full name. So lookup first finds the matching description,
* then registers the whole family, returning the right font.
*/
public static class FamilyDescription {
public String familyName;
public String plainFullName;
public String boldFullName;
public String italicFullName;
public String boldItalicFullName;
public String plainFileName;
public String boldFileName;
public String italicFileName;
public String boldItalicFileName;
}
static HashMap<String, FamilyDescription> platformFontMap;
/**
* The default implementation returns an empty map; platform subclasses override it.
*/
public HashMap<String, FamilyDescription> populateHardcodedFileNameMap() {
return new HashMap<String, FamilyDescription>(0);
}
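/* Illustrative sketch only (family and file names below are hypothetical):
* a platform subclass overriding populateHardcodedFileNameMap() would
* populate the map along these lines, keyed by the first word of the
* lower-case family name as expected by findFontFromPlatformMap():
*
*   HashMap<String, FamilyDescription> m =
*       new HashMap<String, FamilyDescription>();
*   FamilyDescription fd = new FamilyDescription();
*   fd.familyName    = "Example Sans";
*   fd.plainFullName = "Example Sans";
*   fd.boldFullName  = "Example Sans Bold";
*   fd.plainFileName = "examplesans.ttf";
*   fd.boldFileName  = "examplesansbd.ttf";
*   m.put("example", fd);
*   return m;
*/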
Font2D findFontFromPlatformMap(String lcName, int style) {
if (platformFontMap == null) {
platformFontMap = populateHardcodedFileNameMap();
}
if (platformFontMap == null || platformFontMap.size() == 0) {
return null;
}
int spaceIndex = lcName.indexOf(' ');
String firstWord = lcName;
if (spaceIndex > 0) {
firstWord = lcName.substring(0, spaceIndex);
}
FamilyDescription fd = platformFontMap.get(firstWord);
if (fd == null) {
return null;
}
/* Once we've established that it's at least the first word,
* we need to dig deeper to make sure it's a match for either
* a full name, or the family name, to make sure it's not
* a request for some other font that just happens to start
* with the same first word.
*/
int styleIndex = -1;
if (lcName.equalsIgnoreCase(fd.plainFullName)) {
styleIndex = 0;
} else if (lcName.equalsIgnoreCase(fd.boldFullName)) {
styleIndex = 1;
} else if (lcName.equalsIgnoreCase(fd.italicFullName)) {
styleIndex = 2;
} else if (lcName.equalsIgnoreCase(fd.boldItalicFullName)) {
styleIndex = 3;
}
if (styleIndex == -1 && !lcName.equalsIgnoreCase(fd.familyName)) {
return null;
}
String plainFile = null, boldFile = null, italicFile = null, boldItalicFile = null;
boolean failure = false;
/* In a terminal server config, it's possible that getPathName()
* will return null, if the file doesn't exist, hence the null
* checks on return. But in the normal client config we need to
* follow this up with a check to see if all the files really
* exist for the non-null paths.
*/
getPlatformFontDirs(noType1Font);
if (fd.plainFileName != null) {
plainFile = getPathName(fd.plainFileName);
if (plainFile == null) {
failure = true;
}
}
if (fd.boldFileName != null) {
boldFile = getPathName(fd.boldFileName);
if (boldFile == null) {
failure = true;
}
}
if (fd.italicFileName != null) {
italicFile = getPathName(fd.italicFileName);
if (italicFile == null) {
failure = true;
}
}
if (fd.boldItalicFileName != null) {
boldItalicFile = getPathName(fd.boldItalicFileName);
if (boldItalicFile == null) {
failure = true;
}
}
if (failure) {
if (FontUtilities.isLogging()) {
FontUtilities.getLogger().
info("Hardcoded file missing looking for " + lcName);
}
platformFontMap.remove(firstWord);
return null;
}
/* Some of these may be null, as not all styles have to exist */
final String[] files = { plainFile, boldFile, italicFile, boldItalicFile };
failure = java.security.AccessController.doPrivileged(new java.security.PrivilegedAction<Boolean>() {
public Boolean run() {
for (int i = 0; i < files.length; i++) {
if (files[i] == null) {
continue;
}
File f = new File(files[i]);
if (!f.exists()) {
return Boolean.TRUE;
}
}
return Boolean.FALSE;
}
});
if (failure) {
if (FontUtilities.isLogging()) {
FontUtilities.getLogger().
info("Hardcoded file missing looking for " + lcName);
}
platformFontMap.remove(firstWord);
return null;
}
/* If we reach here we know that we have all the files we
* expect, so all should be fine so long as the contents
* are what we'd expect. Now on to registering the fonts.
* Currently this code only looks for TrueType fonts, so format
* and rank can be specified without looking at the filename.
*/
Font2D font = null;
for (int f = 0; f < files.length; f++) {
if (files[f] == null) {
continue;
}
PhysicalFont pf = registerFontFile(files[f], null, FONTFORMAT_TRUETYPE, false, Font2D.TTF_RANK);
if (f == styleIndex) {
font = pf;
}
}
/* Two general cases need a bit more work here.
* 1) If font is null, then it was perhaps a request for a
* non-existent font, such as "Tahoma Italic", or a family name -
* where family and full name of the plain font differ.
* Fall back to finding the closest one in the family.
* This could still fail if a client specified "Segoe" instead of
* "Segoe UI".
* 2) The request is of the form "MyFont Bold", style=Font.ITALIC,
* and so we want to see if there's a Bold Italic font, or
* "MyFamily", style=Font.BOLD, and we may have matched the plain,
* but now need to revise that to the BOLD font.
*/
FontFamily fontFamily = FontFamily.getFamily(fd.familyName);
if (fontFamily != null) {
if (font == null) {
font = fontFamily.getFont(style);
if (font == null) {
font = fontFamily.getClosestStyle(style);
}
} else if (style > 0 && style != font.style) {
style |= font.style;
font = fontFamily.getFont(style);
if (font == null) {
font = fontFamily.getClosestStyle(style);
}
}
}
return font;
}
private synchronized HashMap<String, String> getFullNameToFileMap() {
if (fontToFileMap == null) {
pathDirs = getPlatformFontDirs(noType1Font);
fontToFileMap = new HashMap<String, String>(100);
fontToFamilyNameMap = new HashMap<String, String>(100);
familyToFontListMap = new HashMap<String, ArrayList<String>>(50);
populateFontFileNameMap(fontToFileMap, fontToFamilyNameMap, familyToFontListMap, Locale.ENGLISH);
if (FontUtilities.isWindows) {
resolveWindowsFonts();
}
if (FontUtilities.isLogging()) {
logPlatformFontInfo();
}
}
return fontToFileMap;
}
private void logPlatformFontInfo() {
PlatformLogger logger = FontUtilities.getLogger();
for (int i = 0; i < pathDirs.length; i++) {
logger.info("fontdir=" + pathDirs[i]);
}
for (String keyName : fontToFileMap.keySet()) {
logger.info("font=" + keyName + " file=" + fontToFileMap.get(keyName));
}
for (String keyName : fontToFamilyNameMap.keySet()) {
logger.info("font=" + keyName + " family=" + fontToFamilyNameMap.get(keyName));
}
for (String keyName : familyToFontListMap.keySet()) {
logger.info("family=" + keyName + " fonts=" + familyToFontListMap.get(keyName));
}
}
/* Note this return list excludes logical fonts and JRE fonts */
protected String[] getFontNamesFromPlatform() {
if (getFullNameToFileMap().size() == 0) {
return null;
}
checkForUnreferencedFontFiles();
/* This odd code with TreeMap is used to preserve a historical
* behaviour wrt the sorting order .. */
ArrayList<String> fontNames = new ArrayList<String>();
for (ArrayList<String> a : familyToFontListMap.values()) {
for (String s : a) {
fontNames.add(s);
}
}
return fontNames.toArray(STR_ARRAY);
}
public boolean gotFontsFromPlatform() {
return getFullNameToFileMap().size() != 0;
}
public String getFileNameForFontName(String fontName) {
String fontNameLC = fontName.toLowerCase(Locale.ENGLISH);
return fontToFileMap.get(fontNameLC);
}
private PhysicalFont registerFontFile(String file) {
if (new File(file).isAbsolute() && !registeredFonts.contains(file)) {
int fontFormat = FONTFORMAT_NONE;
int fontRank = Font2D.UNKNOWN_RANK;
if (ttFilter.accept(null, file)) {
fontFormat = FONTFORMAT_TRUETYPE;
fontRank = Font2D.TTF_RANK;
} else if (t1Filter.accept(null, file)) {
fontFormat = FONTFORMAT_TYPE1;
fontRank = Font2D.TYPE1_RANK;
}
if (fontFormat == FONTFORMAT_NONE) {
return null;
}
return registerFontFile(file, null, fontFormat, false, fontRank);
}
return null;
}
/* Used to register any font files that are found by platform APIs
* that weren't previously found in the standard font locations.
* the isAbsolute() check is needed since that's what's stored in the
* set, and on windows, the fonts in the system font directory that
* are in the fontToFileMap are just basenames. We don't want to try
* to register those again, but we do want to register other registry
* installed fonts.
*/
protected void registerOtherFontFiles(HashSet registeredFontFiles) {
if (getFullNameToFileMap().size() == 0) {
return;
}
for (String file : fontToFileMap.values()) {
registerFontFile(file);
}
}
public boolean getFamilyNamesFromPlatform(TreeMap<String, String> familyNames, Locale requestedLocale) {
if (getFullNameToFileMap().size() == 0) {
return false;
}
checkForUnreferencedFontFiles();
for (String name : fontToFamilyNameMap.values()) {
familyNames.put(name.toLowerCase(requestedLocale), name);
}
return true;
}
/* Path may be absolute or a base file name relative to one of
* the platform font directories
*/
private String getPathName(final String s) {
File f = new File(s);
if (f.isAbsolute()) {
return s;
} else if (pathDirs.length == 1) {
return pathDirs[0] + File.separator + s;
} else {
String path = java.security.AccessController.doPrivileged(new java.security.PrivilegedAction<String>() {
public String run() {
for (int p = 0; p < pathDirs.length; p++) {
File f = new File(pathDirs[p] + File.separator + s);
if (f.exists()) {
return f.getAbsolutePath();
}
}
return null;
}
});
if (path != null) {
return path;
}
}
return s; // shouldn't happen, but harmless
}
/* lcName is required to be lower case for use as a key.
* lcName may be a full name, or a family name, and style may
* be specified in addition to either of these. So be sure to
* get the right one. Since an app *could* ask for "Foo Regular"
* and later ask for "Foo Italic", if we don't register all the
* styles, then logic in findFont2D may try to style the original
* so we register the entire family if we get a match here.
* This is still a big win because this code is invoked where
* otherwise we would register all fonts.
* It's also useful for the case where "Foo Bold" was specified with
* style Font.ITALIC, as we would want in that case to try to return
* "Foo Bold Italic" if it exists, and it is only by locating "Foo Bold"
* and opening it that we really "know" it's Bold, and can look for
* a font that supports that and the italic style.
* The code in here is not overtly windows-specific but in fact it
* is unlikely to be useful as is on other platforms. It is maintained
* in this shared source file to be close to its sole client and
* because so much of the logic is intertwined with the logic in
* findFont2D.
*/
private Font2D findFontFromPlatform(String lcName, int style) {
if (getFullNameToFileMap().size() == 0) {
return null;
}
ArrayList<String> family = null;
String fontFile = null;
String familyName = fontToFamilyNameMap.get(lcName);
if (familyName != null) {
fontFile = fontToFileMap.get(lcName);
family = familyToFontListMap.get(familyName.toLowerCase(Locale.ENGLISH));
} else {
family = familyToFontListMap.get(lcName); // is lcName a family name?
if (family != null && family.size() > 0) {
String lcFontName = family.get(0).toLowerCase(Locale.ENGLISH);
if (lcFontName != null) {
familyName = fontToFamilyNameMap.get(lcFontName);
}
}
}
if (family == null || familyName == null) {
return null;
}
String[] fontList = (String[]) family.toArray(STR_ARRAY);
if (fontList.length == 0) {
return null;
}
/* first check that for every font in this family we can find
* a font file. The specific reason for doing this is that
* in at least one case on Windows a font has the face name "David"
* but the registry entry is "David Regular". That is the "unique"
* name of the font but in other cases the registry contains the
* "full" name. See the specifications of name ids 3 and 4 in the
* TrueType 'name' table.
* In general this could cause a problem: if we fail to register
* all members of a family, we may end up mapping to
* the wrong font member: eg return Bold when Plain is needed.
*/
for (int f = 0; f < fontList.length; f++) {
String fontNameLC = fontList[f].toLowerCase(Locale.ENGLISH);
String fileName = fontToFileMap.get(fontNameLC);
if (fileName == null) {
if (FontUtilities.isLogging()) {
FontUtilities.getLogger()
.info("Platform lookup : No file for font " + fontList[f] + " in family " + familyName);
}
return null;
}
}
/* Currently this code only looks for TrueType fonts, so format
* and rank can be specified without looking at the filename.
*/
PhysicalFont physicalFont = null;
if (fontFile != null) {
physicalFont = registerFontFile(getPathName(fontFile), null, FONTFORMAT_TRUETYPE, false, Font2D.TTF_RANK);
}
/* Register all fonts in this family. */
for (int f = 0; f < fontList.length; f++) {
String fontNameLC = fontList[f].toLowerCase(Locale.ENGLISH);
String fileName = fontToFileMap.get(fontNameLC);
if (fontFile != null && fontFile.equals(fileName)) {
continue;
}
/* Currently this code only looks for TrueType fonts, so format
* and rank can be specified without looking at the filename.
*/
registerFontFile(getPathName(fileName), null, FONTFORMAT_TRUETYPE, false, Font2D.TTF_RANK);
}
Font2D font = null;
FontFamily fontFamily = FontFamily.getFamily(familyName);
/* Handle the case where the request is "MyFont Bold", style=Font.ITALIC */
if (physicalFont != null) {
style |= physicalFont.style;
}
if (fontFamily != null) {
font = fontFamily.getFont(style);
if (font == null) {
font = fontFamily.getClosestStyle(style);
}
}
return font;
}
private ConcurrentHashMap<String, Font2D> fontNameCache = new ConcurrentHashMap<String, Font2D>();
/*
* The client supplies a name and a style.
* The name could be a family name, or a full name.
* A font may exist with the specified style, or it may
* exist only in some other style. For non-native fonts the scaler
* may be able to emulate the required style.
*/
public Font2D findFont2D(String name, int style, int fallback) {
String lowerCaseName = name.toLowerCase(Locale.ENGLISH);
String mapName = lowerCaseName + dotStyleStr(style);
Font2D font;
/* If preferLocaleFonts() or preferProportionalFonts() has been
* called we may be using an alternate set of composite fonts in this
* app context. The presence of a pre-built name map indicates whether
* this is so, and gives access to the alternate composite for the
* name.
*/
if (_usingPerAppContextComposites) {
ConcurrentHashMap<String, Font2D> altNameCache = (ConcurrentHashMap<String, Font2D>) AppContext
.getAppContext().get(CompositeFont.class);
if (altNameCache != null) {
font = (Font2D) altNameCache.get(mapName);
} else {
font = null;
}
} else {
font = fontNameCache.get(mapName);
}
if (font != null) {
return font;
}
if (FontUtilities.isLogging()) {
FontUtilities.getLogger().info("Search for font: " + name);
}
// The check below is just so that the bitmap fonts being set by
// AWT and Swing thru the desktop properties do not trigger
// the load fonts case. The two bitmap fonts are now mapped to
// appropriate equivalents for serif and sansserif.
// Note that the cost of this comparison is only for the first
// call until the map is filled.
if (FontUtilities.isWindows) {
if (lowerCaseName.equals("ms sans serif")) {
name = "sansserif";
} else if (lowerCaseName.equals("ms serif")) {
name = "serif";
}
}
/* This isn't intended to support a client passing in the
* string default, but if a client passes in null for the name
* the java.awt.Font class internally substitutes this name.
* So we need to recognise it here to prevent a loadFonts
* on the unrecognised name. The only potential problem with
* this is it would hide any real font called "default"!
* But that seems like a potential problem we can ignore for now.
*/
if (lowerCaseName.equals("default")) {
name = "dialog";
}
/* First see if its a family name. */
FontFamily family = FontFamily.getFamily(name);
if (family != null) {
font = family.getFontWithExactStyleMatch(style);
if (font == null) {
font = findDeferredFont(name, style);
}
if (font == null) {
font = family.getFont(style);
}
if (font == null) {
font = family.getClosestStyle(style);
}
if (font != null) {
fontNameCache.put(mapName, font);
return font;
}
}
/* If it wasn't a family name, it should be a full name of
* either a composite, or a physical font
*/
font = fullNameToFont.get(lowerCaseName);
if (font != null) {
/* Check that the requested style matches the matched font's style.
* But also match style automatically if the requested style is
* "plain". This because the existing behaviour is that the fonts
* listed via getAllFonts etc always list their style as PLAIN.
* This does lead to non-commutative behaviours where you might
* start with "Lucida Sans Regular" and ask for a BOLD version
* and get "Lucida Sans DemiBold" but if you ask for the PLAIN
* style of "Lucida Sans DemiBold" you get "Lucida Sans DemiBold".
* This is however consistent with what happens if you have a bold
* version of a font and no plain version exists - alg. styling
* doesn't "unbolden" the font.
*/
if (font.style == style || style == Font.PLAIN) {
fontNameCache.put(mapName, font);
return font;
} else {
/* If it was a full name like "Lucida Sans Regular", but
* the style requested is "bold", then we want to see if
* there's the appropriate match against another font in
* that family before trying to load all fonts, or applying
* algorithmic styling
*/
family = FontFamily.getFamily(font.getFamilyName(null));
if (family != null) {
Font2D familyFont = family.getFont(style | font.style);
/* We exactly matched the requested style, use it! */
if (familyFont != null) {
fontNameCache.put(mapName, familyFont);
return familyFont;
} else {
/* This next call is designed to support the case
* where bold italic is requested, and if we must
* style, then base it on either bold or italic -
* not on plain!
*/
familyFont = family.getClosestStyle(style | font.style);
if (familyFont != null) {
/* The next check is perhaps one
* that shouldn't be done. ie if we get this
* far we have probably as close a match as we
* are going to get. We could load all fonts to
* see if somehow some parts of the family are
* loaded but not all of it.
*/
if (familyFont.canDoStyle(style | font.style)) {
fontNameCache.put(mapName, familyFont);
return familyFont;
}
}
}
}
}
}
if (FontUtilities.isWindows) {
font = findFontFromPlatformMap(lowerCaseName, style);
if (FontUtilities.isLogging()) {
FontUtilities.getLogger().info("findFontFromPlatformMap returned " + font);
}
if (font != null) {
fontNameCache.put(mapName, font);
return font;
}
/* Don't want Windows to return a Lucida Sans font from
* C:\Windows\Fonts
*/
if (deferredFontFiles.size() > 0) {
font = findJREDeferredFont(lowerCaseName, style);
if (font != null) {
fontNameCache.put(mapName, font);
return font;
}
}
font = findFontFromPlatform(lowerCaseName, style);
if (font != null) {
if (FontUtilities.isLogging()) {
FontUtilities.getLogger().info(
"Found font via platform API for request:\"" + name + "\":, style=" + style + " found font: " +
font);
}
fontNameCache.put(mapName, font);
return font;
}
}
/* If reach here and no match has been located, then if there are
* uninitialised deferred fonts, load as many of those as needed
* to find the deferred font. If none is found through that
* search continue on.
* There is possibly a minor issue when more than one
* deferred font implements the same font face. Since deferred
* fonts are only those in font configuration files, this is a
* controlled situation, the known case being Solaris euro_fonts
* versions of Arial, Times New Roman, Courier New. However
* the larger font will transparently replace the smaller one
* - see addToFontList() - when it is needed by the composite font.
*/
if (deferredFontFiles.size() > 0) {
font = findDeferredFont(name, style);
if (font != null) {
fontNameCache.put(mapName, font);
return font;
}
}
/* Some apps use deprecated 1.0 names such as helvetica and courier. On
* Solaris these are Type1 fonts in /usr/openwin/lib/X11/fonts/Type1.
* If running on Solaris we will register all the fonts in this
* directory.
* May as well register the whole directory without actually testing
* the font name is one of the deprecated names as the next step would
* load all fonts which are in this directory anyway.
* In the event that this lookup is successful it potentially "hides"
* TrueType versions of such fonts that are elsewhere but since they
* do not exist on Solaris this is not a problem.
* Set a flag to indicate we've done this registration to avoid
* repetition and more seriously, to avoid recursion.
*/
if (FontUtilities.isSolaris && !loaded1dot0Fonts) {
/* "timesroman" is a special case since that's not the
* name of any known font on Solaris or elsewhere.
*/
if (lowerCaseName.equals("timesroman")) {
font = findFont2D("serif", style, fallback);
fontNameCache.put(mapName, font);
}
register1dot0Fonts();
loaded1dot0Fonts = true;
Font2D ff = findFont2D(name, style, fallback);
return ff;
}
/* We check for application registered fonts before
* explicitly loading all fonts as if necessary the registration
* code will have done so anyway. And we don't want to needlessly
* load the actual files for all fonts.
* Just as for installed fonts we check for family before fullname.
* We do not add these fonts to fontNameCache for the
* app context case which eliminates the overhead of a per context
* cache for these.
*/
if (fontsAreRegistered || fontsAreRegisteredPerAppContext) {
Hashtable<String, FontFamily> familyTable = null;
Hashtable<String, Font2D> nameTable;
if (fontsAreRegistered) {
familyTable = createdByFamilyName;
nameTable = createdByFullName;
} else {
AppContext appContext = AppContext.getAppContext();
familyTable = (Hashtable<String, FontFamily>) appContext.get(regFamilyKey);
nameTable = (Hashtable<String, Font2D>) appContext.get(regFullNameKey);
}
family = familyTable.get(lowerCaseName);
if (family != null) {
font = family.getFontWithExactStyleMatch(style);
if (font == null) {
font = family.getFont(style);
}
if (font == null) {
font = family.getClosestStyle(style);
}
if (font != null) {
if (fontsAreRegistered) {
fontNameCache.put(mapName, font);
}
return font;
}
}
font = nameTable.get(lowerCaseName);
if (font != null) {
if (fontsAreRegistered) {
fontNameCache.put(mapName, font);
}
return font;
}
}
/* If reach here and no match has been located, then if all fonts
* are not yet loaded, do so, and then recurse.
*/
if (!loadedAllFonts) {
if (FontUtilities.isLogging()) {
FontUtilities.getLogger().info("Load fonts looking for:" + name);
}
loadFonts();
loadedAllFonts = true;
return findFont2D(name, style, fallback);
}
if (!loadedAllFontFiles) {
if (FontUtilities.isLogging()) {
FontUtilities.getLogger().info("Load font files looking for:" + name);
}
loadFontFiles();
loadedAllFontFiles = true;
return findFont2D(name, style, fallback);
}
/* The primary name is the locale default - ie not US/English but
* whatever is the default in this locale. This is the way it always
* has been but may be surprising to some developers if "Arial Regular"
* were hard-coded in their app and yet "Arial Regular" was not the
* default name. Fortunately for them, as a consequence of the JDK
* supporting returning names and family names for arbitrary locales,
* we also need to support searching all localised names for a match.
* But because this case - where the name used to reference a font is
* not the same as the default for this locale - is rare, it makes sense
* to search a much shorter list of default locale names and only go to
* a longer list of names in the event that no match was found.
* So add here code which searches localised names too.
* As in 1.4.x this happens only after loading all fonts, which
* is probably the right order.
*/
if ((font = findFont2DAllLocales(name, style)) != null) {
fontNameCache.put(mapName, font);
return font;
}
/* Perhaps it's a "compatibility" name - timesroman, helvetica,
* or courier, which 1.0 apps used for logical fonts.
* We look for these "late" after a loadFonts as we must not
* hide real fonts of these names.
* Map these appropriately:
* On windows this means according to the rules specified by the
* FontConfiguration : do it only for encoding==Cp1252
*
* REMIND: this is something we plan to remove.
*/
if (FontUtilities.isWindows) {
String compatName = getFontConfiguration().getFallbackFamilyName(name, null);
if (compatName != null) {
font = findFont2D(compatName, style, fallback);
fontNameCache.put(mapName, font);
return font;
}
} else if (lowerCaseName.equals("timesroman")) {
font = findFont2D("serif", style, fallback);
fontNameCache.put(mapName, font);
return font;
} else if (lowerCaseName.equals("helvetica")) {
font = findFont2D("sansserif", style, fallback);
fontNameCache.put(mapName, font);
return font;
} else if (lowerCaseName.equals("courier")) {
font = findFont2D("monospaced", style, fallback);
fontNameCache.put(mapName, font);
return font;
}
if (FontUtilities.isLogging()) {
FontUtilities.getLogger().info("No font found for:" + name);
}
switch (fallback) {
case PHYSICAL_FALLBACK:
return getDefaultPhysicalFont();
case LOGICAL_FALLBACK:
return getDefaultLogicalFont(style);
default:
return null;
}
}
/*
* Workaround for apps which are dependent on a font metrics bug
* in JDK 1.1. This is an unsupported win32 private setting.
* Left in for a customer - do not remove.
*/
public boolean usePlatformFontMetrics() {
return usePlatformFontMetrics;
}
public int getNumFonts() {
return physicalFonts.size() + maxCompFont;
}
private static boolean fontSupportsEncoding(Font font, String encoding) {
return FontUtilities.getFont2D(font).supportsEncoding(encoding);
}
protected abstract String getFontPath(boolean noType1Fonts);
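/* Shutdown-hook thread and the list of temporary font files it deletes on
 * VM exit; both are populated lazily by createFont2D() for created fonts.
 */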
private Thread fileCloser = null;
Vector<File> tmpFontFiles = null;
public Font2D createFont2D(File fontFile, int fontFormat, boolean isCopy, CreatedFontTracker tracker)
throws FontFormatException {
String fontFilePath = fontFile.getPath();
FileFont font2D = null;
final File fFile = fontFile;
final CreatedFontTracker _tracker = tracker;
try {
switch (fontFormat) {
case Font.TRUETYPE_FONT:
font2D = new TrueTypeFont(fontFilePath, null, 0, true);
break;
case Font.TYPE1_FONT:
font2D = new Type1Font(fontFilePath, null, isCopy);
break;
default:
throw new FontFormatException("Unrecognised Font Format");
}
} catch (FontFormatException e) {
if (isCopy) {
java.security.AccessController.doPrivileged(new java.security.PrivilegedAction() {
public Object run() {
if (_tracker != null) {
_tracker.subBytes((int) fFile.length());
}
fFile.delete();
return null;
}
});
}
throw (e);
}
if (isCopy) {
font2D.setFileToRemove(fontFile, tracker);
synchronized (FontManager.class) {
if (tmpFontFiles == null) {
tmpFontFiles = new Vector<File>();
}
tmpFontFiles.add(fontFile);
if (fileCloser == null) {
final Runnable fileCloserRunnable = new Runnable() {
public void run() {
java.security.AccessController.doPrivileged(new java.security.PrivilegedAction() {
public Object run() {
for (int i = 0; i < CHANNELPOOLSIZE; i++) {
if (fontFileCache[i] != null) {
try {
fontFileCache[i].close();
} catch (Exception e) {
}
}
}
if (tmpFontFiles != null) {
File[] files = new File[tmpFontFiles.size()];
files = tmpFontFiles.toArray(files);
for (int f = 0; f < files.length; f++) {
try {
files[f].delete();
} catch (Exception e) {
}
}
}
return null;
}
});
}
};
java.security.AccessController.doPrivileged(new java.security.PrivilegedAction() {
public Object run() {
/* The thread must be a member of a thread group
* which will not get GCed before VM exit.
* Make its parent the top-level thread group.
*/
ThreadGroup tg = Thread.currentThread().getThreadGroup();
for (ThreadGroup tgn = tg; tgn != null; tg = tgn, tgn = tg.getParent()) { ; }
fileCloser = new Thread(tg, fileCloserRunnable);
fileCloser.setContextClassLoader(null);
Runtime.getRuntime().addShutdownHook(fileCloser);
return null;
}
});
}
}
}
return font2D;
}
/* remind: used in X11GraphicsEnvironment and called often enough
* that we ought to obsolete this code
*/
public synchronized String getFullNameByFileName(String fileName) {
PhysicalFont[] physFonts = getPhysicalFonts();
for (int i = 0; i < physFonts.length; i++) {
if (physFonts[i].platName.equals(fileName)) {
return (physFonts[i].getFontName(null));
}
}
return null;
}
/*
* This is called when font is determined to be invalid/bad.
* It is designed to be called (for example) by the font scaler
* when in processing a font file it is discovered to be incorrect.
* This is different than the case where fonts are discovered to
* be incorrect during initial verification, as such fonts are
* never registered.
* Handles to this font held are re-directed to a default font.
* This default may not be an ideal substitute but it's better than
* crashing. This code assumes a PhysicalFont parameter as it doesn't
* make sense for a Composite to be "bad".
*/
public synchronized void deRegisterBadFont(Font2D font2D) {
if (!(font2D instanceof PhysicalFont)) {
/* We should never reach here, but just in case */
return;
} else {
if (FontUtilities.isLogging()) {
FontUtilities.getLogger().severe("Deregister bad font: " + font2D);
}
replaceFont((PhysicalFont) font2D, getDefaultPhysicalFont());
}
}
/*
* This encapsulates all the work that needs to be done when a
* Font2D is replaced by a different Font2D.
*/
public synchronized void replaceFont(PhysicalFont oldFont, PhysicalFont newFont) {
if (oldFont.handle.font2D != oldFont) {
/* already done */
return;
}
/* If we try to replace the font with itself, that won't work,
* so pick any alternative physical font
*/
if (oldFont == newFont) {
if (FontUtilities.isLogging()) {
FontUtilities.getLogger().severe("Can't replace bad font with itself " + oldFont);
}
PhysicalFont[] physFonts = getPhysicalFonts();
for (int i = 0; i < physFonts.length; i++) {
if (physFonts[i] != newFont) {
newFont = physFonts[i];
break;
}
}
if (oldFont == newFont) {
if (FontUtilities.isLogging()) {
FontUtilities.getLogger().severe("This is bad. No good physicalFonts found.");
}
return;
}
}
/* eliminate references to this font, so it won't be located
* by future callers, and will be eligible for GC when all
* references are removed
*/
oldFont.handle.font2D = newFont;
physicalFonts.remove(oldFont.fullName);
fullNameToFont.remove(oldFont.fullName.toLowerCase(Locale.ENGLISH));
FontFamily.remove(oldFont);
if (localeFullNamesToFont != null) {
Map.Entry[] mapEntries = (Map.Entry[]) localeFullNamesToFont.entrySet().
toArray(new Map.Entry[0]);
/* Should I be replacing these, or should I just remove
* the names from the map?
*/
for (int i = 0; i < mapEntries.length; i++) {
if (mapEntries[i].getValue() == oldFont) {
try {
mapEntries[i].setValue(newFont);
} catch (Exception e) {
/* some maps don't support this operation.
* In this case just give up and remove the entry.
*/
localeFullNamesToFont.remove(mapEntries[i].getKey());
}
}
}
}
for (int i = 0; i < maxCompFont; i++) {
/* Deferred initialization of composites shouldn't be
* a problem for this case, since a font must have been
* initialised to be discovered to be bad.
* Some JRE composites on Solaris use two versions of the same
* font. The replaced font isn't bad, just "smaller" so there's
* no need to make the slot point to the new font.
* Since composites have a direct reference to the Font2D (not
* via a handle) making this substitution is not safe and could
* cause an additional problem and so this substitution is
* warranted only when a font is truly "bad" and could cause
* a crash. So we now replace it only if it's being substituted
* with some font other than a fontconfig rank font.
* Since in practice a substitution will have the same rank
* this may never happen, but the code is safer even if it's
* also now a no-op.
* The only obvious "glitch" from this stems from the current
* implementation that when asked for the number of glyphs in a
* composite it lies and returns the number in slot 0 because
* composite glyphs aren't contiguous. Since we live with that
* we can live with the glitch that depending on how it was
* initialised a composite may return different values for this.
* Fixing the issues with composite glyph ids is tricky as
* there are exclusion ranges and unlike other fonts even the
* true "numGlyphs" isn't a contiguous range. Likely the only
* solution is an API that returns an array of glyph ranges
* which takes precedence over the existing API. That might
* also need to address excluding ranges which represent a
* code point supported by an earlier component.
*/
if (newFont.getRank() > Font2D.FONT_CONFIG_RANK) {
compFonts[i].replaceComponentFont(oldFont, newFont);
}
}
}
private synchronized void loadLocaleNames() {
if (localeFullNamesToFont != null) {
return;
}
localeFullNamesToFont = new HashMap<String, TrueTypeFont>();
Font2D[] fonts = getRegisteredFonts();
for (int i = 0; i < fonts.length; i++) {
if (fonts[i] instanceof TrueTypeFont) {
TrueTypeFont ttf = (TrueTypeFont) fonts[i];
String[] fullNames = ttf.getAllFullNames();
for (int n = 0; n < fullNames.length; n++) {
localeFullNamesToFont.put(fullNames[n], ttf);
}
FontFamily family = FontFamily.getFamily(ttf.familyName);
if (family != null) {
FontFamily.addLocaleNames(family, ttf.getAllFamilyNames());
}
}
}
}
/* This replicates the core logic of findFont2D but operates on
* all the locale names. This hasn't been merged into findFont2D to
* keep the logic simpler and reduce overhead, since this case is
* almost never used. The main case in which it is called is when
* a bogus font name is used and we need to check all possible names
* before returning the default case.
*/
private Font2D findFont2DAllLocales(String name, int style) {
if (FontUtilities.isLogging()) {
FontUtilities.getLogger().info("Searching localised font names for:" + name);
}
/* If reach here and no match has been located, then if we have
* not yet built the map of localeFullNamesToFont for TT fonts, do so
* now. This method must be called after all fonts have been loaded.
*/
if (localeFullNamesToFont == null) {
loadLocaleNames();
}
String lowerCaseName = name.toLowerCase();
Font2D font = null;
/* First see if it's a family name. */
FontFamily family = FontFamily.getLocaleFamily(lowerCaseName);
if (family != null) {
font = family.getFont(style);
if (font == null) {
font = family.getClosestStyle(style);
}
if (font != null) {
return font;
}
}
/* If it wasn't a family name, it should be a full name. */
synchronized (this) {
font = localeFullNamesToFont.get(name);
}
if (font != null) {
if (font.style == style || style == Font.PLAIN) {
return font;
} else {
family = FontFamily.getFamily(font.getFamilyName(null));
if (family != null) {
Font2D familyFont = family.getFont(style);
/* We exactly matched the requested style, use it! */
if (familyFont != null) {
return familyFont;
} else {
familyFont = family.getClosestStyle(style);
if (familyFont != null) {
/* The next check is perhaps one
* that shouldn't be done. ie if we get this
* far we have probably as close a match as we
* are going to get. We could load all fonts to
* see if somehow some parts of the family are
* loaded but not all of it.
* This check is commented out for now.
*/
if (!familyFont.canDoStyle(style)) {
familyFont = null;
}
return familyFont;
}
}
}
}
}
return font;
}
/* Supporting "alternate" composite fonts on 2D graphics objects
* is accessed by the application by calling methods on the local
* GraphicsEnvironment. The overall implementation is described
* in one place, here, since otherwise, as the implementation is spread
* around, it may be difficult to track.
* The methods below call into SunGraphicsEnvironment which creates a
* new FontConfiguration instance. The FontConfiguration class,
* and its platform sub-classes are updated to take parameters requesting
* these behaviours. This is then used to create new composite font
* instances. Since this calls the initCompositeFont method in
* SunGraphicsEnvironment it performs the same initialization as is
* performed normally. There may be some duplication of effort, but
* that code is already written to be able to perform properly if called
* to duplicate work. The main difference is that if we detect we are
* running in an applet/browser/Java plugin environment these new fonts
* are not placed in the "default" maps but into an AppContext instance.
* The font lookup mechanism in java.awt.Font.getFont2D() is also updated
* so that look-up for composite fonts will in that case always
* do a lookup rather than returning a cached result.
* This is inefficient but necessary else singleton java.awt.Font
* instances would not retrieve the correct Font2D for the appcontext.
* sun.font.FontManager.findFont2D is also updated so that it uses
* a name map cache specific to that appcontext.
*
* Getting an AppContext is expensive, so there is a global variable
* that records whether these methods have ever been called and can
* avoid the expense for almost all applications. Once the correct
* CompositeFont is associated with the Font, everything should work
* through existing mechanisms.
* A special case is that GraphicsEnvironment.getAllFonts() must
* return an AppContext specific list.
*
* Calling the methods below is "heavyweight" but it is expected that
* these methods will be called very rarely.
*
* If _usingPerAppContextComposites is true, we are in "applet"
* (eg browser) environment and at least one context has selected
* an alternate composite font behaviour.
* If _usingAlternateComposites is true, we are not in an "applet"
* environment and the (single) application has selected
* an alternate composite font behaviour.
*
* - Printing: The implementation delegates logical fonts to an AWT
* mechanism which cannot use these alternate configurations.
* We can detect that alternate fonts are in use and back-off to 2D, but
* that uses outlines. Much of this can be fixed with additional work
* but that may have to wait. The results should be correct, just not
* optimal.
*/
private static final Object altJAFontKey = new Object();
private static final Object localeFontKey = new Object();
private static final Object proportionalFontKey = new Object();
private boolean _usingPerAppContextComposites = false;
private boolean _usingAlternateComposites = false;
/* These values are used only if we are running as a standalone
* application, as determined by maybeMultiAppContext();
*/
private static boolean gAltJAFont = false;
private boolean gLocalePref = false;
private boolean gPropPref = false;
/* This method doesn't check if alternates are selected in this app
* context. It's used by the FontMetrics caching code which in such
* a case cannot retrieve a cached metrics solely on the basis of
* the Font.equals() method since it needs to also check if the Font2D
* is the same.
* We also use non-standard composites for Swing native L&F fonts on
* Windows. In that case the policy is that the metrics reported are
* based solely on the physical font in the first slot which is the
* visible java.awt.Font. So in that case the metrics cache which tests
* the Font does what we want. In the near future when we expand the GTK
* logical font definitions we may need to revisit this if GTK reports
* combined metrics instead. For now though this test can be simple.
*/
public boolean maybeUsingAlternateCompositeFonts() {
return _usingAlternateComposites || _usingPerAppContextComposites;
}
public boolean usingAlternateCompositeFonts() {
return (_usingAlternateComposites ||
(_usingPerAppContextComposites && AppContext.getAppContext().get(CompositeFont.class) != null));
}
private static boolean maybeMultiAppContext() {
Boolean appletSM = (Boolean) java.security.AccessController.doPrivileged(new java.security.PrivilegedAction() {
public Object run() {
SecurityManager sm = System.getSecurityManager();
return new Boolean(sm instanceof sun.applet.AppletSecurity);
}
});
return appletSM.booleanValue();
}
/* Modifies the behaviour of a subsequent call to preferLocaleFonts()
* to use Mincho instead of Gothic for dialoginput in JA locales
* on windows. Not needed on other platforms.
*/
public synchronized void useAlternateFontforJALocales() {
if (FontUtilities.isLogging()) {
FontUtilities.getLogger().info("Entered useAlternateFontforJALocales().");
}
if (!FontUtilities.isWindows) {
return;
}
if (!maybeMultiAppContext()) {
gAltJAFont = true;
} else {
AppContext appContext = AppContext.getAppContext();
appContext.put(altJAFontKey, altJAFontKey);
}
}
public boolean usingAlternateFontforJALocales() {
if (!maybeMultiAppContext()) {
return gAltJAFont;
} else {
AppContext appContext = AppContext.getAppContext();
return appContext.get(altJAFontKey) == altJAFontKey;
}
}
public synchronized void preferLocaleFonts() {
if (FontUtilities.isLogging()) {
FontUtilities.getLogger().info("Entered preferLocaleFonts().");
}
/* Test if re-ordering will have any effect */
if (!FontConfiguration.willReorderForStartupLocale()) {
return;
}
if (!maybeMultiAppContext()) {
if (gLocalePref == true) {
return;
}
gLocalePref = true;
createCompositeFonts(fontNameCache, gLocalePref, gPropPref);
_usingAlternateComposites = true;
} else {
AppContext appContext = AppContext.getAppContext();
if (appContext.get(localeFontKey) == localeFontKey) {
return;
}
appContext.put(localeFontKey, localeFontKey);
boolean acPropPref = appContext.get(proportionalFontKey) == proportionalFontKey;
ConcurrentHashMap<String, Font2D> altNameCache = new ConcurrentHashMap<String, Font2D>();
/* If there is an existing hashtable, we can drop it. */
appContext.put(CompositeFont.class, altNameCache);
_usingPerAppContextComposites = true;
createCompositeFonts(altNameCache, true, acPropPref);
}
}
public synchronized void preferProportionalFonts() {
if (FontUtilities.isLogging()) {
FontUtilities.getLogger().info("Entered preferProportionalFonts().");
}
/* If no proportional fonts are configured, there's no need
* to take any action.
*/
if (!FontConfiguration.hasMonoToPropMap()) {
return;
}
if (!maybeMultiAppContext()) {
if (gPropPref == true) {
return;
}
gPropPref = true;
createCompositeFonts(fontNameCache, gLocalePref, gPropPref);
_usingAlternateComposites = true;
} else {
AppContext appContext = AppContext.getAppContext();
if (appContext.get(proportionalFontKey) == proportionalFontKey) {
return;
}
appContext.put(proportionalFontKey, proportionalFontKey);
boolean acLocalePref = appContext.get(localeFontKey) == localeFontKey;
ConcurrentHashMap<String, Font2D> altNameCache = new ConcurrentHashMap<String, Font2D>();
/* If there is an existing hashtable, we can drop it. */
appContext.put(CompositeFont.class, altNameCache);
_usingPerAppContextComposites = true;
createCompositeFonts(altNameCache, acLocalePref, true);
}
}
private static HashSet<String> installedNames = null;
private static HashSet<String> getInstalledNames() {
if (installedNames == null) {
Locale l = getSystemStartupLocale();
SunFontManager fontManager = SunFontManager.getInstance();
String[] installedFamilies = fontManager.getInstalledFontFamilyNames(l);
Font[] installedFonts = fontManager.getAllInstalledFonts();
HashSet<String> names = new HashSet<String>();
for (int i = 0; i < installedFamilies.length; i++) {
names.add(installedFamilies[i].toLowerCase(l));
}
for (int i = 0; i < installedFonts.length; i++) {
names.add(installedFonts[i].getFontName(l).toLowerCase(l));
}
installedNames = names;
}
return installedNames;
}
/* Keys are used to lookup per-AppContext Hashtables */
private static final Object regFamilyKey = new Object();
private static final Object regFullNameKey = new Object();
private Hashtable<String, FontFamily> createdByFamilyName;
private Hashtable<String, Font2D> createdByFullName;
private boolean fontsAreRegistered = false;
private boolean fontsAreRegisteredPerAppContext = false;
public boolean registerFont(Font font) {
/* This method should not be called with "null".
* It is the caller's responsibility to ensure that.
*/
if (font == null) {
return false;
}
/* Initialise these objects only once we start to use this API */
synchronized (regFamilyKey) {
if (createdByFamilyName == null) {
createdByFamilyName = new Hashtable<String, FontFamily>();
createdByFullName = new Hashtable<String, Font2D>();
}
}
if (!FontAccess.getFontAccess().isCreatedFont(font)) {
return false;
}
/* We want to ensure that this font cannot override existing
* installed fonts. Check these conditions :
* - family name is not that of an installed font
* - full name is not that of an installed font
* - family name is not the same as the full name of an installed font
* - full name is not the same as the family name of an installed font
* The last two of these may initially look odd but the reason is
* that (unfortunately) Font constructors do not distinguish these.
* An extreme example of such a problem would be a font which has
* family name "Dialog.Plain" and full name of "Dialog".
* The one arguably overly stringent restriction here is that if an
* application wants to supply a new member of an existing family
* it will get rejected. But since the JRE can perform synthetic
* styling in many cases it's not necessary.
* We don't apply the same logic to registered fonts. If apps want
* to do this let's assume they have a reason. It won't cause problems
* except for themselves.
*/
HashSet<String> names = getInstalledNames();
Locale l = getSystemStartupLocale();
String familyName = font.getFamily(l).toLowerCase();
String fullName = font.getFontName(l).toLowerCase();
if (names.contains(familyName) || names.contains(fullName)) {
return false;
}
/* Checks passed, now register the font */
Hashtable<String, FontFamily> familyTable;
Hashtable<String, Font2D> fullNameTable;
if (!maybeMultiAppContext()) {
familyTable = createdByFamilyName;
fullNameTable = createdByFullName;
fontsAreRegistered = true;
} else {
AppContext appContext = AppContext.getAppContext();
familyTable = (Hashtable<String, FontFamily>) appContext.get(regFamilyKey);
fullNameTable = (Hashtable<String, Font2D>) appContext.get(regFullNameKey);
if (familyTable == null) {
familyTable = new Hashtable<String, FontFamily>();
fullNameTable = new Hashtable<String, Font2D>();
appContext.put(regFamilyKey, familyTable);
appContext.put(regFullNameKey, fullNameTable);
}
fontsAreRegisteredPerAppContext = true;
}
/* Create the FontFamily and add font to the tables */
Font2D font2D = FontUtilities.getFont2D(font);
int style = font2D.getStyle();
FontFamily family = familyTable.get(familyName);
if (family == null) {
family = new FontFamily(font.getFamily(l));
familyTable.put(familyName, family);
}
/* Remove name cache entries if not using app contexts.
* To accommodate a case where code may have registered first a plain
* family member and then used it and is now registering a bold family
* member, we need to remove all members of the family, so that the
* new style can get picked up rather than continuing to synthesise.
*/
if (fontsAreRegistered) {
removeFromCache(family.getFont(Font.PLAIN));
removeFromCache(family.getFont(Font.BOLD));
removeFromCache(family.getFont(Font.ITALIC));
removeFromCache(family.getFont(Font.BOLD | Font.ITALIC));
removeFromCache(fullNameTable.get(fullName));
}
family.setFont(font2D, style);
fullNameTable.put(fullName, font2D);
return true;
}
/* Remove from the name cache all references to the Font2D */
private void removeFromCache(Font2D font) {
if (font == null) {
return;
}
String[] keys = (String[]) (fontNameCache.keySet().toArray(STR_ARRAY));
for (int k = 0; k < keys.length; k++) {
if (fontNameCache.get(keys[k]) == font) {
fontNameCache.remove(keys[k]);
}
}
}
// It may look odd to use TreeMap but it's more convenient to the caller.
public TreeMap<String, String> getCreatedFontFamilyNames() {
Hashtable<String, FontFamily> familyTable;
if (fontsAreRegistered) {
familyTable = createdByFamilyName;
} else if (fontsAreRegisteredPerAppContext) {
AppContext appContext = AppContext.getAppContext();
familyTable = (Hashtable<String, FontFamily>) appContext.get(regFamilyKey);
} else {
return null;
}
Locale l = getSystemStartupLocale();
synchronized (familyTable) {
TreeMap<String, String> map = new TreeMap<String, String>();
for (FontFamily f : familyTable.values()) {
Font2D font2D = f.getFont(Font.PLAIN);
if (font2D == null) {
font2D = f.getClosestStyle(Font.PLAIN);
}
String name = font2D.getFamilyName(l);
map.put(name.toLowerCase(l), name);
}
return map;
}
}
public Font[] getCreatedFonts() {
Hashtable<String, Font2D> nameTable;
if (fontsAreRegistered) {
nameTable = createdByFullName;
} else if (fontsAreRegisteredPerAppContext) {
AppContext appContext = AppContext.getAppContext();
nameTable = (Hashtable<String, Font2D>) appContext.get(regFullNameKey);
} else {
return null;
}
Locale l = getSystemStartupLocale();
synchronized (nameTable) {
Font[] fonts = new Font[nameTable.size()];
int i = 0;
for (Font2D font2D : nameTable.values()) {
fonts[i++] = new Font(font2D.getFontName(l), Font.PLAIN, 1);
}
return fonts;
}
}
protected String[] getPlatformFontDirs(boolean noType1Fonts) {
/* First check if we already initialised path dirs */
if (pathDirs != null) {
return pathDirs;
}
String path = getPlatformFontPath(noType1Fonts);
StringTokenizer parser = new StringTokenizer(path, File.pathSeparator);
ArrayList<String> pathList = new ArrayList<String>();
try {
while (parser.hasMoreTokens()) {
pathList.add(parser.nextToken());
}
} catch (NoSuchElementException e) {
}
pathDirs = pathList.toArray(new String[0]);
return pathDirs;
}
/**
* Returns an array of two strings. The first element is the
* name of the font. The second element is the file name.
*/
public abstract String[] getDefaultPlatformFont();
// Begin: Refactored from SunGraphicsEnvironment.
/*
* helper function for registerFonts
*/
private void addDirFonts(String dirName, File dirFile, FilenameFilter filter, int fontFormat,
boolean useJavaRasterizer, int fontRank, boolean defer, boolean resolveSymLinks) {
String[] ls = dirFile.list(filter);
if (ls == null || ls.length == 0) {
return;
}
String[] fontNames = new String[ls.length];
String[][] nativeNames = new String[ls.length][];
int fontCount = 0;
for (int i = 0; i < ls.length; i++) {
File theFile = new File(dirFile, ls[i]);
String fullName = null;
if (resolveSymLinks) {
try {
fullName = theFile.getCanonicalPath();
} catch (IOException e) {
}
}
if (fullName == null) {
fullName = dirName + File.separator + ls[i];
}
// REMIND: case compare depends on platform
if (registeredFontFiles.contains(fullName)) {
continue;
}
if (badFonts != null && badFonts.contains(fullName)) {
if (FontUtilities.debugFonts()) {
FontUtilities.getLogger().warning("skip bad font " + fullName);
}
continue; // skip this font file.
}
registeredFontFiles.add(fullName);
if (FontUtilities.debugFonts() && FontUtilities.getLogger().isLoggable(PlatformLogger.INFO)) {
String message = "Registering font " + fullName;
String[] natNames = getNativeNames(fullName, null);
if (natNames == null) {
message += " with no native name";
} else {
message += " with native name(s) " + natNames[0];
for (int nn = 1; nn < natNames.length; nn++) {
message += ", " + natNames[nn];
}
}
FontUtilities.getLogger().info(message);
}
fontNames[fontCount] = fullName;
nativeNames[fontCount++] = getNativeNames(fullName, null);
}
registerFonts(fontNames, nativeNames, fontCount, fontFormat, useJavaRasterizer, fontRank, defer);
return;
}
protected String[] getNativeNames(String fontFileName, String platformName) {
return null;
}
/**
* Returns a file name for the physical font represented by this platform
* font name. The default implementation tries to obtain the file name
* from the font configuration.
* Subclasses may override to provide information from other sources.
*/
protected String getFileNameFromPlatformName(String platformFontName) {
return fontConfig.getFileNameFromPlatformName(platformFontName);
}
/**
* Return the default font configuration.
*/
public FontConfiguration getFontConfiguration() {
return fontConfig;
}
/* A call to this method should be followed by a call to
* registerFontDirs(..)
*/
public String getPlatformFontPath(boolean noType1Font) {
if (fontPath == null) {
fontPath = getFontPath(noType1Font);
}
return fontPath;
}
public static boolean isOpenJDK() {
return FontUtilities.isOpenJDK;
}
protected void loadFonts() {
if (discoveredAllFonts) {
return;
}
/* Use lock specific to the font system */
synchronized (this) {
if (FontUtilities.debugFonts()) {
Thread.dumpStack();
FontUtilities.getLogger().info("SunGraphicsEnvironment.loadFonts() called");
}
initialiseDeferredFonts();
java.security.AccessController.doPrivileged(new java.security.PrivilegedAction() {
public Object run() {
if (fontPath == null) {
fontPath = getPlatformFontPath(noType1Font);
registerFontDirs(fontPath);
}
if (fontPath != null) {
// this will find all fonts including those already
// registered. But we have checks in place to prevent
// double registration.
if (!gotFontsFromPlatform()) {
registerFontsOnPath(fontPath, false, Font2D.UNKNOWN_RANK, false, true);
loadedAllFontFiles = true;
}
}
registerOtherFontFiles(registeredFontFiles);
discoveredAllFonts = true;
return null;
}
});
}
}
protected void registerFontDirs(String pathName) {
return;
}
private void registerFontsOnPath(String pathName, boolean useJavaRasterizer, int fontRank, boolean defer,
boolean resolveSymLinks) {
StringTokenizer parser = new StringTokenizer(pathName, File.pathSeparator);
try {
while (parser.hasMoreTokens()) {
registerFontsInDir(parser.nextToken(), useJavaRasterizer, fontRank, defer, resolveSymLinks);
}
} catch (NoSuchElementException e) {
}
}
/* Called to register fall back fonts */
public void registerFontsInDir(String dirName) {
registerFontsInDir(dirName, true, Font2D.JRE_RANK, true, false);
}
private void registerFontsInDir(String dirName, boolean useJavaRasterizer, int fontRank, boolean defer,
boolean resolveSymLinks) {
File pathFile = new File(dirName);
addDirFonts(dirName, pathFile, ttFilter, FONTFORMAT_TRUETYPE, useJavaRasterizer,
fontRank == Font2D.UNKNOWN_RANK ? Font2D.TTF_RANK : fontRank, defer, resolveSymLinks);
addDirFonts(dirName, pathFile, t1Filter, FONTFORMAT_TYPE1, useJavaRasterizer,
fontRank == Font2D.UNKNOWN_RANK ? Font2D.TYPE1_RANK : fontRank, defer, resolveSymLinks);
}
protected void registerFontDir(String path) {
}
/**
* Returns file name for default font, either absolute
* or relative as needed by registerFontFile.
*/
public synchronized String getDefaultFontFile() {
if (defaultFontFileName == null) {
initDefaultFonts();
}
return defaultFontFileName;
}
private void initDefaultFonts() {
if (!isOpenJDK()) {
defaultFontName = lucidaFontName;
if (useAbsoluteFontFileNames()) {
defaultFontFileName = jreFontDirName + File.separator + FontUtilities.LUCIDA_FILE_NAME;
} else {
defaultFontFileName = FontUtilities.LUCIDA_FILE_NAME;
}
}
}
/**
* Whether registerFontFile expects absolute or relative
* font file names.
*/
protected boolean useAbsoluteFontFileNames() {
return true;
}
/**
* Creates this environment's FontConfiguration.
*/
protected abstract FontConfiguration createFontConfiguration();
public abstract FontConfiguration createFontConfiguration(boolean preferLocaleFonts, boolean preferPropFonts);
/**
* Returns face name for default font, or null if
* no face names are used for CompositeFontDescriptors
* for this platform.
*/
public synchronized String getDefaultFontFaceName() {
if (defaultFontName == null) {
initDefaultFonts();
}
return defaultFontName;
}
public void loadFontFiles() {
loadFonts();
if (loadedAllFontFiles) {
return;
}
/* Use lock specific to the font system */
synchronized (this) {
if (FontUtilities.debugFonts()) {
Thread.dumpStack();
FontUtilities.getLogger().info("loadAllFontFiles() called");
}
java.security.AccessController.doPrivileged(new java.security.PrivilegedAction() {
public Object run() {
if (fontPath == null) {
fontPath = getPlatformFontPath(noType1Font);
}
if (fontPath != null) {
// this will find all fonts including those already
// registered. But we have checks in place to prevent
// double registration.
registerFontsOnPath(fontPath, false, Font2D.UNKNOWN_RANK, false, true);
}
loadedAllFontFiles = true;
return null;
}
});
}
}
/*
* This method asks the font configuration API for all platform names
* used as components of composite/logical fonts and iterates over these
* looking up their corresponding file name and registers these fonts.
* It also ensures that the fonts are accessible via platform APIs.
* The composites themselves are then registered.
*/
private void initCompositeFonts(FontConfiguration fontConfig, ConcurrentHashMap<String, Font2D> altNameCache) {
if (FontUtilities.isLogging()) {
FontUtilities.getLogger().info("Initialising composite fonts");
}
int numCoreFonts = fontConfig.getNumberCoreFonts();
String[] fcFonts = fontConfig.getPlatformFontNames();
for (int f = 0; f < fcFonts.length; f++) {
String platformFontName = fcFonts[f];
String fontFileName = getFileNameFromPlatformName(platformFontName);
String[] nativeNames = null;
if (fontFileName == null || fontFileName.equals(platformFontName)) {
/* No file located, so register using the platform name,
* i.e. as a native font.
*/
fontFileName = platformFontName;
} else {
if (f < numCoreFonts) {
/* If platform APIs also need to access the font, add it
* to a set to be registered with the platform too.
* This may be used to add the parent directory to the X11
* font path if it's not already there. See the docs for the
* subclass implementation.
* This is now mainly for the benefit of X11-based AWT
* But for historical reasons, 2D initialisation code
* makes these calls.
* If the fontconfiguration file is properly set up
* so that all fonts are mapped to files and all their
* appropriate directories are specified, then this
* method will be low cost as it will return after
* a test that finds a null lookup map.
*/
addFontToPlatformFontPath(platformFontName);
}
nativeNames = getNativeNames(fontFileName, platformFontName);
}
/* Uncomment these two lines to "generate" the XLFD->filename
* mappings needed to speed start-up on Solaris.
* Augment this with the appendedpathname and the mappings
* for native (F3) fonts
*/
//String platName = platformFontName.replaceAll(" ", "_");
//System.out.println("filename."+platName+"="+fontFileName);
registerFontFile(fontFileName, nativeNames, Font2D.FONT_CONFIG_RANK, true);
}
/* This registers accumulated paths from the calls to
* addFontToPlatformFontPath(..) and any specified by
* the font configuration. Rather than registering
* the fonts it puts them in a place and form suitable for
* the Toolkit to pick up and use if a toolkit is initialised,
* and if it uses X11 fonts.
*/
registerPlatformFontsUsedByFontConfiguration();
CompositeFontDescriptor[] compositeFontInfo = fontConfig.get2DCompositeFontInfo();
for (int i = 0; i < compositeFontInfo.length; i++) {
CompositeFontDescriptor descriptor = compositeFontInfo[i];
String[] componentFileNames = descriptor.getComponentFileNames();
String[] componentFaceNames = descriptor.getComponentFaceNames();
/* It would be better eventually to handle this in the
* FontConfiguration code which should also remove duplicate slots
*/
if (missingFontFiles != null) {
for (int ii = 0; ii < componentFileNames.length; ii++) {
if (missingFontFiles.contains(componentFileNames[ii])) {
componentFileNames[ii] = getDefaultFontFile();
componentFaceNames[ii] = getDefaultFontFaceName();
}
}
}
/* FontConfiguration needs to convey how many fonts it has added
* as fallback component fonts which should not affect metrics.
* The core component count will be the number of metrics slots.
* This does not preclude other mechanisms for adding
* fall back component fonts to the composite.
*/
if (altNameCache != null) {
SunFontManager.registerCompositeFont(descriptor.getFaceName(), componentFileNames, componentFaceNames,
descriptor.getCoreComponentCount(), descriptor.getExclusionRanges(),
descriptor.getExclusionRangeLimits(), true, altNameCache);
} else {
registerCompositeFont(descriptor.getFaceName(), componentFileNames, componentFaceNames,
descriptor.getCoreComponentCount(), descriptor.getExclusionRanges(),
descriptor.getExclusionRangeLimits(), true);
}
if (FontUtilities.debugFonts()) {
FontUtilities.getLogger().info("registered " + descriptor.getFaceName());
}
}
}
/**
* Notifies graphics environment that the logical font configuration
* uses the given platform font name. The graphics environment may
* use this for platform specific initialization.
*/
protected void addFontToPlatformFontPath(String platformFontName) {
}
protected void registerFontFile(String fontFileName, String[] nativeNames, int fontRank, boolean defer) {
// REMIND: case compare depends on platform
if (registeredFontFiles.contains(fontFileName)) {
return;
}
int fontFormat;
if (ttFilter.accept(null, fontFileName)) {
fontFormat = FONTFORMAT_TRUETYPE;
} else if (t1Filter.accept(null, fontFileName)) {
fontFormat = FONTFORMAT_TYPE1;
} else {
fontFormat = FONTFORMAT_NATIVE;
}
registeredFontFiles.add(fontFileName);
if (defer) {
registerDeferredFont(fontFileName, fontFileName, nativeNames, fontFormat, false, fontRank);
} else {
registerFontFile(fontFileName, nativeNames, fontFormat, false, fontRank);
}
}
protected void registerPlatformFontsUsedByFontConfiguration() {
}
/*
* A GE may verify whether a font file used in a fontconfiguration
* exists. If it doesn't then either we may substitute the default
* font, or perhaps elide it altogether from the composite font.
* This makes some sense on windows where the font file is only
* likely to be in one place. But on other OSes, eg Linux, the file
* can move around depending on the installation. So there we probably
* don't want to assume it's missing and so won't add it to this list.
* If this list - missingFontFiles - is non-null then the composite
* font initialisation logic tests to see if a font file is in that
* set.
* Only one thread should be able to add to this set so we don't
* synchronize.
*/
protected void addToMissingFontFileList(String fileName) {
if (missingFontFiles == null) {
missingFontFiles = new HashSet<String>();
}
missingFontFiles.add(fileName);
}
/*
* This is for use only within getAllFonts().
* Fonts listed in the fontconfig files for windows were all
* on the "deferred" initialisation list. They were registered
* either in the course of the application, or in the call to
* loadFonts() within getAllFonts(). The fontconfig file specifies
* the names of the fonts using the English names. If there's a
* different name in the execution locale, then the platform will
* report that, and we will construct the font with both names, and
* thereby enumerate it twice. This happens for Japanese fonts listed
* in the windows fontconfig, when run in the JA locale. The solution
* is to rely (in this case) on the platform's font->file mapping to
* determine that this name corresponds to a file we already registered.
* This works because
* - we know when we get here all deferred fonts are already initialised
* - when we register a font file, we register all fonts in it.
* - we know the fontconfig fonts are all in the windows registry
*/
private boolean isNameForRegisteredFile(String fontName) {
String fileName = getFileNameForFontName(fontName);
if (fileName == null) {
return false;
}
return registeredFontFiles.contains(fileName);
}
/*
* This invocation is not in a privileged block because
* all privileged operations (reading files and properties)
* were conducted on the creation of the GE
*/
public void createCompositeFonts(ConcurrentHashMap<String, Font2D> altNameCache, boolean preferLocale,
boolean preferProportional) {
FontConfiguration fontConfig = createFontConfiguration(preferLocale, preferProportional);
initCompositeFonts(fontConfig, altNameCache);
}
/**
* Returns all fonts installed in this environment.
*/
public Font[] getAllInstalledFonts() {
if (allFonts == null) {
loadFonts();
TreeMap fontMapNames = new TreeMap();
/* warning: the number of composite fonts could change dynamically
* if applications are allowed to create them. "allfonts" could
* then be stale.
*/
Font2D[] allfonts = getRegisteredFonts();
for (int i = 0; i < allfonts.length; i++) {
if (!(allfonts[i] instanceof NativeFont)) {
fontMapNames.put(allfonts[i].getFontName(null), allfonts[i]);
}
}
String[] platformNames = getFontNamesFromPlatform();
if (platformNames != null) {
for (int i = 0; i < platformNames.length; i++) {
if (!isNameForRegisteredFile(platformNames[i])) {
fontMapNames.put(platformNames[i], null);
}
}
}
String[] fontNames = null;
if (fontMapNames.size() > 0) {
fontNames = new String[fontMapNames.size()];
Object[] keyNames = fontMapNames.keySet().toArray();
for (int i = 0; i < keyNames.length; i++) {
fontNames[i] = (String) keyNames[i];
}
}
Font[] fonts = new Font[fontNames.length];
for (int i = 0; i < fontNames.length; i++) {
fonts[i] = new Font(fontNames[i], Font.PLAIN, 1);
Font2D f2d = (Font2D) fontMapNames.get(fontNames[i]);
if (f2d != null) {
FontAccess.getFontAccess().setFont2D(fonts[i], f2d.handle);
}
}
allFonts = fonts;
}
Font[] copyFonts = new Font[allFonts.length];
System.arraycopy(allFonts, 0, copyFonts, 0, allFonts.length);
return copyFonts;
}
/**
* Get a list of installed fonts in the requested {@link Locale}.
* The list contains the fonts Family Names.
* If Locale is null, the default locale is used.
*
* @param requestedLocale, if null the default locale is used.
* @return list of installed fonts in the system.
*/
public String[] getInstalledFontFamilyNames(Locale requestedLocale) {
if (requestedLocale == null) {
requestedLocale = Locale.getDefault();
}
if (allFamilies != null && lastDefaultLocale != null && requestedLocale.equals(lastDefaultLocale)) {
String[] copyFamilies = new String[allFamilies.length];
System.arraycopy(allFamilies, 0, copyFamilies, 0, allFamilies.length);
return copyFamilies;
}
TreeMap<String, String> familyNames = new TreeMap<String, String>();
// these names are always there and aren't localised
String str;
str = Font.SERIF;
familyNames.put(str.toLowerCase(), str);
str = Font.SANS_SERIF;
familyNames.put(str.toLowerCase(), str);
str = Font.MONOSPACED;
familyNames.put(str.toLowerCase(), str);
str = Font.DIALOG;
familyNames.put(str.toLowerCase(), str);
str = Font.DIALOG_INPUT;
familyNames.put(str.toLowerCase(), str);
/* Platform APIs may be used to get the set of available family
* names for the current default locale so long as it is the same
* as the start-up system locale, rather than loading all fonts.
*/
if (requestedLocale.equals(getSystemStartupLocale()) &&
getFamilyNamesFromPlatform(familyNames, requestedLocale)) {
/* Augment platform names with JRE font family names */
getJREFontFamilyNames(familyNames, requestedLocale);
} else {
loadFontFiles();
Font2D[] physicalfonts = getPhysicalFonts();
for (int i = 0; i < physicalfonts.length; i++) {
if (!(physicalfonts[i] instanceof NativeFont)) {
String name = physicalfonts[i].getFamilyName(requestedLocale);
familyNames.put(name.toLowerCase(requestedLocale), name);
}
}
}
String[] retval = new String[familyNames.size()];
Object[] keyNames = familyNames.keySet().toArray();
for (int i = 0; i < keyNames.length; i++) {
retval[i] = (String) familyNames.get(keyNames[i]);
}
if (requestedLocale.equals(Locale.getDefault())) {
lastDefaultLocale = requestedLocale;
allFamilies = new String[retval.length];
System.arraycopy(retval, 0, allFamilies, 0, allFamilies.length);
}
return retval;
}
public void register1dot0Fonts() {
java.security.AccessController.doPrivileged(new java.security.PrivilegedAction() {
public Object run() {
String type1Dir = "/usr/openwin/lib/X11/fonts/Type1";
registerFontsInDir(type1Dir, true, Font2D.TYPE1_RANK, false, false);
return null;
}
});
}
/* Really we need only the JRE fonts family names, but there's little
* overhead in doing this the easy way by adding all the currently
* known fonts.
*/
protected void getJREFontFamilyNames(TreeMap<String, String> familyNames, Locale requestedLocale) {
registerDeferredJREFonts(jreFontDirName);
Font2D[] physicalfonts = getPhysicalFonts();
for (int i = 0; i < physicalfonts.length; i++) {
if (!(physicalfonts[i] instanceof NativeFont)) {
String name = physicalfonts[i].getFamilyName(requestedLocale);
familyNames.put(name.toLowerCase(requestedLocale), name);
}
}
}
/**
* Default locale can be changed but we need to know the initial locale
* as that is what is used by native code. Changing Java default locale
* doesn't affect that.
* Returns the locale in use when using native code to communicate
* with platform APIs. On windows this is known as the "system" locale,
* and it is usually the same as the platform locale, but not always,
* so this method also checks an implementation property used only
* on windows and uses that if set.
*/
private static Locale systemLocale = null;
private static Locale getSystemStartupLocale() {
if (systemLocale == null) {
systemLocale = (Locale) java.security.AccessController.doPrivileged(new java.security.PrivilegedAction() {
public Object run() {
/* On windows the system locale may be different than the
* user locale. This is an unsupported configuration, but
* in that case we want to return a dummy locale that will
* never cause a match in the usage of this API. This is
* important because Windows documents that the family
* names of fonts are enumerated using the language of
* the system locale. By returning a dummy locale in that
* case we do not use the platform API which would not
* return us the names we want.
*/
String fileEncoding = System.getProperty("file.encoding", "");
String sysEncoding = System.getProperty("sun.jnu.encoding");
if (sysEncoding != null && !sysEncoding.equals(fileEncoding)) {
return Locale.ROOT;
}
String language = System.getProperty("user.language", "en");
String country = System.getProperty("user.country", "");
String variant = System.getProperty("user.variant", "");
return new Locale(language, country, variant);
}
});
}
return systemLocale;
}
void addToPool(FileFont font) {
FileFont fontFileToClose = null;
int freeSlot = -1;
synchronized (fontFileCache) {
/* Avoid duplicate entries in the pool, and don't close() it,
* since this method is called only from within open().
* Seeing a duplicate is most likely to happen if the thread
* was interrupted during a read, forcing perhaps repeated
* close and open calls and eventually it ends up pointing
* at the same slot.
*/
for (int i = 0; i < CHANNELPOOLSIZE; i++) {
if (fontFileCache[i] == font) {
return;
}
if (fontFileCache[i] == null && freeSlot < 0) {
freeSlot = i;
}
}
if (freeSlot >= 0) {
fontFileCache[freeSlot] = font;
return;
} else {
/* replace with new font. */
fontFileToClose = fontFileCache[lastPoolIndex];
fontFileCache[lastPoolIndex] = font;
/* lastPoolIndex is updated so that the least recently opened
* file will be closed next.
*/
lastPoolIndex = (lastPoolIndex + 1) % CHANNELPOOLSIZE;
}
}
/* Need to close the font file outside of the synchronized block,
* since it's possible some other thread is in an open() call on
* this font file, and could be holding its lock and the pool lock.
* Releasing the pool lock allows that thread to continue, so it can
* then release the lock on this font, allowing the close() call
* below to proceed.
* Also, calling close() is safe because any other thread using
* the font we are closing() synchronizes all reading, so we
* will not close the file while it's in use.
*/
if (fontFileToClose != null) {
fontFileToClose.close();
}
}
protected FontUIResource getFontConfigFUIR(String family, int style, int size) {
return new FontUIResource(family, style, size);
}
}
| [
"\"JAVA2D_USEPLATFORMFONT\""
]
| []
| [
"JAVA2D_USEPLATFORMFONT"
]
| [] | ["JAVA2D_USEPLATFORMFONT"] | java | 1 | 0 | |
PythonAPI/util/generate_map.py | #!/usr/bin/env python
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""Generate map from FBX"""
import os
import json
import subprocess
import shutil
import argparse
if os.name == 'nt':
sys_name = 'Win64'
elif os.name == 'posix':
sys_name = 'Linux'
def main():
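# 'args' is the parsed command-line namespace; it is assumed to be assigned
# at module scope (e.g. via parse_arguments()) outside this excerpt.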
if args.force:
generate_all_maps_but_list([])
else:
maps = get_map_names()
generate_all_maps_but_list(maps)
def get_map_names():
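# Return the names of the .umap files already exported under Content/Carla/ExportedMaps.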
maps = []
dirname = os.getcwd()
map_place = os.path.join(dirname, "..", "..", "Unreal", "CarlaUE4", "Content", "Carla", "ExportedMaps")
for filename in os.listdir(map_place):
if filename.endswith('.umap'):
maps.append(filename)
return maps
def generate_all_maps_but_list(existent_maps):
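# Walk the RoadRunnerFiles folder and import/generate every map whose .umap is
# not already listed in existent_maps.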
map_name = ""
dirname = os.getcwd()
fbx_place = os.path.join(dirname, "..", "..", "RoadRunnerFiles")
for x in os.walk(fbx_place):
map_name = os.path.basename(x[0])
if map_name != "RoadRunnerFiles":
if not any(ext in "%s.umap" % map_name for ext in existent_maps):
print("Found map in fbx folder: %s" % map_name)
import_assets_commandlet(map_name)
#move_uassets(map_name)
print("Generating map asset for %s" % map_name)
generate_map(map_name)
print("Cleaning up directories")
cleanup_assets(map_name)
print("Finished %s" % map_name)
else:
print("WARNING: Found %s map in Content folder, skipping. Use \"--force\" to override\n" % map_name)
def parse_arguments():
argparser = argparse.ArgumentParser(
description=__doc__)
argparser.add_argument(
'-f', '--force',
action='store_true',
help='Force import. Will override maps with the same name')
argparser.add_argument(
'-m', '--map',
metavar='M',
type=str,
help='Map to import. If empty, all maps in the folder will be loaded')
argparser.add_argument(
'--usecarlamats',
action='store_true',
help='Avoid using RoadRunner materials. Use materials provided by Carla instead')
return argparser.parse_args()
def cleanup_assets(map_name):
dirname = os.getcwd()
content_folder = os.path.join(dirname, "..", "..", "Unreal", "CarlaUE4" , "Content", "Carla")
origin_folder = os.path.join(content_folder, "Static", "Imported", map_name)
for filename in os.listdir(origin_folder):
if map_name in filename:
removal_path = os.path.join(origin_folder, filename)
os.remove(removal_path)
def import_assets_commandlet(map_name):
generate_json(map_name, "importsetting.json")
dirname = os.getcwd()
commandlet_name = "ImportAssets"
import_settings = os.path.join(dirname, "importsetting.json")
commandlet_arguments = "-importSettings=\"%s\" -AllowCommandletRendering -nosourcecontrol -replaceexisting" % import_settings
file_xodr_origin = os.path.join(dirname, "..", "..", "RoadRunnerFiles", map_name, "%s.xodr" % map_name)
file_xodr_dest = os.path.join(dirname, "..", "..", "Unreal", "CarlaUE4", "Content", "Carla", "Maps", "OpenDrive", "%s.xodr" % map_name)
shutil.copy2(file_xodr_origin, file_xodr_dest)
invoke_commandlet(commandlet_name, commandlet_arguments)
#Clean up
os.remove("importsetting.json")
def generate_map(map_name):
commandlet_name = "MapProcess"
commandlet_arguments = "-mapname=\"%s\"" % map_name
if args.usecarlamats:
commandlet_arguments += " -use-carla-materials"
invoke_commandlet(commandlet_name, commandlet_arguments)
#This line might be needed if Epic tells us anything about the current way of doing the movement. It shouldn't but just in case...
def move_uassets(map_name):
dirname = os.getcwd()
content_folder = os.path.join(dirname, "..", "..", "Unreal", "CarlaUE4" , "Content", "Carla")
origin_folder = os.path.join(content_folder, "Static", map_name)
dest_path = ""
src_path = ""
marking_dir = os.path.join(content_folder, "Static", "RoadLines", "%sLaneMarking" % map_name)
road_dir = os.path.join(content_folder, "Static", "Road", "Roads%s" % map_name)
terrain_dir = os.path.join(content_folder, "Static", "Terrain", "%sTerrain" % map_name)
if not os.path.exists(marking_dir):
os.makedirs(marking_dir)
if not os.path.exists(road_dir):
os.makedirs(road_dir)
if not os.path.exists(terrain_dir):
os.makedirs(terrain_dir)
for filename in os.listdir(origin_folder):
if "MarkingNode" in filename:
dest_path = os.path.join(marking_dir, filename)
if "RoadNode" in filename:
dest_path = os.path.join(road_dir, filename)
if "TerrainNode" in filename:
dest_path = os.path.join(terrain_dir, filename)
src_path = os.path.join(content_folder, "Static", map_name, filename)
os.rename(src_path, dest_path)
def invoke_commandlet(name, arguments):
ue4_path = os.environ['UE4_ROOT']
dirname = os.getcwd()
editor_path = "%s/Engine/Binaries/%s/UE4Editor" % (ue4_path, sys_name)
uproject_path = os.path.join(dirname, "..", "..", "Unreal", "CarlaUE4", "CarlaUE4.uproject")
full_command = "%s %s -run=%s %s" % (editor_path, uproject_path, name, arguments)
subprocess.check_call([full_command], shell=True)
def generate_json(map_name, json_file):
with open(json_file, "a+") as fh:
import_groups = []
file_names = []
import_settings = []
fbx_path = os.path.join("..", "..", "RoadRunnerFiles", map_name, "%s.fbx" % map_name)
file_names.append(fbx_path)
import_settings.append({
"bImportMesh": 1,
"bConvertSceneUnit": 1,
"bConvertScene": 1,
"bCombineMeshes": 1,
"bImportTextures": 1,
"bImportMaterials": 1,
"bRemoveDegenerates":1,
"AnimSequenceImportData": {},
"SkeletalMeshImportData": {},
"TextureImportData": {},
"StaticMeshImportData": {
"bRemoveDegenerates": 1,
"bAutoGenerateCollision": 0,
"bCombineMeshes":0
}
})
dest_path = "/Game/Carla/Static/Imported/%s" % map_name
import_groups.append({
"ImportSettings": import_settings,
"FactoryName": "FbxFactory",
"DestinationPath": dest_path,
"bReplaceExisting": "true",
"FileNames": file_names
})
fh.write(json.dumps({"ImportGroups": import_groups}))
fh.close()
if __name__ == '__main__':
try:
args = parse_arguments()
main()
dirname = os.path.dirname(os.path.abspath(__file__))
relative_path = os.path.join(dirname, "..", "..", "Unreal", "CarlaUE4", "Content", "Carla", "ExportedMaps")
print('Map(s) exported to %s' % os.path.abspath(relative_path))
finally:
print('\ndone.') | []
| []
| [
"UE4_ROOT"
]
| [] | ["UE4_ROOT"] | python | 1 | 0 | |
internal/storage/writer/azure/azure.go | package azure
import (
"context"
"fmt"
"math/rand"
"net/url"
"os"
"path"
"time"
"github.com/Azure/azure-sdk-for-go/storage"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/azure/auth"
"github.com/kelindar/talaria/internal/encoding/key"
"github.com/kelindar/talaria/internal/monitor"
"github.com/kelindar/talaria/internal/monitor/errors"
"github.com/mroth/weightedrand"
)
// Writer represents a writer for Microsoft Azure.
type Writer struct {
prefix string
container *storage.Container
}
// New creates a new writer.
func New(container, prefix string) (*Writer, error) {
// From the Azure portal, get your storage account name and key and set environment variables.
accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT"), os.Getenv("AZURE_STORAGE_ACCESS_KEY")
var serviceBaseURL, apiVersion string
if serviceBaseURL = os.Getenv("AZURE_BASE_URL"); serviceBaseURL == "" {
serviceBaseURL = storage.DefaultBaseURL
}
if apiVersion = os.Getenv("AZURE_API_VERSION"); apiVersion == "" {
apiVersion = storage.DefaultAPIVersion
}
if len(accountName) == 0 || len(accountKey) == 0 {
return nil, errors.New("azure: either the AZURE_STORAGE_ACCOUNT or AZURE_STORAGE_ACCESS_KEY environment variable is not set")
}
// Create a new storage client
client, err := storage.NewClient(accountName, accountKey, serviceBaseURL, apiVersion, true)
if err != nil {
return nil, errors.Internal("azure: unable to create a client", err)
}
svc := client.GetBlobService()
ref := svc.GetContainerReference(container)
return &Writer{
prefix: prefix,
container: ref,
}, nil
}
// Write writes the data to the sink.
func (w *Writer) Write(key key.Key, val []byte) error {
if w.container == nil {
return errors.New("azure: unable to obtain a container reference")
}
ref := w.container.GetBlobReference(path.Join(w.prefix, string(key)))
if err := ref.PutAppendBlob(nil); err != nil {
return errors.Internal("azure: unable to write", err)
}
if err := ref.AppendBlock(val, nil); err != nil {
return errors.Internal("azure: unable to write", err)
}
return nil
}
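// A hedged usage sketch (container, prefix and key values are hypothetical):
//
//   w, err := New("events", "talaria")
//   if err != nil { /* handle error */ }
//   err = w.Write(key.Key("2024/01/01/block.orc"), payload)
//
// New reads the AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_ACCESS_KEY environment
// variables shown above.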
const (
ctxTag = "azure"
tokenRefreshBuffer = 2 * time.Minute
defaultBlobServiceURL = "https://%s.blob.core.windows.net"
defaultResourceID = "https://storage.azure.com/"
)
// MultiAccountWriter represents a writer for Microsoft Azure with multiple storage accounts.
type MultiAccountWriter struct {
monitor monitor.Monitor
blobServiceURL string
prefix string
containerURLs []azblob.ContainerURL
options azblob.UploadToBlockBlobOptions
chooser *weightedrand.Chooser
}
// NewMultiAccountWriter creates a new MultiAccountWriter.
func NewMultiAccountWriter(monitor monitor.Monitor, blobServiceURL, container, prefix string, storageAccount []string, weights []uint, parallelism uint16, blockSize int64) (*MultiAccountWriter, error) {
if _, present := os.LookupEnv("AZURE_AD_RESOURCE"); !present {
if err := os.Setenv("AZURE_AD_RESOURCE", defaultResourceID); err != nil {
return nil, errors.New("azure: unable to set default AZURE_AD_RESOURCE environment variable")
}
}
if blobServiceURL == "" {
blobServiceURL = defaultBlobServiceURL
}
credential, err := GetAzureStorageCredentials(monitor)
if err != nil {
return nil, errors.Internal("azure: unable to get azure storage credential", err)
}
containerURLs := make([]azblob.ContainerURL, len(storageAccount))
for i, sa := range storageAccount {
azureStoragePipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{
Retry: azblob.RetryOptions{
MaxTries: 3,
},
})
u, _ := url.Parse(fmt.Sprintf(blobServiceURL, sa))
containerURLs[i] = azblob.NewServiceURL(*u, azureStoragePipeline).NewContainerURL(container)
monitor.Info(fmt.Sprintf("azure: new azure storage pipeline created for %s", u))
}
var chooser *weightedrand.Chooser
if weights != nil {
if len(storageAccount) != len(weights) {
return nil, fmt.Errorf("Invalid configuration number of storage account %v != number of weights %v", len(storageAccount), len(weights))
}
choices := make([]weightedrand.Choice, len(storageAccount))
for i, w := range weights {
choices[i] = weightedrand.Choice{
Item: &containerURLs[i],
Weight: w,
}
monitor.Info(fmt.Sprintf("azure: writer weights for %v set to %d", containerURLs[i], w))
}
chooser, err = weightedrand.NewChooser(choices...)
if err != nil {
return nil, err
}
}
return &MultiAccountWriter{
monitor: monitor,
prefix: prefix,
containerURLs: containerURLs,
options: azblob.UploadToBlockBlobOptions{
Parallelism: parallelism,
BlockSize: blockSize,
},
chooser: chooser,
}, nil
}
func GetAzureStorageCredentials(monitor monitor.Monitor) (azblob.Credential, error) {
spt, err := getServicePrincipalToken(monitor)
if err != nil {
return nil, err
}
// Refresh the token once
if err := spt.Refresh(); err != nil {
return nil, err
}
// Token refresher function
var tokenRefresher azblob.TokenRefresher
tokenRefresher = func(credential azblob.TokenCredential) time.Duration {
monitor.Info("azure: refreshing azure storage auth token")
// Get a new token
if err := spt.Refresh(); err != nil {
monitor.Error(errors.Internal("azure: unable to refresh service principle token", err))
panic(err)
}
token := spt.Token()
credential.SetToken(token.AccessToken)
// Return the expiry time (x minutes before the token expires)
exp := token.Expires().Sub(time.Now().Add(tokenRefreshBuffer))
monitor.Info("azure: received new token, valid for %s", exp)
return exp
}
credential := azblob.NewTokenCredential("", tokenRefresher)
return credential, nil
}
// Write writes the data to a randomly selected storage account sink.
func (m *MultiAccountWriter) Write(key key.Key, val []byte) error {
containerURL, err := m.getContainerURL()
if err != nil {
return err
}
return m.WriteToContanier(key, val, containerURL)
}
func (m *MultiAccountWriter) WriteToContanier(key key.Key, val []byte, containerURL *azblob.ContainerURL) error {
start := time.Now()
ctx := context.Background()
blobName := path.Join(m.prefix, string(key))
blockBlobURL := containerURL.NewBlockBlobURL(blobName)
_, err := azblob.UploadBufferToBlockBlob(ctx, val, blockBlobURL, m.options)
if err != nil {
m.monitor.Count1(ctxTag, "writeerror")
m.monitor.Info("failed_azure_write: %s", blobName)
return errors.Internal("azure: unable to write", err)
}
m.monitor.Histogram(ctxTag, "writelatency", float64(time.Since(start)))
return nil
}
func (m *MultiAccountWriter) getContainerURL() (*azblob.ContainerURL, error) {
if len(m.containerURLs) == 0 {
return nil, errors.New("azure: no containerURLs initialized")
}
if m.chooser != nil {
return m.chooser.Pick().(*azblob.ContainerURL), nil
}
i := rand.Intn(len(m.containerURLs))
return &m.containerURLs[i], nil
}
func getServicePrincipalToken(monitor monitor.Monitor) (*adal.ServicePrincipalToken, error) {
spt, err := adal.NewServicePrincipalTokenFromManagedIdentity(azure.PublicCloud.ResourceIdentifiers.Storage, &adal.ManagedIdentityOptions{})
if err == nil {
monitor.Info("azure: acquired Manange Identity Credentials")
return spt, err
}
monitor.Warning(errors.Internal("azure: unable to retrieve Manange Identity Credentials", err))
settings, err := auth.GetSettingsFromEnvironment()
if err != nil {
return nil, err
}
cc, err := settings.GetClientCredentials()
if err != nil {
return nil, err
}
spt, err = cc.ServicePrincipalToken()
return spt, err
}
| [
"\"AZURE_STORAGE_ACCOUNT\"",
"\"AZURE_STORAGE_ACCESS_KEY\"",
"\"AZURE_BASE_URL\"",
"\"AZURE_API_VERSION\""
]
| []
| [
"AZURE_STORAGE_ACCESS_KEY",
"AZURE_API_VERSION",
"AZURE_STORAGE_ACCOUNT",
"AZURE_BASE_URL"
]
| [] | ["AZURE_STORAGE_ACCESS_KEY", "AZURE_API_VERSION", "AZURE_STORAGE_ACCOUNT", "AZURE_BASE_URL"] | go | 4 | 0 | |
migrate/pggo_test.go | package migrate_test
import (
"context"
"fmt"
"os"
"testing"
"git.u4b.ru/swelf/pggo/v2/migrate"
"github.com/jackc/pgx/v4"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
)
// func TestMain(m *testing.M) {
// err := exec.Command("go", "build", "-o", "tmp/pggo").Run()
// if err != nil {
// fmt.Println("Failed to build pggo binary:", err)
// os.Exit(1)
// }
// os.Exit(m.Run())
// }
// var conn DBConnection
// func BTestNew(t *testing.T) {
// var err error
// conn, err = pgx.Connect(context.Background(), os.Getenv("MIGRATE_TEST_CONN_STRING"))
// if err != nil {
// t.Fatalf("error - %v", err)
// }
// m, err := migrate.NewMigrator(context.Background(), conn, "schema_version")
// err = m.LoadMigrations("/home/swelf/src/pggo/sample/")
// // fmt.Println(err)
// fmt.Println(m.MigrationsToApply(context.Background()))
// err = m.MigrateTo(context.Background(), "")
// fmt.Println(err)
// }
type MigrateTestSuite struct {
suite.Suite
m *migrate.Migrator
conn migrate.DBConnection
}
// SetupTest opens a fresh connection and transaction, creates the migrator,
// and clears the schema_version table before each test.
func (suite *MigrateTestSuite) SetupTest() {
var err error
os.Setenv("MIGRATE_TEST_CONN_STRING", "host=127.0.0.1 database=tern_migrate_test user=postgres password=12345")
conn, err := pgx.Connect(context.Background(), os.Getenv("MIGRATE_TEST_CONN_STRING"))
suite.Require().NoError(err, suite.T())
tx, err := conn.Begin(context.Background())
suite.Require().NoError(err, suite.T())
suite.conn = tx
suite.m, err = migrate.NewMigrator(context.Background(), suite.conn, "schema_version")
suite.Require().NoError(err, suite.T())
//clearing schema migration table
_, err = suite.conn.Exec(context.Background(), "delete from "+"schema_version")
suite.Require().NoError(err, suite.T())
_, err = suite.conn.Exec(context.Background(), "SAVEPOINT initial")
suite.Require().NoError(err, suite.T())
}
func (suite *MigrateTestSuite) TearDownTest() {
_, err := suite.conn.Exec(context.Background(), "ROLLBACK TO SAVEPOINT initial")
suite.Require().NoError(err, suite.T())
suite.conn.(pgx.Tx).Commit(context.Background())
suite.conn.(pgx.Tx).Conn().Close(context.Background())
}
// All methods that begin with "Test" are run as tests within a
// suite.
func (suite *MigrateTestSuite) TestFullMigrations() {
err := suite.m.LoadMigrations("testdata/sample/")
suite.Require().NoError(err, suite.T())
suite.Equal(3, len(suite.m.Migrations))
migration, err := suite.m.MigrationsToApply(context.Background())
suite.Require().NoError(err, suite.T())
suite.Equal(3, len(migration))
currentMigrations, err := suite.m.GetCurrentVersion(context.Background())
suite.Require().NoError(err, suite.T())
suite.Equal(0, len(currentMigrations))
err = suite.m.Migrate(context.Background())
suite.Require().NoError(err, suite.T())
	// Repeat full migration
err = suite.m.Migrate(context.Background())
suite.Require().NoError(err, suite.T())
currentMigrations, err = suite.m.GetCurrentVersion(context.Background())
suite.Require().NoError(err, suite.T())
suite.Equal(3, len(currentMigrations))
}
func (suite *MigrateTestSuite) TestPartialMigrations() {
suite.m.Migrations = make(map[string]*migrate.Migration)
err := suite.m.LoadMigrations("testdata/sample/")
firstMigrationName := "001_create_t1.sql"
err = suite.m.MigrateTo(context.Background(), firstMigrationName)
currentMigrations, err := suite.m.GetCurrentVersion(context.Background())
suite.Require().NoError(err, suite.T())
suite.Equal(1, len(currentMigrations))
suite.Equal(currentMigrations[0], firstMigrationName)
needToApply, err := suite.m.MigrationsToApply(context.Background())
suite.Require().NoError(err, suite.T())
suite.Equal(len(needToApply), 2)
secondMigrationName := "002_create_t2.sql"
err = suite.m.MigrateTo(context.Background(), secondMigrationName)
currentMigrations, err = suite.m.GetCurrentVersion(context.Background())
suite.Require().NoError(err, suite.T())
suite.Equal(2, len(currentMigrations))
suite.Equal(currentMigrations[1], secondMigrationName)
}
func (suite *MigrateTestSuite) TestDetectDirection() {
suite.m.Migrations = make(map[string]*migrate.Migration)
suite.m.AppendMigration("migration_1", "create table t1(id serial primary key);", "drop table if exists t1;")
suite.m.AppendMigration("migration_2", "create table t2(id serial primary key);", "drop table if exists t2;")
suite.m.AppendMigration("migration_3", "create table t3(id serial primary key);", "drop table if exists t3;")
err := suite.m.MigrateTo(context.Background(), "migration_2")
suite.Require().NoError(err, suite.T())
direction, err := suite.m.GetDirection(context.Background(), "migration_2")
suite.Require().NoError(err, suite.T())
suite.Equal(migrate.Back, direction)
direction, err = suite.m.GetDirection(context.Background(), "migration_3")
suite.Require().NoError(err, suite.T())
suite.Equal(migrate.Forward, direction)
direction, err = suite.m.GetDirection(context.Background(), "migration_4")
suite.Require().NoError(err, suite.T())
suite.Equal(migrate.NotFound, direction)
}
func (suite *MigrateTestSuite) isTableExists(tableName string) bool {
var v bool
	query := `SELECT EXISTS (
		SELECT FROM information_schema.tables
		WHERE table_name = $1
	)`
err := suite.conn.QueryRow(context.Background(), query, tableName).Scan(&v)
if err != nil {
return false
}
return v
}
func (suite *MigrateTestSuite) TestForwardBackMigration() {
suite.m.Migrations = make(map[string]*migrate.Migration)
suite.m.AppendMigration("migration_1", "create table t1(id serial primary key);", "drop table if exists t1;")
suite.m.AppendMigration("migration_2", "create table t2(id serial primary key);", "drop table if exists t2;")
suite.m.AppendMigration("migration_3", "create table t3(id serial primary key);", "drop table if exists t3;")
err := suite.m.MigrateTo(context.Background(), "migration_3")
suite.Require().NoError(err, suite.T())
currentMigrations, err := suite.m.GetCurrentVersion(context.Background())
suite.Require().NoError(err, suite.T())
suite.Equal([]string{"migration_1", "migration_2", "migration_3"}, currentMigrations)
suite.Equal(true, suite.isTableExists("t1"), "t1 exists")
suite.Equal(true, suite.isTableExists("t2"), "t2 exists")
suite.Equal(true, suite.isTableExists("t3"), "t3 exists")
err = suite.m.MigrateTo(context.Background(), "migration_1")
suite.Require().NoError(err, suite.T())
currentMigrations, err = suite.m.GetCurrentVersion(context.Background())
suite.Require().NoError(err, suite.T())
suite.Equal([]string{"migration_1"}, currentMigrations)
suite.Equal(true, suite.isTableExists("t1"), "t1 exists")
suite.Equal(false, suite.isTableExists("t2"), "t2 exists")
suite.Equal(false, suite.isTableExists("t3"), "t3 exists")
}
func (suite *MigrateTestSuite) TestSchemaVersionInitialization() {
var err error
_, err = suite.conn.Exec(context.Background(), "drop table if exists "+"schema_version")
// fmt.Println(a)
suite.Require().NoError(err, suite.T())
_, err = migrate.NewMigrator(context.Background(), suite.conn, "schema_version")
suite.NoError(err)
}
func (suite *MigrateTestSuite) TestWrongMigration() {
suite.m.Migrations = make(map[string]*migrate.Migration)
err := suite.m.MigrateTo(context.Background(), "migration_3")
suite.Equal(migrate.MigrationNotFound{MigrationName: "migration_3"}, err)
}
func (suite *MigrateTestSuite) TestNoMigrations() {
suite.m.Migrations = make(map[string]*migrate.Migration)
err := suite.m.Migrate(context.Background())
suite.NoError(err)
// suite.Equal(migrate.MigrationNotFound{MigrationName: "migration_3"}, err)
}
// In order for 'go test' to run this suite, we need to create
// a normal test function and pass our suite to suite.Run
func TestExampleTestSuite(t *testing.T) {
suite.Run(t, new(MigrateTestSuite))
}
func TestHelpers(t *testing.T) {
strings := []string{"test1", "test2", "test3"}
assert.Equal(t, 0, migrate.Position(strings, "test1"))
assert.Equal(t, 2, migrate.Position(strings, "test3"))
assert.Equal(t, -1, migrate.Position(strings, "test4"))
a := []string{"test1", "test2", "test3"}
migrate.Reverse(a)
assert.Equal(t, []string{"test3", "test2", "test1"}, a)
a = []string{"test1", "test2"}
migrate.Reverse(a)
assert.Equal(t, []string{"test2", "test1"}, a)
a = []string{"test1"}
migrate.Reverse(a)
assert.Equal(t, []string{"test1"}, a)
a = []string{}
migrate.Reverse(a)
assert.Equal(t, []string{}, a)
}
| [
"\"MIGRATE_TEST_CONN_STRING\"",
"\"MIGRATE_TEST_CONN_STRING\""
]
| []
| [
"MIGRATE_TEST_CONN_STRING"
]
| [] | ["MIGRATE_TEST_CONN_STRING"] | go | 1 | 0 | |
manage.py | #!/usr/bin/env python
import os
from app import create_app, db
from app.models import User, Role, News, Player, Team, Comment
from flask_script import Manager, Shell
from flask_migrate import Migrate, MigrateCommand
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db, User=User, Role=Role, News=News, Player=Player,
Team=Team, Comment=Comment)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
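# Typical invocations of the commands registered above and below (illustrative):
#   python manage.py test        # run the unit tests
#   python manage.py deploy      # upgrade the database and insert roles
#   python manage.py db upgrade  # run migrations directly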
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
@manager.command
def deploy():
"""Run deployment tasks."""
    from flask_migrate import upgrade
    from app.models import Role
    # migrate database to latest revision
upgrade()
# create user roles
Role.insert_roles()
if __name__ == '__main__':
manager.run()
| []
| []
| [
"FLASK_CONFIG"
]
| [] | ["FLASK_CONFIG"] | python | 1 | 0 | |
deisctl/cmd/cmd.go | package cmd
import (
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"github.com/deis/deis/deisctl/backend"
"github.com/deis/deis/deisctl/config"
"github.com/deis/deis/deisctl/constant"
"github.com/deis/deis/deisctl/update"
"github.com/deis/deis/deisctl/utils"
"github.com/docopt/docopt-go"
)
const (
PlatformInstallCommand string = "platform"
)
var (
DefaultDataContainers = []string{
"logger-data",
}
)
func ListUnits(b backend.Backend) error {
	return b.ListUnits()
}
func ListUnitFiles(b backend.Backend) error {
	return b.ListUnitFiles()
}
func Scale(b backend.Backend, targets []string) error {
for _, target := range targets {
component, num, err := splitScaleTarget(target)
if err != nil {
return err
}
// the router is the only component that can scale past 1 at the moment
if num > 1 && !strings.Contains(component, "router") {
return fmt.Errorf("cannot scale %s past 1", component)
}
if err := b.Scale(component, num); err != nil {
return err
}
}
return nil
}
func Start(b backend.Backend, targets []string) error {
// if target is platform, start all services
if len(targets) == 1 && targets[0] == PlatformInstallCommand {
return StartPlatform(b)
}
return b.Start(targets)
}
func StartPlatform(b backend.Backend) error {
fmt.Println(utils.DeisIfy("Starting Deis..."))
if err := startDataContainers(b); err != nil {
return err
}
if err := startDefaultServices(b); err != nil {
return err
}
fmt.Println("Deis started.")
return nil
}
func startDataContainers(b backend.Backend) error {
fmt.Println("Launching data containers...")
if err := b.Start(DefaultDataContainers); err != nil {
return err
}
fmt.Println("Data containers launched.")
return nil
}
func startDefaultServices(b backend.Backend) error {
fmt.Println("Launching service containers...")
if err := Start(b, []string{"logger@1"}); err != nil {
return err
}
targets := []string{
"publisher",
"store-monitor",
"store-daemon",
"store-gateway@1",
"logspout",
"cache@1",
"router@1",
"database@1",
"controller@1",
"registry@1",
"builder@1",
}
if err := Start(b, targets); err != nil {
return err
}
fmt.Println("Service containers launched.")
return nil
}
func Stop(b backend.Backend, targets []string) error {
// if target is platform, stop all services
if len(targets) == 1 && targets[0] == PlatformInstallCommand {
return StopPlatform(b)
}
return b.Stop(targets)
}
func StopPlatform(b backend.Backend) error {
fmt.Println("Stopping Deis...")
if err := stopDefaultServices(b); err != nil {
return err
}
fmt.Println("Deis stopped.")
return nil
}
func stopDefaultServices(b backend.Backend) error {
fmt.Println("Stopping service containers...")
targets := []string{
"publisher",
"logspout",
"builder@1",
"registry@1",
"controller@1",
"database@1",
"store-gateway@1",
"store-daemon",
"store-monitor",
"cache@1",
"router@1",
"logger@1",
}
if err := Stop(b, targets); err != nil {
return err
}
fmt.Println("Service containers stopped.")
return nil
}
func Restart(b backend.Backend, targets []string) error {
if err := b.Stop(targets); err != nil {
return err
}
return b.Start(targets)
}
func Status(b backend.Backend, targets []string) error {
for _, target := range targets {
if err := b.Status(target); err != nil {
return err
}
}
return nil
}
func Journal(b backend.Backend, targets []string) error {
for _, target := range targets {
if err := b.Journal(target); err != nil {
return err
}
}
return nil
}
func Install(b backend.Backend, targets []string) error {
// if target is platform, install all services
if len(targets) == 1 && targets[0] == PlatformInstallCommand {
return InstallPlatform(b)
}
// otherwise create the specific targets
return b.Create(targets)
}
func InstallPlatform(b backend.Backend) error {
fmt.Println(utils.DeisIfy("Installing Deis..."))
if err := installDataContainers(b); err != nil {
return err
}
if err := installDefaultServices(b); err != nil {
return err
}
fmt.Println("Deis installed.")
fmt.Println("Please run `deisctl start platform` to boot up Deis.")
return nil
}
func installDataContainers(b backend.Backend) error {
fmt.Println("Scheduling data containers...")
if err := b.Create(DefaultDataContainers); err != nil {
return err
}
fmt.Println("Data containers scheduled.")
return nil
}
func installDefaultServices(b backend.Backend) error {
// Install global units
if err := b.Create([]string{"publisher", "logspout", "store-monitor", "store-daemon"}); err != nil {
return err
}
// start service containers
targets := []string{
"store-gateway=1",
"database=1",
"cache=1",
"logger=1",
"registry=1",
"controller=1",
"builder=1",
"router=1",
}
fmt.Println("Scheduling service containers...")
if err := Scale(b, targets); err != nil {
return err
}
fmt.Println("Service containers scheduled.")
return nil
}
func Uninstall(b backend.Backend, targets []string) error {
// if target is platform, uninstall all services
if len(targets) == 1 && targets[0] == PlatformInstallCommand {
return uninstallAllServices(b)
}
// uninstall the specific target
return b.Destroy(targets)
}
func uninstallAllServices(b backend.Backend) error {
targets := []string{
"store-gateway=0",
"database=0",
"cache=0",
"logger=0",
"registry=0",
"controller=0",
"builder=0",
"router=0",
}
fmt.Println("Destroying service containers...")
if err := Scale(b, targets); err != nil {
return err
}
// Uninstall global units
if err := b.Destroy([]string{"publisher", "logspout", "store-monitor", "store-daemon"}); err != nil {
return err
}
fmt.Println("Service containers destroyed.")
return nil
}
func splitScaleTarget(target string) (c string, num int, err error) {
r := regexp.MustCompile(`([a-z-]+)=([\d]+)`)
match := r.FindStringSubmatch(target)
if len(match) == 0 {
err = fmt.Errorf("Could not parse: %v", target)
return
}
c = match[1]
num, err = strconv.Atoi(match[2])
if err != nil {
return
}
return
}
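// For example (illustrative): splitScaleTarget("router=2") yields ("router", 2, nil),
// while a malformed target such as "router" returns a parse error.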
func Config() error {
if err := config.Config(); err != nil {
return err
}
return nil
}
func Update() error {
if err := utils.Execute(constant.HooksDir + "pre-update"); err != nil {
fmt.Println("pre-updatehook failed")
return err
}
if err := update.Update(); err != nil {
fmt.Println("update engine failed")
return err
}
if err := utils.Execute(constant.HooksDir + "post-update"); err != nil {
fmt.Println("post-updatehook failed")
return err
}
return nil
}
func RefreshUnits() error {
usage := `Refreshes local unit files from the master repository.
deisctl looks for unit files in these directories, in this order:
- the $DEISCTL_UNITS environment variable, if set
- $HOME/.deis/units
- /var/lib/deis/units
Usage:
deisctl refresh-units [-p <target>] [-t <tag>]
Options:
-p --path=<target> where to save unit files [default: $HOME/.deis/units]
-t --tag=<tag> git tag, branch, or SHA to use when downloading unit files
[default: master]
`
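	// A hedged example of the usage above (the tag value is illustrative only):
	//
	//   deisctl refresh-units --path ~/.deis/units --tag v1.0.0
	//
	// This downloads each deis-*.service unit file listed below from the
	// deis/deis GitHub repository at that tag into the target directory.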
// parse command-line arguments
args, err := docopt.Parse(usage, nil, true, "", false)
if err != nil {
fmt.Printf("Error: %v\n", err)
os.Exit(2)
}
dir := args["--path"].(string)
if dir == "$HOME/.deis/units" || dir == "~/.deis/units" {
dir = path.Join(os.Getenv("HOME"), ".deis", "units")
}
// create the target dir if necessary
if err := os.MkdirAll(dir, 0755); err != nil {
return err
}
// download and save the unit files to the specified path
rootURL := "https://raw.githubusercontent.com/deis/deis/"
tag := args["--tag"].(string)
units := []string{
"deis-builder.service",
"deis-cache.service",
"deis-controller.service",
"deis-database.service",
"deis-logger.service",
"deis-logger-data.service",
"deis-logspout.service",
"deis-publisher.service",
"deis-registry.service",
"deis-router.service",
"deis-store-daemon.service",
"deis-store-gateway.service",
"deis-store-monitor.service",
}
for _, unit := range units {
src := rootURL + tag + "/deisctl/units/" + unit
dest := filepath.Join(dir, unit)
res, err := http.Get(src)
if err != nil {
return err
}
if res.StatusCode != 200 {
return errors.New(res.Status)
}
defer res.Body.Close()
data, err := ioutil.ReadAll(res.Body)
if err != nil {
return err
}
if err = ioutil.WriteFile(dest, data, 0644); err != nil {
return err
}
fmt.Printf("Refreshed %s from %s\n", unit, tag)
}
return nil
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
montreal_forced_aligner/alignment/adapting.py | """Class definitions for adapting acoustic models"""
from __future__ import annotations
import multiprocessing as mp
import os
import shutil
import subprocess
import time
from queue import Empty
from typing import TYPE_CHECKING, List
import tqdm
from montreal_forced_aligner.abc import AdapterMixin
from montreal_forced_aligner.alignment.multiprocessing import AccStatsArguments, AccStatsFunction
from montreal_forced_aligner.alignment.pretrained import PretrainedAligner
from montreal_forced_aligner.exceptions import KaldiProcessingError
from montreal_forced_aligner.models import AcousticModel
from montreal_forced_aligner.utils import (
KaldiProcessWorker,
Stopped,
log_kaldi_errors,
thirdparty_binary,
)
if TYPE_CHECKING:
from montreal_forced_aligner.models import MetaDict
__all__ = ["AdaptingAligner"]
class AdaptingAligner(PretrainedAligner, AdapterMixin):
"""
Adapt an acoustic model to a new dataset
Parameters
----------
mapping_tau: int
Tau to use in mapping stats between new domain data and pretrained model
See Also
--------
:class:`~montreal_forced_aligner.alignment.pretrained.PretrainedAligner`
For dictionary, corpus, and alignment parameters
:class:`~montreal_forced_aligner.abc.AdapterMixin`
For adapting parameters
Attributes
----------
initialized: bool
Flag for whether initialization is complete
adaptation_done: bool
Flag for whether adaptation is complete
"""
def __init__(self, mapping_tau: int = 20, **kwargs):
super().__init__(**kwargs)
self.mapping_tau = mapping_tau
self.initialized = False
self.adaptation_done = False
def map_acc_stats_arguments(self, alignment=False) -> List[AccStatsArguments]:
"""
Generate Job arguments for :func:`~montreal_forced_aligner.alignment.multiprocessing.AccStatsFunction`
Returns
-------
list[:class:`~montreal_forced_aligner.alignment.multiprocessing.AccStatsArguments`]
Arguments for processing
"""
feat_strings = self.construct_feature_proc_strings()
if alignment:
model_path = self.alignment_model_path
else:
model_path = self.model_path
return [
AccStatsArguments(
os.path.join(self.working_log_directory, f"map_acc_stats.{j.name}.log"),
j.current_dictionary_names,
feat_strings[j.name],
j.construct_path_dictionary(self.working_directory, "ali", "ark"),
j.construct_path_dictionary(self.working_directory, "map", "acc"),
model_path,
)
for j in self.jobs
]
def acc_stats(self, alignment=False):
arguments = self.map_acc_stats_arguments(alignment)
if alignment:
initial_mdl_path = os.path.join(self.working_directory, "0.alimdl")
final_mdl_path = os.path.join(self.working_directory, "0.alimdl")
else:
initial_mdl_path = os.path.join(self.working_directory, "0.mdl")
final_mdl_path = os.path.join(self.working_directory, "final.mdl")
if not os.path.exists(initial_mdl_path):
return
self.logger.info("Accumulating statistics...")
with tqdm.tqdm(total=self.num_utterances, disable=True) as pbar:
if self.use_mp:
manager = mp.Manager()
error_dict = manager.dict()
return_queue = manager.Queue()
stopped = Stopped()
procs = []
for i, args in enumerate(arguments):
function = AccStatsFunction(args)
p = KaldiProcessWorker(i, return_queue, function, error_dict, stopped)
procs.append(p)
p.start()
while True:
try:
num_utterances, errors = return_queue.get(timeout=1)
if stopped.stop_check():
continue
except Empty:
for proc in procs:
if not proc.finished.stop_check():
break
else:
break
continue
pbar.update(num_utterances + errors)
for p in procs:
p.join()
if error_dict:
for v in error_dict.values():
raise v
else:
for args in arguments:
function = AccStatsFunction(args)
for num_utterances, errors in function.run():
pbar.update(num_utterances + errors)
log_path = os.path.join(self.working_log_directory, "map_model_est.log")
occs_path = os.path.join(self.working_directory, "final.occs")
with open(log_path, "w", encoding="utf8") as log_file:
acc_files = []
for j in arguments:
acc_files.extend(j.acc_paths.values())
sum_proc = subprocess.Popen(
[thirdparty_binary("gmm-sum-accs"), "-"] + acc_files,
stderr=log_file,
stdout=subprocess.PIPE,
env=os.environ,
)
ismooth_proc = subprocess.Popen(
[
thirdparty_binary("gmm-ismooth-stats"),
"--smooth-from-model",
f"--tau={self.mapping_tau}",
initial_mdl_path,
"-",
"-",
],
stderr=log_file,
stdin=sum_proc.stdout,
stdout=subprocess.PIPE,
env=os.environ,
)
est_proc = subprocess.Popen(
[
thirdparty_binary("gmm-est"),
"--update-flags=m",
f"--write-occs={occs_path}",
"--remove-low-count-gaussians=false",
initial_mdl_path,
"-",
final_mdl_path,
],
stdin=ismooth_proc.stdout,
stderr=log_file,
env=os.environ,
)
est_proc.communicate()
@property
def workflow_identifier(self) -> str:
"""Adaptation identifier"""
return "adapt_acoustic_model"
@property
def align_directory(self) -> str:
"""Align directory"""
return os.path.join(self.output_directory, "adapted_align")
@property
def working_directory(self) -> str:
"""Current working directory"""
if self.adaptation_done:
return self.align_directory
return self.workflow_directory
@property
def working_log_directory(self) -> str:
"""Current log directory"""
return os.path.join(self.working_directory, "log")
@property
def model_path(self):
"""Current acoustic model path"""
if not self.adaptation_done:
return os.path.join(self.working_directory, "0.mdl")
return os.path.join(self.working_directory, "final.mdl")
@property
def next_model_path(self):
"""Mapped acoustic model path"""
return os.path.join(self.working_directory, "final.mdl")
def train_map(self) -> None:
"""
        Trains an adapted acoustic model through mapping model states and updating those with
        enough data.
See Also
--------
:class:`~montreal_forced_aligner.alignment.multiprocessing.AccStatsFunction`
Multiprocessing helper function for each job
:meth:`.AdaptingAligner.map_acc_stats_arguments`
Job method for generating arguments for the helper function
:kaldi_src:`gmm-sum-accs`
Relevant Kaldi binary
:kaldi_src:`gmm-ismooth-stats`
Relevant Kaldi binary
:kaldi_src:`gmm-est`
Relevant Kaldi binary
:kaldi_steps:`train_map`
Reference Kaldi script
"""
begin = time.time()
log_directory = self.working_log_directory
os.makedirs(log_directory, exist_ok=True)
self.acc_stats(alignment=False)
if self.uses_speaker_adaptation:
self.acc_stats(alignment=True)
self.logger.debug(f"Mapping models took {time.time() - begin}")
def adapt(self) -> None:
"""Run the adaptation"""
self.setup()
dirty_path = os.path.join(self.working_directory, "dirty")
done_path = os.path.join(self.working_directory, "done")
if os.path.exists(done_path):
self.logger.info("Adaptation already done, skipping.")
return
self.logger.info("Generating initial alignments...")
for f in ["final.mdl", "final.alimdl"]:
p = os.path.join(self.working_directory, f)
if not os.path.exists(p):
continue
os.rename(p, os.path.join(self.working_directory, f.replace("final", "0")))
self.align()
os.makedirs(self.align_directory, exist_ok=True)
try:
self.logger.info("Adapting pretrained model...")
self.train_map()
self.export_model(os.path.join(self.working_log_directory, "acoustic_model.zip"))
shutil.copyfile(
os.path.join(self.working_directory, "final.mdl"),
os.path.join(self.align_directory, "final.mdl"),
)
shutil.copyfile(
os.path.join(self.working_directory, "final.occs"),
os.path.join(self.align_directory, "final.occs"),
)
shutil.copyfile(
os.path.join(self.working_directory, "tree"),
os.path.join(self.align_directory, "tree"),
)
if os.path.exists(os.path.join(self.working_directory, "final.alimdl")):
shutil.copyfile(
os.path.join(self.working_directory, "final.alimdl"),
os.path.join(self.align_directory, "final.alimdl"),
)
if os.path.exists(os.path.join(self.working_directory, "lda.mat")):
shutil.copyfile(
os.path.join(self.working_directory, "lda.mat"),
os.path.join(self.align_directory, "lda.mat"),
)
self.adaptation_done = True
except Exception as e:
with open(dirty_path, "w"):
pass
if isinstance(e, KaldiProcessingError):
log_kaldi_errors(e.error_logs, self.logger)
e.update_log_file(self.logger)
raise
with open(done_path, "w"):
pass
@property
def meta(self) -> MetaDict:
"""Acoustic model metadata"""
from datetime import datetime
from ..utils import get_mfa_version
data = {
"phones": sorted(self.non_silence_phones),
"version": get_mfa_version(),
"architecture": self.acoustic_model.meta["architecture"],
"train_date": str(datetime.now()),
"features": self.feature_options,
"phone_set_type": str(self.phone_set_type),
}
return data
def export_model(self, output_model_path: str) -> None:
"""
Output an acoustic model to the specified path
Parameters
----------
output_model_path : str
Path to save adapted acoustic model
"""
directory, filename = os.path.split(output_model_path)
basename, _ = os.path.splitext(filename)
acoustic_model = AcousticModel.empty(basename, root_directory=self.working_log_directory)
acoustic_model.add_meta_file(self)
acoustic_model.add_model(self.align_directory)
if directory:
os.makedirs(directory, exist_ok=True)
basename, _ = os.path.splitext(output_model_path)
acoustic_model.dump(output_model_path)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
virtualbox/machine.go | package virtualbox
import (
"archive/tar"
"bufio"
"bytes"
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
"github.com/boot2docker/boot2docker-cli/driver"
flag "github.com/ogier/pflag"
)
type Flag int
// Flag names in lowercases to be consistent with VBoxManage options.
const (
F_acpi Flag = 1 << iota
F_ioapic
F_rtcuseutc
F_cpuhotplug
F_pae
F_longmode
F_hpet
F_hwvirtex
F_triplefaultreset
F_nestedpaging
F_largepages
F_vtxvpid
F_vtxux
F_accelerate3d
)
type DriverCfg struct {
VBM string // Path to VBoxManage utility.
VMDK string // base VMDK to use as persistent disk.
shares shareSlice
// see also func ConfigFlags later in this file
}
var shareDefault string // set in ConfigFlags - this is what gets filled in for "shares" if it's empty
var (
verbose bool // Verbose mode (Local copy of B2D.Verbose).
cfg DriverCfg
)
func init() {
if err := driver.Register("virtualbox", InitFunc); err != nil {
fmt.Fprintf(os.Stderr, "Failed to initialize driver. Error : %s", err.Error())
os.Exit(1)
}
if err := driver.RegisterConfig("virtualbox", ConfigFlags); err != nil {
fmt.Fprintf(os.Stderr, "Failed to initialize driver config. Error : %s", err.Error())
os.Exit(1)
}
}
// Initialize the Machine.
func InitFunc(mc *driver.MachineConfig) (driver.Machine, error) {
verbose = mc.Verbose
m, err := GetMachine(mc.VM)
if err != nil && mc.Init {
return CreateMachine(mc)
}
return m, err
}
type shareSlice map[string]string
const shareSliceSep = "="
func (s shareSlice) String() string {
var ret []string
for name, dir := range s {
ret = append(ret, fmt.Sprintf("%s%s%s", dir, shareSliceSep, name))
}
return fmt.Sprintf("[%s]", strings.Join(ret, " "))
}
func (s *shareSlice) Set(shareDir string) error {
var shareName string
if i := strings.Index(shareDir, shareSliceSep); i >= 0 {
shareName = shareDir[i+1:]
shareDir = shareDir[:i]
}
if shareName == "" {
// parts of the VBox internal code are buggy with share names that start with "/"
shareName = strings.TrimLeft(shareDir, "/")
// TODO do some basic Windows -> MSYS path conversion
// ie, s!^([a-z]+):[/\\]+!\1/!; s!\\!/!g
}
if *s == nil {
*s = shareSlice{}
}
(*s)[shareName] = shareDir
return nil
}
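// For illustration (values are hypothetical): a flag value of "/Users=Users" maps
// the host directory "/Users" to the share name "Users", while a bare "/Users"
// derives the same share name by trimming the leading "/".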
// Add cmdline params for this driver
func ConfigFlags(B2D *driver.MachineConfig, flags *flag.FlagSet) error {
//B2D.DriverCfg["virtualbox"] = cfg
flags.StringVar(&cfg.VMDK, "basevmdk", "", "Path to VMDK to use as base for persistent partition")
cfg.VBM = "VBoxManage"
if runtime.GOOS == "windows" {
p := "C:\\Program Files\\Oracle\\VirtualBox"
if t := os.Getenv("VBOX_INSTALL_PATH"); t != "" {
p = t
} else if t = os.Getenv("VBOX_MSI_INSTALL_PATH"); t != "" {
p = t
}
cfg.VBM = filepath.Join(p, "VBoxManage.exe")
}
flags.StringVar(&cfg.VBM, "vbm", cfg.VBM, "path to VirtualBox management utility.")
// TODO once boot2docker improves, replace this all with homeDir() from config.go so we only share the current user's HOME by default
shareDefault = "disable"
switch runtime.GOOS {
case "darwin":
shareDefault = "/Users" + shareSliceSep + "Users"
case "windows":
shareDefault = "C:\\Users" + shareSliceSep + "c/Users"
}
var defaultText string
if shareDefault != "disable" {
defaultText = "(defaults to '" + shareDefault + "' if no shares are specified; use 'disable' to explicitly prevent any shares from being created) "
}
flags.Var(&cfg.shares, "vbox-share", fmt.Sprintf("%sList of directories to share during 'up|start|boot' via VirtualBox Guest Additions, with optional labels", defaultText))
return nil
}
// Convert bool to "on"/"off"
func bool2string(b bool) string {
if b {
return "on"
}
return "off"
}
// Test if flag is set. Return "on" or "off".
func (f Flag) Get(o Flag) string {
return bool2string(f&o == o)
}
// Machine information.
type Machine struct {
Name string
UUID string
Iso string
State driver.MachineState
CPUs uint
Memory uint // main memory (in MB)
VRAM uint // video memory (in MB)
CfgFile string
BaseFolder string
OSType string
Flag Flag
BootOrder []string // max 4 slots, each in {none|floppy|dvd|disk|net}
DockerPort uint
SSHPort uint
SerialFile string
}
// Refresh reloads the machine information.
func (m *Machine) Refresh() error {
id := m.Name
if id == "" {
id = m.UUID
}
mm, err := GetMachine(id)
if err != nil {
return err
}
*m = *mm
return nil
}
// Start starts the machine.
func (m *Machine) Start() error {
switch m.State {
case driver.Paused:
return vbm("controlvm", m.Name, "resume")
case driver.Poweroff, driver.Aborted:
if err := m.setUpShares(); err != nil {
return err
}
fallthrough
case driver.Saved:
return vbm("startvm", m.Name, "--type", "headless")
}
if err := m.Refresh(); err == nil {
if m.State != driver.Running {
return fmt.Errorf("Failed to start", m.Name)
}
}
return nil
}
// Save suspends the machine and saves its state to disk.
func (m *Machine) Save() error {
switch m.State {
case driver.Paused:
if err := m.Start(); err != nil {
return err
}
case driver.Poweroff, driver.Aborted, driver.Saved:
return nil
}
return vbm("controlvm", m.Name, "savestate")
}
// Pause pauses the execution of the machine.
func (m *Machine) Pause() error {
switch m.State {
case driver.Paused, driver.Poweroff, driver.Aborted, driver.Saved:
return nil
}
return vbm("controlvm", m.Name, "pause")
}
// Stop gracefully stops the machine.
func (m *Machine) Stop() error {
switch m.State {
case driver.Poweroff, driver.Aborted, driver.Saved:
return nil
case driver.Paused:
if err := m.Start(); err != nil {
return err
}
}
// busy wait until the machine is stopped
for i := 0; i < 10; i++ {
if err := vbm("controlvm", m.Name, "acpipowerbutton"); err != nil {
return err
}
time.Sleep(1 * time.Second)
if err := m.Refresh(); err != nil {
return err
}
if m.State == driver.Poweroff {
return nil
}
}
return fmt.Errorf("timed out waiting for VM to stop")
}
// Poweroff forcefully stops the machine. State is lost and might corrupt the disk image.
func (m *Machine) Poweroff() error {
switch m.State {
case driver.Poweroff, driver.Aborted, driver.Saved:
return nil
}
return vbm("controlvm", m.Name, "poweroff")
}
// Restart gracefully restarts the machine.
func (m *Machine) Restart() error {
switch m.State {
case driver.Paused, driver.Saved:
if err := m.Start(); err != nil {
return err
}
}
if err := m.Stop(); err != nil {
return err
}
return m.Start()
}
// Reset forcefully restarts the machine. State is lost and might corrupt the disk image.
func (m *Machine) Reset() error {
switch m.State {
case driver.Paused, driver.Saved:
if err := m.Start(); err != nil {
return err
}
}
return vbm("controlvm", m.Name, "reset")
}
// Delete deletes the machine and associated disk images.
func (m *Machine) Delete() error {
if err := m.Poweroff(); err != nil {
return err
}
return vbm("unregistervm", m.Name, "--delete")
}
// Get current state
func (m *Machine) GetName() string {
return m.Name
}
// Get current state
func (m *Machine) GetState() driver.MachineState {
return m.State
}
// Get serial file
func (m *Machine) GetSerialFile() string {
return m.SerialFile
}
// Get Docker port
func (m *Machine) GetDockerPort() uint {
return m.DockerPort
}
// Get SSH port
func (m *Machine) GetSSHPort() uint {
return m.SSHPort
}
// GetMachine finds a machine by its name or UUID.
func GetMachine(id string) (*Machine, error) {
stdout, stderr, err := vbmOutErr("showvminfo", id, "--machinereadable")
if err != nil {
if reMachineNotFound.FindString(stderr) != "" {
return nil, driver.ErrMachineNotExist
}
return nil, err
}
s := bufio.NewScanner(strings.NewReader(stdout))
m := &Machine{}
for s.Scan() {
res := reVMInfoLine.FindStringSubmatch(s.Text())
if res == nil {
continue
}
key := res[1]
if key == "" {
key = res[2]
}
val := res[3]
if val == "" {
val = res[4]
}
switch key {
case "name":
m.Name = val
case "UUID":
m.UUID = val
case "SATA-0-0":
m.Iso = val
case "VMState":
m.State = driver.MachineState(val)
case "memory":
n, err := strconv.ParseUint(val, 10, 32)
if err != nil {
return nil, err
}
m.Memory = uint(n)
case "cpus":
n, err := strconv.ParseUint(val, 10, 32)
if err != nil {
return nil, err
}
m.CPUs = uint(n)
case "vram":
n, err := strconv.ParseUint(val, 10, 32)
if err != nil {
return nil, err
}
m.VRAM = uint(n)
case "CfgFile":
m.CfgFile = val
m.BaseFolder = filepath.Dir(val)
case "uartmode1":
// uartmode1="server,/home/sven/.boot2docker/boot2docker-vm.sock"
vals := strings.Split(val, ",")
if len(vals) >= 2 {
m.SerialFile = vals[1]
}
default:
if strings.HasPrefix(key, "Forwarding(") {
// "Forwarding(\d*)" are ordered by the name inside the val, not fixed order.
// Forwarding(0)="docker,tcp,127.0.0.1,5555,,"
// Forwarding(1)="ssh,tcp,127.0.0.1,2222,,22"
vals := strings.Split(val, ",")
n, err := strconv.ParseUint(vals[3], 10, 32)
if err != nil {
return nil, err
}
switch vals[0] {
case "docker":
m.DockerPort = uint(n)
case "ssh":
m.SSHPort = uint(n)
}
}
}
}
if err := s.Err(); err != nil {
return nil, err
}
return m, nil
}
// ListMachines lists all registered machines.
func ListMachines() ([]string, error) {
out, err := vbmOut("list", "vms")
if err != nil {
return nil, err
}
ms := []string{}
s := bufio.NewScanner(strings.NewReader(out))
for s.Scan() {
res := reVMNameUUID.FindStringSubmatch(s.Text())
if res == nil {
continue
}
ms = append(ms, res[1])
}
if err := s.Err(); err != nil {
return nil, err
}
return ms, nil
}
// CreateMachine creates a new machine. If basefolder is empty, use default.
func CreateMachine(mc *driver.MachineConfig) (*Machine, error) {
if mc.VM == "" {
return nil, fmt.Errorf("machine name is empty")
}
// Check if a machine with the given name already exists.
machineNames, err := ListMachines()
if err != nil {
return nil, err
}
for _, m := range machineNames {
if m == mc.VM {
return nil, driver.ErrMachineExist
}
}
// Create and register the machine.
args := []string{"createvm", "--name", mc.VM, "--register"}
if err := vbm(args...); err != nil {
return nil, err
}
m, err := GetMachine(mc.VM)
if err != nil {
return nil, err
}
// Configure VM for Boot2docker
SetExtra(mc.VM, "VBoxInternal/CPUM/EnableHVP", "1")
m.OSType = "Linux26_64"
if mc.CPUs > 0 {
m.CPUs = mc.CPUs
} else {
m.CPUs = uint(runtime.NumCPU())
}
if m.CPUs > 32 {
m.CPUs = 32
}
m.Memory = mc.Memory
m.SerialFile = mc.SerialFile
m.Flag |= F_pae
m.Flag |= F_longmode // important: use x86-64 processor
m.Flag |= F_rtcuseutc
m.Flag |= F_acpi
m.Flag |= F_ioapic
m.Flag |= F_hpet
m.Flag |= F_hwvirtex
m.Flag |= F_vtxvpid
m.Flag |= F_largepages
m.Flag |= F_nestedpaging
// Set VM boot order
m.BootOrder = []string{"dvd"}
if err := m.Modify(); err != nil {
return m, err
}
// Set NIC #1 to use NAT
m.SetNIC(1, driver.NIC{Network: driver.NICNetNAT, Hardware: driver.VirtIO})
pfRules := map[string]driver.PFRule{
"ssh": {Proto: driver.PFTCP, HostIP: net.ParseIP("127.0.0.1"), HostPort: mc.SSHPort, GuestPort: driver.SSHPort},
}
if mc.DockerPort > 0 {
pfRules["docker"] = driver.PFRule{Proto: driver.PFTCP, HostIP: net.ParseIP("127.0.0.1"), HostPort: mc.DockerPort, GuestPort: driver.DockerPort}
}
for name, rule := range pfRules {
if err := m.AddNATPF(1, name, rule); err != nil {
return m, err
}
}
hostIFName, err := getHostOnlyNetworkInterface(mc)
if err != nil {
return m, err
}
// Set NIC #2 to use host-only
if err := m.SetNIC(2, driver.NIC{Network: driver.NICNetHostonly, Hardware: driver.VirtIO, HostonlyAdapter: hostIFName}); err != nil {
return m, err
}
// Set VM storage
if err := m.AddStorageCtl("SATA", driver.StorageController{SysBus: driver.SysBusSATA, HostIOCache: true, Bootable: true, Ports: 4}); err != nil {
return m, err
}
// Attach ISO image
if err := m.AttachStorage("SATA", driver.StorageMedium{Port: 0, Device: 0, DriveType: driver.DriveDVD, Medium: mc.ISO}); err != nil {
return m, err
}
diskImg := filepath.Join(m.BaseFolder, fmt.Sprintf("%s.vmdk", mc.VM))
if _, err := os.Stat(diskImg); err != nil {
if !os.IsNotExist(err) {
return m, err
}
if cfg.VMDK != "" {
if err := copyDiskImage(diskImg, cfg.VMDK); err != nil {
return m, err
}
} else {
magicString := "boot2docker, please format-me"
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
// magicString first so the automount script knows to format the disk
file := &tar.Header{Name: magicString, Size: int64(len(magicString))}
if err := tw.WriteHeader(file); err != nil {
return m, err
}
if _, err := tw.Write([]byte(magicString)); err != nil {
return m, err
}
// .ssh/key.pub => authorized_keys
file = &tar.Header{Name: ".ssh", Typeflag: tar.TypeDir, Mode: 0700}
if err := tw.WriteHeader(file); err != nil {
return m, err
}
pubKey, err := ioutil.ReadFile(mc.SSHKey + ".pub")
if err != nil {
return m, err
}
file = &tar.Header{Name: ".ssh/authorized_keys", Size: int64(len(pubKey)), Mode: 0644}
if err := tw.WriteHeader(file); err != nil {
return m, err
}
if _, err := tw.Write([]byte(pubKey)); err != nil {
return m, err
}
file = &tar.Header{Name: ".ssh/authorized_keys2", Size: int64(len(pubKey)), Mode: 0644}
if err := tw.WriteHeader(file); err != nil {
return m, err
}
if _, err := tw.Write([]byte(pubKey)); err != nil {
return m, err
}
if err := tw.Close(); err != nil {
return m, err
}
if err := makeDiskImage(diskImg, mc.DiskSize, buf.Bytes()); err != nil {
return m, err
}
if verbose {
fmt.Println("Initializing disk with ssh keys")
fmt.Printf("WRITING: %s\n-----\n", buf)
}
}
}
if err := m.AttachStorage("SATA", driver.StorageMedium{Port: 1, Device: 0, DriveType: driver.DriveHDD, Medium: diskImg}); err != nil {
return m, err
}
return m, nil
}
func (m *Machine) setUpShares() error {
// let VBoxService do nice magic automounting (when it's used)
if err := vbm("guestproperty", "set", m.Name, "/VirtualBox/GuestAdd/SharedFolders/MountPrefix", "/"); err != nil {
return err
}
if err := vbm("guestproperty", "set", m.Name, "/VirtualBox/GuestAdd/SharedFolders/MountDir", "/"); err != nil {
return err
}
// set up some shared folders as appropriate
if len(cfg.shares) == 0 {
cfg.shares.Set(shareDefault)
}
for shareName, shareDir := range cfg.shares {
if shareDir == "disable" {
continue
}
if _, err := os.Stat(shareDir); err != nil {
return err
}
// woo, shareDir exists! let's carry on!
if err := vbm("sharedfolder", "add", m.Name, "--name", shareName, "--hostpath", shareDir, "--automount"); err != nil {
return err
}
// enable symlinks
if err := vbm("setextradata", m.Name, "VBoxInternal2/SharedFoldersEnableSymlinksCreate/"+shareName, "1"); err != nil {
return err
}
}
return nil
}
// Modify changes the settings of the machine.
func (m *Machine) Modify() error {
args := []string{"modifyvm", m.Name,
"--firmware", "bios",
"--bioslogofadein", "off",
"--bioslogofadeout", "off",
"--bioslogodisplaytime", "0",
"--biosbootmenu", "disabled",
// the DNS Host Resolver doesn't support SRV records
// the DNS proxy has performance issues
// direct DNS pass-through doesn't support roaming laptops well
// we can't win, so let's go direct and at least get performance
"--natdnshostresolver1", "off",
"--natdnsproxy1", "off",
"--ostype", m.OSType,
"--cpus", fmt.Sprintf("%d", m.CPUs),
"--memory", fmt.Sprintf("%d", m.Memory),
"--vram", fmt.Sprintf("%d", m.VRAM),
"--acpi", m.Flag.Get(F_acpi),
"--ioapic", m.Flag.Get(F_ioapic),
"--rtcuseutc", m.Flag.Get(F_rtcuseutc),
"--cpuhotplug", m.Flag.Get(F_cpuhotplug),
"--pae", m.Flag.Get(F_pae),
"--longmode", m.Flag.Get(F_longmode),
"--hpet", m.Flag.Get(F_hpet),
"--hwvirtex", m.Flag.Get(F_hwvirtex),
"--triplefaultreset", m.Flag.Get(F_triplefaultreset),
"--nestedpaging", m.Flag.Get(F_nestedpaging),
"--largepages", m.Flag.Get(F_largepages),
"--vtxvpid", m.Flag.Get(F_vtxvpid),
"--vtxux", m.Flag.Get(F_vtxux),
"--accelerate3d", m.Flag.Get(F_accelerate3d),
}
//if runtime.GOOS != "windows" {
args = append(args,
"--uart1", "0x3F8", "4",
"--uartmode1", "server", m.SerialFile,
)
//}
for i, dev := range m.BootOrder {
if i > 3 {
break // Only four slots `--boot{1,2,3,4}`. Ignore the rest.
}
args = append(args, fmt.Sprintf("--boot%d", i+1), dev)
}
if err := vbm(args...); err != nil {
return err
}
return m.Refresh()
}
// AddNATPF adds a NAT port forwarding rule to the n-th NIC with the given name.
func (m *Machine) AddNATPF(n int, name string, rule driver.PFRule) error {
return vbm("modifyvm", m.Name, fmt.Sprintf("--natpf%d", n),
fmt.Sprintf("%s,%s", name, rule.Format()))
}
// DelNATPF deletes the NAT port forwarding rule with the given name from the n-th NIC.
func (m *Machine) DelNATPF(n int, name string) error {
return vbm("controlvm", m.Name, fmt.Sprintf("natpf%d", n), "delete", name)
}
// SetNIC set the n-th NIC.
func (m *Machine) SetNIC(n int, nic driver.NIC) error {
args := []string{"modifyvm", m.Name,
fmt.Sprintf("--nic%d", n), string(nic.Network),
fmt.Sprintf("--nictype%d", n), string(nic.Hardware),
fmt.Sprintf("--cableconnected%d", n), "on",
}
if nic.Network == "hostonly" {
args = append(args, fmt.Sprintf("--hostonlyadapter%d", n), nic.HostonlyAdapter)
}
return vbm(args...)
}
// AddStorageCtl adds a storage controller with the given name.
func (m *Machine) AddStorageCtl(name string, ctl driver.StorageController) error {
args := []string{"storagectl", m.Name, "--name", name}
if ctl.SysBus != "" {
args = append(args, "--add", string(ctl.SysBus))
}
if ctl.Ports > 0 {
args = append(args, "--portcount", fmt.Sprintf("%d", ctl.Ports))
}
if ctl.Chipset != "" {
args = append(args, "--controller", string(ctl.Chipset))
}
args = append(args, "--hostiocache", bool2string(ctl.HostIOCache))
args = append(args, "--bootable", bool2string(ctl.Bootable))
return vbm(args...)
}
// DelStorageCtl deletes the storage controller with the given name.
func (m *Machine) DelStorageCtl(name string) error {
return vbm("storagectl", m.Name, "--name", name, "--remove")
}
// AttachStorage attaches a storage medium to the named storage controller.
func (m *Machine) AttachStorage(ctlName string, medium driver.StorageMedium) error {
return vbm("storageattach", m.Name, "--storagectl", ctlName,
"--port", fmt.Sprintf("%d", medium.Port),
"--device", fmt.Sprintf("%d", medium.Device),
"--type", string(medium.DriveType),
"--medium", medium.Medium,
)
}
| ["\"VBOX_INSTALL_PATH\"", "\"VBOX_MSI_INSTALL_PATH\""] | [] | ["VBOX_MSI_INSTALL_PATH", "VBOX_INSTALL_PATH"] | [] | ["VBOX_MSI_INSTALL_PATH", "VBOX_INSTALL_PATH"] | go | 2 | 0 |
src/rdpg-acceptance-tests/rdpg-service/consul/consul_test.go | package consul_test
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"regexp"
"strings"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
consulapi "github.com/hashicorp/consul/api"
"github.com/starkandwayne/rdpg-acceptance-tests/helpers"
. "github.com/starkandwayne/rdpg-acceptance-tests/rdpg-service/helper-functions"
)
type EnvVar struct {
Key string `json:"key"`
Value string `json:"value"`
}
func fetchConsulValue(key string) (value string, err error) {
consulConfig := consulapi.DefaultConfig()
consulConfig.Address = helpers.TestConfig.ConsulIP
consulClient, _ := consulapi.NewClient(consulConfig)
kv := consulClient.KV()
kvp, _, err := kv.Get(key, nil)
if err != nil {
fmt.Printf("%s\n", err)
return value, err
}
if kvp == nil {
return
}
value = string(kvp.Value)
return
}
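// Usage sketch (the cluster name "rdpgsc1" is hypothetical; keys of this shape
// are built further down in this file):
//
//   limit, err := fetchConsulValue("rdpg/rdpgsc1/capacity/instances/limit")
//
// An empty string and a nil error are returned when the key does not exist.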
func fetchAdminAPIEnvKeyValue(ip, envKey string) (value string, err error) {
// TODO: Allow for passing in Admin API port/user/pass
adminPort := os.Getenv("RDPGD_ADMIN_PORT")
adminUser := os.Getenv("RDPGD_ADMIN_USER")
adminPass := os.Getenv("RDPGD_ADMIN_PASS")
url := fmt.Sprintf("http://rdpg:admin@%s:%s/env/%s", ip, adminPort, envKey)
req, err := http.NewRequest("GET", url, bytes.NewBuffer([]byte("{}")))
if err != nil {
fmt.Printf("%s\n", err)
return value, err
}
req.SetBasicAuth(adminUser, adminPass)
httpClient := &http.Client{}
resp, err := httpClient.Do(req)
if err != nil {
fmt.Printf("%s\n", err)
return value, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
fmt.Printf("%s\n", err)
return value, err
}
ev := EnvVar{}
err = json.Unmarshal(body, &ev)
return ev.Value, err
}
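// The admin API endpoint is assumed to answer with a JSON document matching
// the EnvVar struct above, for example (hypothetical value):
//
//   {"key": "RDPGD_INSTANCE_LIMIT", "value": "10"}
//
// of which only the value field is returned.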
var _ = Describe("Consul Checks...", func() {
It("Check Node Counts", func() {
expectedPostgresqlSCNodeCount := 1
expectedPgbdrScNodeCount := 2
expectedPgbdrMcNodeCount := 3
allClusterNames := GetAllClusterNames()
for _, key := range allClusterNames {
tempClusterNodes := GetNodesByClusterName(key)
if key == "rdpgmc" {
fmt.Printf("Found %d of %d Management Cluster %s Nodes\n", len(tempClusterNodes), expectedPgbdrMcNodeCount, key)
Expect(len(tempClusterNodes)).To(Equal(expectedPgbdrMcNodeCount))
} else {
clusterService := GetClusterServiceType(tempClusterNodes[0].ServiceName)
if clusterService == `pgbdr` {
fmt.Printf("Found %d of %d Service Cluster %s Nodes\n", len(tempClusterNodes), expectedPgbdrScNodeCount, key)
Expect(len(tempClusterNodes)).To(Equal(expectedPgbdrScNodeCount))
} else {
fmt.Printf("Found %d of %d Service Cluster %s Nodes\n", len(tempClusterNodes), expectedPosgresqlSCNodeCount, key)
Expect(len(tempClusterNodes)).To(Equal(expectedPosgresqlSCNodeCount))
}
}
}
})
It("Check Datacenter Name", func() {
rdpgmcNodes := GetNodesByClusterName("rdpgmc")
datacenter := helpers.TestConfig.Datacenter
host := "consul.service." + datacenter + ".consul"
//NOTE: ConsulIP also contains the port.
consulIP := strings.Split(helpers.TestConfig.ConsulIP, ":")[0]
fmt.Printf("Digging host %s and address %s\n", host, consulIP)
digResult, err := helpers.Dig(host, consulIP)
fmt.Printf("Dig Answer count is %d, while the number of Management Cluster Nodes is %d\n", len(digResult.Answers), len(rdpgmcNodes))
Expect(err).Should(BeNil())
Expect(len(digResult.Answers)).To(Equal(len(rdpgmcNodes)))
for _, res := range digResult.Answers {
fmt.Printf("Dig Answer host name for %s is %s\n", res.Address, res.Host)
Expect(res.Host).To(Equal(host + "."))
}
})
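// The DNS check above is roughly equivalent to running (the datacenter name is
// whatever the test config provides, e.g. "dc1"):
//
//   dig @<consul-ip> consul.service.dc1.consul
//
// and expecting one answer per management cluster node.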
It("Check Leader", func() {
leader := GetLeader()
Expect(leader).NotTo(BeEmpty())
})
It("Check Peers", func() {
peersNum := 3
peers := GetPeers()
Expect(len(peers)).To(Equal(peersNum))
})
It("Check Health of all Services on Each Node", func() {
allNodeNames := GetAllNodeNames()
for _, name := range allNodeNames {
healthCheck := GetNodeHealthByNodeName(name)
Expect(len(healthCheck)).To(BeNumerically(">=", 1))
for i := 0; i < len(healthCheck); i++ {
fmt.Printf("The status for CheckId: %s on Node: %s is %s.\n", healthCheck[i].CheckID, name, healthCheck[i].Status)
Expect(healthCheck[i].CheckID).NotTo(BeEmpty())
Expect(healthCheck[i].Status).To(Equal("passing"))
}
}
})
It("Check Instances Hard and Soft Limits are set correctly in K/V Store", func() {
consulConfig := consulapi.DefaultConfig()
consulConfig.Address = helpers.TestConfig.ConsulIP
consulClient, _ := consulapi.NewClient(consulConfig)
catalog := consulClient.Catalog()
services, _, _ := catalog.Services(nil)
re := regexp.MustCompile(`^(rdpg(sc[0-9]+$))|(sc-([[:alnum:]|-])*m[0-9]+-c[0-9]+$)`)
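// The pattern is intended to match service cluster names only, e.g. "rdpgsc1"
// or "sc-foo-bar-m0-c1" (both examples are illustrative), so the management
// cluster is skipped in the loop below.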
for clusterName := range services {
if re.MatchString(clusterName) {
fmt.Printf("Cluster %s:\n", clusterName)
clusterNodes, _, _ := catalog.Service(clusterName, "", nil)
manifestValue, _ := fetchAdminAPIEnvKeyValue(clusterNodes[0].Address, `RDPGD_INSTANCE_ALLOWED`)
consulKey := fmt.Sprintf("rdpg/%s/capacity/instances/allowed", clusterName)
consulValue, _ := fetchConsulValue(consulKey)
fmt.Printf("Soft Instances Limit (allowed) manifest=%s, consul=%s \n", manifestValue, consulValue)
Expect(consulValue).To(Equal(manifestValue))
manifestValue, _ = fetchAdminAPIEnvKeyValue(clusterNodes[0].Address, `RDPGD_INSTANCE_LIMIT`)
consulKey = fmt.Sprintf("rdpg/%s/capacity/instances/limit", clusterName)
consulValue, _ = fetchConsulValue(consulKey)
fmt.Printf("Hard Instances Limit (limit) manifest=%s, consul=%s \n", manifestValue, consulValue)
Expect(consulValue).To(Equal(manifestValue))
}
}
})
})
| ["\"RDPGD_ADMIN_PORT\"", "\"RDPGD_ADMIN_USER\"", "\"RDPGD_ADMIN_PASS\""] | [] | ["RDPGD_ADMIN_PASS", "RDPGD_ADMIN_USER", "RDPGD_ADMIN_PORT"] | [] | ["RDPGD_ADMIN_PASS", "RDPGD_ADMIN_USER", "RDPGD_ADMIN_PORT"] | go | 3 | 0 |