filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence |
---|---|---|---|---|---|---|---|---|---|---|
yocto/poky/meta/lib/oe/package_manager.py | from abc import ABCMeta, abstractmethod
import os
import glob
import subprocess
import shutil
import multiprocessing
import re
import bb
import tempfile
import oe.utils
# this can be used by all PM backends to create the index files in parallel
def create_index(arg):
index_cmd = arg
try:
bb.note("Executing '%s' ..." % index_cmd)
result = subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
return("Index creation command '%s' failed with return code %d:\n%s" %
(e.cmd, e.returncode, e.output))
if result:
bb.note(result)
return None
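# Usage sketch: create_index is written to be the worker function for
# oe.utils.multiprocess_exec, which is how the indexer classes below use it.
# With a hypothetical command list:
#   index_cmds = ["createrepo --update -q /deploy/rpm/core2_64"]
#   failures = oe.utils.multiprocess_exec(index_cmds, create_index)
#   if failures:
#       bb.fatal('\n'.join(failures))
# Each worker returns None on success or an error string for the caller to report.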
class Indexer(object):
__metaclass__ = ABCMeta
def __init__(self, d, deploy_dir):
self.d = d
self.deploy_dir = deploy_dir
@abstractmethod
def write_index(self):
pass
class RpmIndexer(Indexer):
def get_ml_prefix_and_os_list(self, arch_var=None, os_var=None):
package_archs = {
'default': [],
}
target_os = {
'default': "",
}
if arch_var is not None and os_var is not None:
package_archs['default'] = self.d.getVar(arch_var, True).split()
package_archs['default'].reverse()
target_os['default'] = self.d.getVar(os_var, True).strip()
else:
package_archs['default'] = self.d.getVar("PACKAGE_ARCHS", True).split()
# arch order is reversed. This ensures the -best- match is
# listed first!
package_archs['default'].reverse()
target_os['default'] = self.d.getVar("TARGET_OS", True).strip()
multilibs = self.d.getVar('MULTILIBS', True) or ""
for ext in multilibs.split():
eext = ext.split(':')
if len(eext) > 1 and eext[0] == 'multilib':
localdata = bb.data.createCopy(self.d)
default_tune_key = "DEFAULTTUNE_virtclass-multilib-" + eext[1]
default_tune = localdata.getVar(default_tune_key, False)
if default_tune is None:
default_tune_key = "DEFAULTTUNE_ML_" + eext[1]
default_tune = localdata.getVar(default_tune_key, False)
if default_tune:
localdata.setVar("DEFAULTTUNE", default_tune)
bb.data.update_data(localdata)
package_archs[eext[1]] = localdata.getVar('PACKAGE_ARCHS',
True).split()
package_archs[eext[1]].reverse()
target_os[eext[1]] = localdata.getVar("TARGET_OS",
True).strip()
ml_prefix_list = dict()
for mlib in package_archs:
if mlib == 'default':
ml_prefix_list[mlib] = package_archs[mlib]
else:
ml_prefix_list[mlib] = list()
for arch in package_archs[mlib]:
if arch in ['all', 'noarch', 'any']:
ml_prefix_list[mlib].append(arch)
else:
ml_prefix_list[mlib].append(mlib + "_" + arch)
return (ml_prefix_list, target_os)
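# Illustration of the returned structures (tune and arch names are hypothetical):
#   ml_prefix_list = {'default': ['core2_64', 'noarch', 'any', 'all'],
#                     'lib32':   ['lib32_x86', 'noarch', 'any', 'all']}
#   target_os      = {'default': 'linux', 'lib32': 'linux'}
# The 'default' entry keeps the plain arch names; multilib entries are prefixed
# with "<mlib>_", except for 'all', 'noarch' and 'any'.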
def write_index(self):
sdk_pkg_archs = (self.d.getVar('SDK_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
all_mlb_pkg_archs = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
mlb_prefix_list = self.get_ml_prefix_and_os_list()[0]
archs = set()
for item in mlb_prefix_list:
archs = archs.union(set(i.replace('-', '_') for i in mlb_prefix_list[item]))
if len(archs) == 0:
archs = archs.union(set(all_mlb_pkg_archs))
archs = archs.union(set(sdk_pkg_archs))
rpm_createrepo = bb.utils.which(os.getenv('PATH'), "createrepo")
index_cmds = []
rpm_dirs_found = False
for arch in archs:
dbpath = os.path.join(self.d.getVar('WORKDIR', True), 'rpmdb', arch)
if os.path.exists(dbpath):
bb.utils.remove(dbpath, True)
arch_dir = os.path.join(self.deploy_dir, arch)
if not os.path.isdir(arch_dir):
continue
index_cmds.append("%s --dbpath %s --update -q %s" % \
(rpm_createrepo, dbpath, arch_dir))
rpm_dirs_found = True
if not rpm_dirs_found:
bb.note("There are no packages in %s" % self.deploy_dir)
return
result = oe.utils.multiprocess_exec(index_cmds, create_index)
if result:
bb.fatal('%s' % ('\n'.join(result)))
class OpkgIndexer(Indexer):
def write_index(self):
arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS",
"SDK_PACKAGE_ARCHS",
"MULTILIB_ARCHS"]
opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index")
if not os.path.exists(os.path.join(self.deploy_dir, "Packages")):
open(os.path.join(self.deploy_dir, "Packages"), "w").close()
index_cmds = []
for arch_var in arch_vars:
archs = self.d.getVar(arch_var, True)
if archs is None:
continue
for arch in archs.split():
pkgs_dir = os.path.join(self.deploy_dir, arch)
pkgs_file = os.path.join(pkgs_dir, "Packages")
if not os.path.isdir(pkgs_dir):
continue
if not os.path.exists(pkgs_file):
open(pkgs_file, "w").close()
index_cmds.append('%s -r %s -p %s -m %s' %
(opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir))
if len(index_cmds) == 0:
bb.note("There are no packages in %s!" % self.deploy_dir)
return
result = oe.utils.multiprocess_exec(index_cmds, create_index)
if result:
bb.fatal('%s' % ('\n'.join(result)))
class DpkgIndexer(Indexer):
def _create_configs(self):
bb.utils.mkdirhier(self.apt_conf_dir)
bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "lists", "partial"))
bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "apt.conf.d"))
bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "preferences.d"))
with open(os.path.join(self.apt_conf_dir, "preferences"),
"w") as prefs_file:
pass
with open(os.path.join(self.apt_conf_dir, "sources.list"),
"w+") as sources_file:
pass
with open(self.apt_conf_file, "w") as apt_conf:
with open(os.path.join(self.d.expand("${STAGING_ETCDIR_NATIVE}"),
"apt", "apt.conf.sample")) as apt_conf_sample:
for line in apt_conf_sample.read().split("\n"):
line = re.sub("#ROOTFS#", "/dev/null", line)
line = re.sub("#APTCONF#", self.apt_conf_dir, line)
apt_conf.write(line + "\n")
def write_index(self):
self.apt_conf_dir = os.path.join(self.d.expand("${APTCONF_TARGET}"),
"apt-ftparchive")
self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
self._create_configs()
os.environ['APT_CONFIG'] = self.apt_conf_file
pkg_archs = self.d.getVar('PACKAGE_ARCHS', True)
if pkg_archs is not None:
arch_list = pkg_archs.split()
sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS', True)
if sdk_pkg_archs is not None:
for a in sdk_pkg_archs.split():
if a not in pkg_archs:
arch_list.append(a)
all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in arch_list)
apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive")
gzip = bb.utils.which(os.getenv('PATH'), "gzip")
index_cmds = []
deb_dirs_found = False
for arch in arch_list:
arch_dir = os.path.join(self.deploy_dir, arch)
if not os.path.isdir(arch_dir):
continue
cmd = "cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages;" % (arch_dir, apt_ftparchive)
cmd += "%s -fc Packages > Packages.gz;" % gzip
with open(os.path.join(arch_dir, "Release"), "w+") as release:
release.write("Label: %s\n" % arch)
cmd += "PSEUDO_UNLOAD=1 %s release . >> Release" % apt_ftparchive
index_cmds.append(cmd)
deb_dirs_found = True
if not deb_dirs_found:
bb.note("There are no packages in %s" % self.deploy_dir)
return
result = oe.utils.multiprocess_exec(index_cmds, create_index)
if result:
bb.fatal('%s' % ('\n'.join(result)))
class PkgsList(object):
__metaclass__ = ABCMeta
def __init__(self, d, rootfs_dir):
self.d = d
self.rootfs_dir = rootfs_dir
@abstractmethod
def list(self, format=None):
pass
class RpmPkgsList(PkgsList):
def __init__(self, d, rootfs_dir, arch_var=None, os_var=None):
super(RpmPkgsList, self).__init__(d, rootfs_dir)
self.rpm_cmd = bb.utils.which(os.getenv('PATH'), "rpm")
self.image_rpmlib = os.path.join(self.rootfs_dir, 'var/lib/rpm')
self.ml_prefix_list, self.ml_os_list = \
RpmIndexer(d, rootfs_dir).get_ml_prefix_and_os_list(arch_var, os_var)
# Determine rpm version
cmd = "%s --version" % self.rpm_cmd
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
bb.fatal("Getting rpm version failed. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output))
self.rpm_version = int(output.split()[-1].split('.')[0])
'''
Translate the RPM/Smart format names to the OE multilib format names
'''
def _pkg_translate_smart_to_oe(self, pkg, arch):
new_pkg = pkg
new_arch = arch
fixed_arch = arch.replace('_', '-')
found = 0
for mlib in self.ml_prefix_list:
for cmp_arch in self.ml_prefix_list[mlib]:
fixed_cmp_arch = cmp_arch.replace('_', '-')
if fixed_arch == fixed_cmp_arch:
if mlib == 'default':
new_pkg = pkg
new_arch = cmp_arch
else:
new_pkg = mlib + '-' + pkg
# We need to strip off the ${mlib}_ prefix on the arch
new_arch = cmp_arch.replace(mlib + '_', '')
# Workaround for bug 3565. Simply look to see if we
# know of a package with that name, if not try again!
filename = os.path.join(self.d.getVar('PKGDATA_DIR', True),
'runtime-reverse',
new_pkg)
if os.path.exists(filename):
found = 1
break
if found == 1 and fixed_arch == fixed_cmp_arch:
break
#bb.note('%s, %s -> %s, %s' % (pkg, arch, new_pkg, new_arch))
return new_pkg, new_arch
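# Translation sketch (multilib prefix and arch names are hypothetical):
#   _pkg_translate_smart_to_oe('bash', 'lib32_x86')  ->  ('lib32-bash', 'x86')
#   _pkg_translate_smart_to_oe('bash', 'core2_64')   ->  ('bash', 'core2_64')
# The multilib name moves from the arch onto the package name, provided a
# matching entry exists under PKGDATA_DIR/runtime-reverse.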
def _list_pkg_deps(self):
cmd = [bb.utils.which(os.getenv('PATH'), "rpmresolve"),
"-t", self.image_rpmlib]
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip()
except subprocess.CalledProcessError as e:
bb.fatal("Cannot get the package dependencies. Command '%s' "
"returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output))
return output
def list(self, format=None):
if format == "deps":
if self.rpm_version == 4:
bb.fatal("'deps' format dependency listings are not supported with rpm 4 since rpmresolve does not work")
return self._list_pkg_deps()
cmd = self.rpm_cmd + ' --root ' + self.rootfs_dir
cmd += ' -D "_dbpath /var/lib/rpm" -qa'
if self.rpm_version == 4:
cmd += " --qf '[%{NAME} %{ARCH} %{VERSION}\n]'"
else:
cmd += " --qf '[%{NAME} %{ARCH} %{VERSION} %{PACKAGEORIGIN}\n]'"
try:
# bb.note(cmd)
tmp_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip()
except subprocess.CalledProcessError as e:
bb.fatal("Cannot get the installed packages list. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output))
output = list()
for line in tmp_output.split('\n'):
if len(line.strip()) == 0:
continue
pkg = line.split()[0]
arch = line.split()[1]
ver = line.split()[2]
if self.rpm_version == 4:
pkgorigin = "unknown"
else:
pkgorigin = line.split()[3]
new_pkg, new_arch = self._pkg_translate_smart_to_oe(pkg, arch)
if format == "arch":
output.append('%s %s' % (new_pkg, new_arch))
elif format == "file":
output.append('%s %s %s' % (new_pkg, pkgorigin, new_arch))
elif format == "ver":
output.append('%s %s %s' % (new_pkg, new_arch, ver))
else:
output.append('%s' % (new_pkg))
output.sort()
return '\n'.join(output)
class OpkgPkgsList(PkgsList):
def __init__(self, d, rootfs_dir, config_file):
super(OpkgPkgsList, self).__init__(d, rootfs_dir)
self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg-cl")
self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir)
self.opkg_args += self.d.getVar("OPKG_ARGS", True)
def list(self, format=None):
opkg_query_cmd = bb.utils.which(os.getenv('PATH'), "opkg-query-helper.py")
if format == "arch":
cmd = "%s %s status | %s -a" % \
(self.opkg_cmd, self.opkg_args, opkg_query_cmd)
elif format == "file":
cmd = "%s %s status | %s -f" % \
(self.opkg_cmd, self.opkg_args, opkg_query_cmd)
elif format == "ver":
cmd = "%s %s status | %s -v" % \
(self.opkg_cmd, self.opkg_args, opkg_query_cmd)
elif format == "deps":
cmd = "%s %s status | %s" % \
(self.opkg_cmd, self.opkg_args, opkg_query_cmd)
else:
cmd = "%s %s list_installed | cut -d' ' -f1" % \
(self.opkg_cmd, self.opkg_args)
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip()
except subprocess.CalledProcessError as e:
bb.fatal("Cannot get the installed packages list. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output))
if output and format == "file":
tmp_output = ""
for line in output.split('\n'):
pkg, pkg_file, pkg_arch = line.split()
full_path = os.path.join(self.rootfs_dir, pkg_arch, pkg_file)
if os.path.exists(full_path):
tmp_output += "%s %s %s\n" % (pkg, full_path, pkg_arch)
else:
tmp_output += "%s %s %s\n" % (pkg, pkg_file, pkg_arch)
output = tmp_output
return output
class DpkgPkgsList(PkgsList):
def list(self, format=None):
cmd = [bb.utils.which(os.getenv('PATH'), "dpkg-query"),
"--admindir=%s/var/lib/dpkg" % self.rootfs_dir,
"-W"]
if format == "arch":
cmd.append("-f=${Package} ${PackageArch}\n")
elif format == "file":
cmd.append("-f=${Package} ${Package}_${Version}_${Architecture}.deb ${PackageArch}\n")
elif format == "ver":
cmd.append("-f=${Package} ${PackageArch} ${Version}\n")
elif format == "deps":
cmd.append("-f=Package: ${Package}\nDepends: ${Depends}\nRecommends: ${Recommends}\n\n")
else:
cmd.append("-f=${Package}\n")
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip()
except subprocess.CalledProcessError as e:
bb.fatal("Cannot get the installed packages list. Command '%s' "
"returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output))
if format == "file":
tmp_output = ""
for line in tuple(output.split('\n')):
if not line.strip():
continue
pkg, pkg_file, pkg_arch = line.split()
full_path = os.path.join(self.rootfs_dir, pkg_arch, pkg_file)
if os.path.exists(full_path):
tmp_output += "%s %s %s\n" % (pkg, full_path, pkg_arch)
else:
tmp_output += "%s %s %s\n" % (pkg, pkg_file, pkg_arch)
output = tmp_output
elif format == "deps":
opkg_query_cmd = bb.utils.which(os.getenv('PATH'), "opkg-query-helper.py")
file_out = tempfile.NamedTemporaryFile()
file_out.write(output)
file_out.flush()
try:
output = subprocess.check_output("cat %s | %s" %
(file_out.name, opkg_query_cmd),
stderr=subprocess.STDOUT,
shell=True)
except subprocess.CalledProcessError as e:
file_out.close()
bb.fatal("Cannot compute packages dependencies. Command '%s' "
"returned %d:\n%s" % (e.cmd, e.returncode, e.output))
file_out.close()
return output
class PackageManager(object):
"""
This is an abstract class. Do not instantiate this directly.
"""
__metaclass__ = ABCMeta
def __init__(self, d):
self.d = d
self.deploy_dir = None
self.deploy_lock = None
self.feed_uris = self.d.getVar('PACKAGE_FEED_URIS', True) or ""
"""
Update the package manager package database.
"""
@abstractmethod
def update(self):
pass
"""
Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
True, installation failures are ignored.
"""
@abstractmethod
def install(self, pkgs, attempt_only=False):
pass
"""
Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
is False, any dependencies are left in place.
"""
@abstractmethod
def remove(self, pkgs, with_dependencies=True):
pass
"""
This function creates the index files
"""
@abstractmethod
def write_index(self):
pass
@abstractmethod
def remove_packaging_data(self):
pass
@abstractmethod
def list_installed(self, format=None):
pass
@abstractmethod
def insert_feeds_uris(self):
pass
"""
Install complementary packages based upon the list of currently installed
packages, e.g. locales, *-dev, *-dbg, etc. Installation is attempted only;
if a package does not exist, no error is raised. Note: every backend needs
to call this function explicitly after the normal package installation.
"""
def install_complementary(self, globs=None):
# we need to write the list of installed packages to a file because the
# oe-pkgdata-util reads it from a file
installed_pkgs_file = os.path.join(self.d.getVar('WORKDIR', True),
"installed_pkgs.txt")
with open(installed_pkgs_file, "w+") as installed_pkgs:
installed_pkgs.write(self.list_installed("arch"))
if globs is None:
globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY', True)
split_linguas = set()
for translation in self.d.getVar('IMAGE_LINGUAS', True).split():
split_linguas.add(translation)
split_linguas.add(translation.split('-')[0])
split_linguas = sorted(split_linguas)
for lang in split_linguas:
globs += " *-locale-%s" % lang
if globs is None:
return
cmd = [bb.utils.which(os.getenv('PATH'), "oe-pkgdata-util"),
"-p", self.d.getVar('PKGDATA_DIR', True), "glob", installed_pkgs_file,
globs]
exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY', True)
if exclude:
cmd.extend(['-x', exclude])
try:
bb.note("Installing complementary packages ...")
complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
bb.fatal("Could not compute complementary packages list. Command "
"'%s' returned %d:\n%s" %
(' '.join(cmd), e.returncode, e.output))
self.install(complementary_pkgs.split(), attempt_only=True)
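# Sketch of the helper invocation assembled above (paths and globs are
# hypothetical):
#   oe-pkgdata-util -p <PKGDATA_DIR> glob <WORKDIR>/installed_pkgs.txt \
#       "*-dev *-dbg *-locale-en" -x "<excluded-pkgs>"
# Its output is a whitespace-separated package list which is then installed
# with attempt_only=True, so missing complementary packages are not fatal.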
def deploy_dir_lock(self):
if self.deploy_dir is None:
raise RuntimeError("deploy_dir is not set!")
lock_file_name = os.path.join(self.deploy_dir, "deploy.lock")
self.deploy_lock = bb.utils.lockfile(lock_file_name)
def deploy_dir_unlock(self):
if self.deploy_lock is None:
return
bb.utils.unlockfile(self.deploy_lock)
self.deploy_lock = None
class RpmPM(PackageManager):
def __init__(self,
d,
target_rootfs,
target_vendor,
task_name='target',
providename=None,
arch_var=None,
os_var=None):
super(RpmPM, self).__init__(d)
self.target_rootfs = target_rootfs
self.target_vendor = target_vendor
self.task_name = task_name
self.providename = providename
self.fullpkglist = list()
self.deploy_dir = self.d.getVar('DEPLOY_DIR_RPM', True)
self.etcrpm_dir = os.path.join(self.target_rootfs, "etc/rpm")
self.install_dir_name = "oe_install"
self.install_dir_path = os.path.join(self.target_rootfs, self.install_dir_name)
self.rpm_cmd = bb.utils.which(os.getenv('PATH'), "rpm")
self.smart_cmd = bb.utils.which(os.getenv('PATH'), "smart")
self.smart_opt = "--quiet --data-dir=" + os.path.join(target_rootfs,
'var/lib/smart')
self.scriptlet_wrapper = self.d.expand('${WORKDIR}/scriptlet_wrapper')
self.solution_manifest = self.d.expand('${T}/saved/%s_solution' %
self.task_name)
self.saved_rpmlib = self.d.expand('${T}/saved/%s' % self.task_name)
self.image_rpmlib = os.path.join(self.target_rootfs, 'var/lib/rpm')
if not os.path.exists(self.d.expand('${T}/saved')):
bb.utils.mkdirhier(self.d.expand('${T}/saved'))
self.indexer = RpmIndexer(self.d, self.deploy_dir)
self.pkgs_list = RpmPkgsList(self.d, self.target_rootfs, arch_var, os_var)
self.rpm_version = self.pkgs_list.rpm_version
self.ml_prefix_list, self.ml_os_list = self.indexer.get_ml_prefix_and_os_list(arch_var, os_var)
def insert_feeds_uris(self):
if self.feed_uris == "":
return
# List must be in most-preferred to least-preferred order
default_platform_extra = set()
platform_extra = set()
bbextendvariant = self.d.getVar('BBEXTENDVARIANT', True) or ""
for mlib in self.ml_os_list:
for arch in self.ml_prefix_list[mlib]:
plt = arch.replace('-', '_') + '-.*-' + self.ml_os_list[mlib]
if mlib == bbextendvariant:
default_platform_extra.add(plt)
else:
platform_extra.add(plt)
platform_extra = platform_extra.union(default_platform_extra)
arch_list = []
for canonical_arch in platform_extra:
arch = canonical_arch.split('-')[0]
if not os.path.exists(os.path.join(self.deploy_dir, arch)):
continue
arch_list.append(arch)
uri_iterator = 0
channel_priority = 10 + 5 * len(self.feed_uris.split()) * len(arch_list)
for uri in self.feed_uris.split():
for arch in arch_list:
bb.note('Note: adding Smart channel url%d-%s (%s)' %
(uri_iterator, arch, channel_priority))
self._invoke_smart('channel --add url%d-%s type=rpm-md baseurl=%s/rpm/%s -y'
% (uri_iterator, arch, uri, arch))
self._invoke_smart('channel --set url%d-%s priority=%d' %
(uri_iterator, arch, channel_priority))
channel_priority -= 5
uri_iterator += 1
'''
Create configs for rpm and smart, with multilib support
'''
def create_configs(self):
target_arch = self.d.getVar('TARGET_ARCH', True)
platform = '%s%s-%s' % (target_arch.replace('-', '_'),
self.target_vendor,
self.ml_os_list['default'])
# List must be in most-preferred to least-preferred order
default_platform_extra = list()
platform_extra = list()
bbextendvariant = self.d.getVar('BBEXTENDVARIANT', True) or ""
for mlib in self.ml_os_list:
for arch in self.ml_prefix_list[mlib]:
plt = arch.replace('-', '_') + '-.*-' + self.ml_os_list[mlib]
if mlib == bbextendvariant:
if plt not in default_platform_extra:
default_platform_extra.append(plt)
else:
if plt not in platform_extra:
platform_extra.append(plt)
platform_extra = default_platform_extra + platform_extra
self._create_configs(platform, platform_extra)
def _invoke_smart(self, args):
cmd = "%s %s %s" % (self.smart_cmd, self.smart_opt, args)
# bb.note(cmd)
try:
complementary_pkgs = subprocess.check_output(cmd,
stderr=subprocess.STDOUT,
shell=True)
# bb.note(complementary_pkgs)
return complementary_pkgs
except subprocess.CalledProcessError as e:
bb.fatal("Could not invoke smart. Command "
"'%s' returned %d:\n%s" % (cmd, e.returncode, e.output))
def _search_pkg_name_in_feeds(self, pkg, feed_archs):
for arch in feed_archs:
arch = arch.replace('-', '_')
regex_match = re.compile(r"^%s-[^-]*-[^-]*@%s$" % \
(re.escape(pkg), re.escape(arch)))
for p in self.fullpkglist:
if regex_match.match(p) is not None:
# First found is best match
# bb.note('%s -> %s' % (pkg, pkg + '@' + arch))
return pkg + '@' + arch
return ""
'''
Translate the OE multilib format names to the RPM/Smart format names
It searches for the RPM/Smart format names in the probable multilib feeds
first, and then searches the default base feed.
'''
def _pkg_translate_oe_to_smart(self, pkgs, attempt_only=False):
new_pkgs = list()
for pkg in pkgs:
new_pkg = pkg
# Search new_pkg in probable multilibs first
for mlib in self.ml_prefix_list:
# Jump the default archs
if mlib == 'default':
continue
subst = pkg.replace(mlib + '-', '')
# if the pkg in this multilib feed
if subst != pkg:
feed_archs = self.ml_prefix_list[mlib]
new_pkg = self._search_pkg_name_in_feeds(subst, feed_archs)
if not new_pkg:
# Failed to translate, package not found!
err_msg = '%s not found in the %s feeds (%s).\n' % \
(pkg, mlib, " ".join(feed_archs))
if not attempt_only:
err_msg += " ".join(self.fullpkglist)
bb.fatal(err_msg)
bb.warn(err_msg)
else:
new_pkgs.append(new_pkg)
break
# Apparently not a multilib package...
if pkg == new_pkg:
# Search new_pkg in default archs
default_archs = self.ml_prefix_list['default']
new_pkg = self._search_pkg_name_in_feeds(pkg, default_archs)
if not new_pkg:
err_msg = '%s not found in the base feeds (%s).\n' % \
(pkg, ' '.join(default_archs))
if not attempt_only:
err_msg += " ".join(self.fullpkglist)
bb.fatal(err_msg)
bb.warn(err_msg)
else:
new_pkgs.append(new_pkg)
return new_pkgs
def _create_configs(self, platform, platform_extra):
# Setup base system configuration
bb.note("configuring RPM platform settings")
# Configure internal RPM environment when using Smart
os.environ['RPM_ETCRPM'] = self.etcrpm_dir
bb.utils.mkdirhier(self.etcrpm_dir)
# Setup temporary directory -- install...
if os.path.exists(self.install_dir_path):
bb.utils.remove(self.install_dir_path, True)
bb.utils.mkdirhier(os.path.join(self.install_dir_path, 'tmp'))
channel_priority = 5
platform_dir = os.path.join(self.etcrpm_dir, "platform")
sdkos = self.d.getVar("SDK_OS", True)
with open(platform_dir, "w+") as platform_fd:
platform_fd.write(platform + '\n')
for pt in platform_extra:
channel_priority += 5
if sdkos:
tmp = re.sub("-%s$" % sdkos, "-%s\n" % sdkos, pt)
tmp = re.sub("-linux.*$", "-linux.*\n", tmp)
platform_fd.write(tmp)
# Tell RPM that the "/" directory exist and is available
bb.note("configuring RPM system provides")
sysinfo_dir = os.path.join(self.etcrpm_dir, "sysinfo")
bb.utils.mkdirhier(sysinfo_dir)
with open(os.path.join(sysinfo_dir, "Dirnames"), "w+") as dirnames:
dirnames.write("/\n")
if self.providename:
providename_dir = os.path.join(sysinfo_dir, "Providename")
if not os.path.exists(providename_dir):
providename_content = '\n'.join(self.providename)
providename_content += '\n'
open(providename_dir, "w+").write(providename_content)
# Configure RPM... we enforce these settings!
bb.note("configuring RPM DB settings")
# After changing the __db.* cache size, the log file will not be
# generated automatically, which raises some warnings,
# so touch a bare log file for rpm to write into.
if self.rpm_version == 5:
rpmlib_log = os.path.join(self.image_rpmlib, 'log', 'log.0000000001')
if not os.path.exists(rpmlib_log):
bb.utils.mkdirhier(os.path.join(self.image_rpmlib, 'log'))
open(rpmlib_log, 'w+').close()
DB_CONFIG_CONTENT = "# ================ Environment\n" \
"set_data_dir .\n" \
"set_create_dir .\n" \
"set_lg_dir ./log\n" \
"set_tmp_dir ./tmp\n" \
"set_flags db_log_autoremove on\n" \
"\n" \
"# -- thread_count must be >= 8\n" \
"set_thread_count 64\n" \
"\n" \
"# ================ Logging\n" \
"\n" \
"# ================ Memory Pool\n" \
"set_cachesize 0 1048576 0\n" \
"set_mp_mmapsize 268435456\n" \
"\n" \
"# ================ Locking\n" \
"set_lk_max_locks 16384\n" \
"set_lk_max_lockers 16384\n" \
"set_lk_max_objects 16384\n" \
"mutex_set_max 163840\n" \
"\n" \
"# ================ Replication\n"
db_config_dir = os.path.join(self.image_rpmlib, 'DB_CONFIG')
if not os.path.exists(db_config_dir):
open(db_config_dir, 'w+').write(DB_CONFIG_CONTENT)
# Create database so that smart doesn't complain (lazy init)
opt = "-qa"
if self.rpm_version == 4:
opt = "--initdb"
cmd = "%s --root %s --dbpath /var/lib/rpm %s > /dev/null" % (
self.rpm_cmd, self.target_rootfs, opt)
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
bb.fatal("Create rpm database failed. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output))
# Configure smart
bb.note("configuring Smart settings")
bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'),
True)
self._invoke_smart('config --set rpm-root=%s' % self.target_rootfs)
self._invoke_smart('config --set rpm-dbpath=/var/lib/rpm')
self._invoke_smart('config --set rpm-extra-macros._var=%s' %
self.d.getVar('localstatedir', True))
cmd = "config --set rpm-extra-macros._tmppath=/%s/tmp" % (self.install_dir_name)
prefer_color = self.d.getVar('RPM_PREFER_ELF_ARCH', True)
if prefer_color:
if prefer_color not in ['0', '1', '2', '4']:
bb.fatal("Invalid RPM_PREFER_ELF_ARCH: %s, it should be one of:\n"
"\t1: ELF32 wins\n"
"\t2: ELF64 wins\n"
"\t4: ELF64 N32 wins (mips64 or mips64el only)" %
prefer_color)
if prefer_color == "4" and self.d.getVar("TUNE_ARCH", True) not in \
['mips64', 'mips64el']:
bb.fatal("RPM_PREFER_ELF_ARCH = \"4\" is for mips64 or mips64el "
"only.")
self._invoke_smart('config --set rpm-extra-macros._prefer_color=%s'
% prefer_color)
self._invoke_smart(cmd)
self._invoke_smart('config --set rpm-ignoresize=1')
# Write common configuration for host and target usage
self._invoke_smart('config --set rpm-nolinktos=1')
self._invoke_smart('config --set rpm-noparentdirs=1')
check_signature = self.d.getVar('RPM_CHECK_SIGNATURES', True)
if check_signature and check_signature.strip() == "0":
self._invoke_smart('config --set rpm-check-signatures=false')
for i in self.d.getVar('BAD_RECOMMENDATIONS', True).split():
self._invoke_smart('flag --set ignore-recommends %s' % i)
# Do the following configurations here, to avoid them being
# saved for field upgrade
if self.d.getVar('NO_RECOMMENDATIONS', True).strip() == "1":
self._invoke_smart('config --set ignore-all-recommends=1')
pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE', True) or ""
for i in pkg_exclude.split():
self._invoke_smart('flag --set exclude-packages %s' % i)
# Optional debugging
# self._invoke_smart('config --set rpm-log-level=debug')
# cmd = 'config --set rpm-log-file=/tmp/smart-debug-logfile'
# self._invoke_smart(cmd)
ch_already_added = []
for canonical_arch in platform_extra:
arch = canonical_arch.split('-')[0]
arch_channel = os.path.join(self.deploy_dir, arch)
if os.path.exists(arch_channel) and not arch in ch_already_added:
bb.note('Note: adding Smart channel %s (%s)' %
(arch, channel_priority))
self._invoke_smart('channel --add %s type=rpm-md baseurl=%s -y'
% (arch, arch_channel))
self._invoke_smart('channel --set %s priority=%d' %
(arch, channel_priority))
channel_priority -= 5
ch_already_added.append(arch)
bb.note('adding Smart RPM DB channel')
self._invoke_smart('channel --add rpmsys type=rpm-sys -y')
# Construct install scriptlet wrapper.
# Scripts need to be ordered when executed; this ensures numeric order.
# If we ever run into needing more than 899 scripts, we'll have to
# change num to start at 1000.
#
if self.rpm_version == 4:
scriptletcmd = "$2 $3 $4\n"
scriptpath = "$3"
else:
scriptletcmd = "$2 $1/$3 $4\n"
scriptpath = "$1/$3"
SCRIPTLET_FORMAT = "#!/bin/bash\n" \
"\n" \
"export PATH=%s\n" \
"export D=%s\n" \
'export OFFLINE_ROOT="$D"\n' \
'export IPKG_OFFLINE_ROOT="$D"\n' \
'export OPKG_OFFLINE_ROOT="$D"\n' \
"export INTERCEPT_DIR=%s\n" \
"export NATIVE_ROOT=%s\n" \
"\n" \
+ scriptletcmd + \
"if [ $? -ne 0 ]; then\n" \
" if [ $4 -eq 1 ]; then\n" \
" mkdir -p $1/etc/rpm-postinsts\n" \
" num=100\n" \
" while [ -e $1/etc/rpm-postinsts/${num}-* ]; do num=$((num + 1)); done\n" \
" name=`head -1 " + scriptpath + " | cut -d\' \' -f 2`\n" \
' echo "#!$2" > $1/etc/rpm-postinsts/${num}-${name}\n' \
' echo "# Arg: $4" >> $1/etc/rpm-postinsts/${num}-${name}\n' \
" cat " + scriptpath + " >> $1/etc/rpm-postinsts/${num}-${name}\n" \
" chmod +x $1/etc/rpm-postinsts/${num}-${name}\n" \
" else\n" \
' echo "Error: pre/post remove scriptlet failed"\n' \
" fi\n" \
"fi\n"
intercept_dir = self.d.expand('${WORKDIR}/intercept_scripts')
native_root = self.d.getVar('STAGING_DIR_NATIVE', True)
scriptlet_content = SCRIPTLET_FORMAT % (os.environ['PATH'],
self.target_rootfs,
intercept_dir,
native_root)
open(self.scriptlet_wrapper, 'w+').write(scriptlet_content)
bb.note("Note: configuring RPM cross-install scriptlet_wrapper")
os.chmod(self.scriptlet_wrapper, 0755)
cmd = 'config --set rpm-extra-macros._cross_scriptlet_wrapper=%s' % \
self.scriptlet_wrapper
self._invoke_smart(cmd)
# Debug to show smart config info
# bb.note(self._invoke_smart('config --show'))
def update(self):
self._invoke_smart('update rpmsys')
'''
Install pkgs with smart, the pkg name is oe format
'''
def install(self, pkgs, attempt_only=False):
bb.note("Installing the following packages: %s" % ' '.join(pkgs))
if attempt_only and len(pkgs) == 0:
return
pkgs = self._pkg_translate_oe_to_smart(pkgs, attempt_only)
if not attempt_only:
bb.note('to be installed: %s' % ' '.join(pkgs))
cmd = "%s %s install -y %s" % \
(self.smart_cmd, self.smart_opt, ' '.join(pkgs))
bb.note(cmd)
else:
bb.note('installing attempt only packages...')
bb.note('Attempting %s' % ' '.join(pkgs))
cmd = "%s %s install --attempt -y %s" % \
(self.smart_cmd, self.smart_opt, ' '.join(pkgs))
try:
output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
bb.note(output)
except subprocess.CalledProcessError as e:
bb.fatal("Unable to install packages. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output))
'''
Remove pkgs with smart, the pkg name is smart/rpm format
'''
def remove(self, pkgs, with_dependencies=True):
bb.note('to be removed: ' + ' '.join(pkgs))
if not with_dependencies:
cmd = "%s -e --nodeps " % self.rpm_cmd
cmd += "--root=%s " % self.target_rootfs
cmd += "--dbpath=/var/lib/rpm "
cmd += "--define='_cross_scriptlet_wrapper %s' " % \
self.scriptlet_wrapper
cmd += "--define='_tmppath /%s/tmp' %s" % (self.install_dir_name, ' '.join(pkgs))
else:
# for pkg in pkgs:
# bb.note('Debug: What required: %s' % pkg)
# bb.note(self._invoke_smart('query %s --show-requiredby' % pkg))
cmd = "%s %s remove -y %s" % (self.smart_cmd,
self.smart_opt,
' '.join(pkgs))
try:
bb.note(cmd)
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
bb.note(output)
except subprocess.CalledProcessError as e:
bb.note("Unable to remove packages. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output))
def upgrade(self):
bb.note('smart upgrade')
self._invoke_smart('upgrade')
def write_index(self):
result = self.indexer.write_index()
if result is not None:
bb.fatal(result)
def remove_packaging_data(self):
bb.utils.remove(self.image_rpmlib, True)
bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'),
True)
bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/opkg'), True)
# remove temp directory
bb.utils.remove(self.install_dir_path, True)
def backup_packaging_data(self):
# Save the rpmlib for incremental rpm image generation
if os.path.exists(self.saved_rpmlib):
bb.utils.remove(self.saved_rpmlib, True)
shutil.copytree(self.image_rpmlib,
self.saved_rpmlib,
symlinks=True)
def recovery_packaging_data(self):
# Move the rpmlib back
if os.path.exists(self.saved_rpmlib):
if os.path.exists(self.image_rpmlib):
bb.utils.remove(self.image_rpmlib, True)
bb.note('Recovering packaging data')
shutil.copytree(self.saved_rpmlib,
self.image_rpmlib,
symlinks=True)
def list_installed(self, format=None):
return self.pkgs_list.list(format)
'''
For an incremental install, we need to determine what we've got,
what we need to add, and what to remove...
dump_install_solution will dump and save the new install
solution.
'''
def dump_install_solution(self, pkgs):
bb.note('creating new install solution for incremental install')
if len(pkgs) == 0:
return
pkgs = self._pkg_translate_oe_to_smart(pkgs, False)
install_pkgs = list()
cmd = "%s %s install -y --dump %s 2>%s" % \
(self.smart_cmd,
self.smart_opt,
' '.join(pkgs),
self.solution_manifest)
try:
# Disable rpmsys channel for the fake install
self._invoke_smart('channel --disable rpmsys')
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
with open(self.solution_manifest, 'r') as manifest:
for pkg in manifest.read().split('\n'):
if '@' in pkg:
install_pkgs.append(pkg)
except subprocess.CalledProcessError as e:
bb.note("Unable to dump install packages. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output))
# Re-enable the rpmsys channel
self._invoke_smart('channel --enable rpmsys')
return install_pkgs
'''
For an incremental install, we need to determine what we've got,
what we need to add, and what to remove...
load_old_install_solution will load the previous install
solution.
'''
def load_old_install_solution(self):
bb.note('load old install solution for incremental install')
installed_pkgs = list()
if not os.path.exists(self.solution_manifest):
bb.note('old install solution does not exist')
return installed_pkgs
with open(self.solution_manifest, 'r') as manifest:
for pkg in manifest.read().split('\n'):
if '@' in pkg:
installed_pkgs.append(pkg.strip())
return installed_pkgs
'''
Dump all available packages in the feeds; it should be invoked after the
newest rpm index has been created.
'''
def dump_all_available_pkgs(self):
available_manifest = self.d.expand('${T}/saved/available_pkgs.txt')
available_pkgs = list()
cmd = "%s %s query --output %s" % \
(self.smart_cmd, self.smart_opt, available_manifest)
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
with open(available_manifest, 'r') as manifest:
for pkg in manifest.read().split('\n'):
if '@' in pkg:
available_pkgs.append(pkg.strip())
except subprocess.CalledProcessError as e:
bb.note("Unable to list all available packages. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output))
self.fullpkglist = available_pkgs
return
def save_rpmpostinst(self, pkg):
mlibs = (self.d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split()
new_pkg = pkg
# Remove any multilib prefix from the package name
for mlib in mlibs:
if mlib in pkg:
new_pkg = pkg.replace(mlib + '-', '')
break
bb.note(' * postponing %s' % new_pkg)
saved_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/') + new_pkg
cmd = self.rpm_cmd + ' -q --scripts --root ' + self.target_rootfs
cmd += ' --dbpath=/var/lib/rpm ' + new_pkg
cmd += ' | sed -n -e "/^postinstall scriptlet (using .*):$/,/^.* scriptlet (using .*):$/ {/.*/p}"'
cmd += ' | sed -e "/postinstall scriptlet (using \(.*\)):$/d"'
cmd += ' -e "/^.* scriptlet (using .*):$/d" > %s' % saved_dir
try:
bb.note(cmd)
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip()
bb.note(output)
os.chmod(saved_dir, 0755)
except subprocess.CalledProcessError as e:
bb.fatal("Invoke save_rpmpostinst failed. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output))
'''Write common configuration for target usage'''
def rpm_setup_smart_target_config(self):
bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'),
True)
self._invoke_smart('config --set rpm-nolinktos=1')
self._invoke_smart('config --set rpm-noparentdirs=1')
for i in self.d.getVar('BAD_RECOMMENDATIONS', True).split():
self._invoke_smart('flag --set ignore-recommends %s' % i)
self._invoke_smart('channel --add rpmsys type=rpm-sys -y')
'''
The rpm db lock files are produced when rpm is invoked for queries on the
build system, and they prevent rpm on the target from working, so we
need to unlock the rpm db by removing the lock files.
'''
def unlock_rpm_db(self):
# Remove rpm db lock files
rpm_db_locks = glob.glob('%s/var/lib/rpm/__db.*' % self.target_rootfs)
for f in rpm_db_locks:
bb.utils.remove(f, True)
class OpkgPM(PackageManager):
def __init__(self, d, target_rootfs, config_file, archs, task_name='target'):
super(OpkgPM, self).__init__(d)
self.target_rootfs = target_rootfs
self.config_file = config_file
self.pkg_archs = archs
self.task_name = task_name
self.deploy_dir = self.d.getVar("DEPLOY_DIR_IPK", True)
self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg-cl")
self.opkg_args = "-f %s -o %s " % (self.config_file, target_rootfs)
self.opkg_args += self.d.getVar("OPKG_ARGS", True)
opkg_lib_dir = self.d.getVar('OPKGLIBDIR', True)
if opkg_lib_dir[0] == "/":
opkg_lib_dir = opkg_lib_dir[1:]
self.opkg_dir = os.path.join(target_rootfs, opkg_lib_dir, "opkg")
bb.utils.mkdirhier(self.opkg_dir)
self.saved_opkg_dir = self.d.expand('${T}/saved/%s' % self.task_name)
if not os.path.exists(self.d.expand('${T}/saved')):
bb.utils.mkdirhier(self.d.expand('${T}/saved'))
if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1":
self._create_config()
else:
self._create_custom_config()
self.indexer = OpkgIndexer(self.d, self.deploy_dir)
"""
This function will change a package's status in the /var/lib/opkg/status file.
If 'packages' is None then the status_tag will be applied to all
packages.
"""
def mark_packages(self, status_tag, packages=None):
status_file = os.path.join(self.opkg_dir, "status")
with open(status_file, "r") as sf:
with open(status_file + ".tmp", "w+") as tmp_sf:
if packages is None:
tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
r"Package: \1\n\2Status: \3%s" % status_tag,
sf.read()))
else:
if type(packages).__name__ != "list":
raise TypeError("'packages' should be a list object")
status = sf.read()
for pkg in packages:
status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
status)
tmp_sf.write(status)
os.rename(status_file + ".tmp", status_file)
def _create_custom_config(self):
bb.note("Building from feeds activated!")
with open(self.config_file, "w+") as config_file:
priority = 1
for arch in self.pkg_archs.split():
config_file.write("arch %s %d\n" % (arch, priority))
priority += 5
for line in (self.d.getVar('IPK_FEED_URIS', True) or "").split():
feed_match = re.match("^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)
if feed_match is not None:
feed_name = feed_match.group(1)
feed_uri = feed_match.group(2)
bb.note("Add %s feed with URL %s" % (feed_name, feed_uri))
config_file.write("src/gz %s %s\n" % (feed_name, feed_uri))
"""
Allow using the package deploy directory contents as a quick devel-testing
feed. This creates individual feed configs for each arch subdir of those
specified as compatible with the current machine.
NOTE: Development-helper feature, NOT a full-fledged feed.
"""
if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True) or "") != "":
for arch in self.pkg_archs.split():
cfg_file_name = os.path.join(self.target_rootfs,
self.d.getVar("sysconfdir", True),
"opkg",
"local-%s-feed.conf" % arch)
with open(cfg_file_name, "w+") as cfg_file:
cfg_file.write("src/gz local-%s %s/%s" %
(arch,
self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True),
arch))
def _create_config(self):
with open(self.config_file, "w+") as config_file:
priority = 1
for arch in self.pkg_archs.split():
config_file.write("arch %s %d\n" % (arch, priority))
priority += 5
config_file.write("src oe file:%s\n" % self.deploy_dir)
for arch in self.pkg_archs.split():
pkgs_dir = os.path.join(self.deploy_dir, arch)
if os.path.isdir(pkgs_dir):
config_file.write("src oe-%s file:%s\n" %
(arch, pkgs_dir))
def insert_feeds_uris(self):
if self.feed_uris == "":
return
rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf'
% self.target_rootfs)
with open(rootfs_config, "w+") as config_file:
uri_iterator = 0
for uri in self.feed_uris.split():
config_file.write("src/gz url-%d %s/ipk\n" %
(uri_iterator, uri))
for arch in self.pkg_archs.split():
if not os.path.exists(os.path.join(self.deploy_dir, arch)):
continue
bb.note('Note: adding opkg channel url-%s-%d (%s)' %
(arch, uri_iterator, uri))
config_file.write("src/gz uri-%s-%d %s/ipk/%s\n" %
(arch, uri_iterator, uri, arch))
uri_iterator += 1
def update(self):
self.deploy_dir_lock()
cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args)
try:
subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
self.deploy_dir_unlock()
bb.fatal("Unable to update the package index files. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output))
self.deploy_dir_unlock()
def install(self, pkgs, attempt_only=False):
if attempt_only and len(pkgs) == 0:
return
cmd = "%s %s install %s" % (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
os.environ['D'] = self.target_rootfs
os.environ['OFFLINE_ROOT'] = self.target_rootfs
os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR', True),
"intercept_scripts")
os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE', True)
try:
bb.note("Installing the following packages: %s" % ' '.join(pkgs))
bb.note(cmd)
output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
bb.note(output)
except subprocess.CalledProcessError as e:
(bb.fatal, bb.note)[attempt_only]("Unable to install packages. "
"Command '%s' returned %d:\n%s" %
(cmd, e.returncode, e.output))
def remove(self, pkgs, with_dependencies=True):
if with_dependencies:
cmd = "%s %s --force-depends --force-remove --force-removal-of-dependent-packages remove %s" % \
(self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
else:
cmd = "%s %s --force-depends remove %s" % \
(self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
try:
bb.note(cmd)
output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
bb.note(output)
except subprocess.CalledProcessError as e:
bb.fatal("Unable to remove packages. Command '%s' "
"returned %d:\n%s" % (e.cmd, e.returncode, e.output))
def write_index(self):
self.deploy_dir_lock()
result = self.indexer.write_index()
self.deploy_dir_unlock()
if result is not None:
bb.fatal(result)
def remove_packaging_data(self):
bb.utils.remove(self.opkg_dir, True)
# recreate the directory; it is needed by the PM lock
bb.utils.mkdirhier(self.opkg_dir)
def list_installed(self, format=None):
return OpkgPkgsList(self.d, self.target_rootfs, self.config_file).list(format)
def handle_bad_recommendations(self):
bad_recommendations = self.d.getVar("BAD_RECOMMENDATIONS", True) or ""
if bad_recommendations.strip() == "":
return
status_file = os.path.join(self.opkg_dir, "status")
# If the status file already exists, the bad recommendations have already
# been handled
if os.path.exists(status_file):
return
cmd = "%s %s info " % (self.opkg_cmd, self.opkg_args)
with open(status_file, "w+") as status:
for pkg in bad_recommendations.split():
pkg_info = cmd + pkg
try:
output = subprocess.check_output(pkg_info.split(), stderr=subprocess.STDOUT).strip()
except subprocess.CalledProcessError as e:
bb.fatal("Cannot get package info. Command '%s' "
"returned %d:\n%s" % (pkg_info, e.returncode, e.output))
if output == "":
bb.note("Ignored bad recommendation: '%s' is "
"not a package" % pkg)
continue
for line in output.split('\n'):
if line.startswith("Status:"):
status.write("Status: deinstall hold not-installed\n")
else:
status.write(line + "\n")
# Append a blank line after each package entry to ensure that it
# is separated from the following entry
status.write("\n")
'''
The following function performs a dummy install of pkgs and returns the output log.
'''
def dummy_install(self, pkgs):
if len(pkgs) == 0:
return
# Create a temp dir as the opkg root for the dummy installation
temp_rootfs = self.d.expand('${T}/opkg')
temp_opkg_dir = os.path.join(temp_rootfs, 'var/lib/opkg')
bb.utils.mkdirhier(temp_opkg_dir)
opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs)
opkg_args += self.d.getVar("OPKG_ARGS", True)
cmd = "%s %s update" % (self.opkg_cmd, opkg_args)
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
bb.fatal("Unable to update. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output))
# Dummy installation
cmd = "%s %s --noaction install %s " % (self.opkg_cmd,
opkg_args,
' '.join(pkgs))
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
bb.fatal("Unable to dummy install packages. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output))
bb.utils.remove(temp_rootfs, True)
return output
def backup_packaging_data(self):
# Save the opkglib for incremental ipk image generation
if os.path.exists(self.saved_opkg_dir):
bb.utils.remove(self.saved_opkg_dir, True)
shutil.copytree(self.opkg_dir,
self.saved_opkg_dir,
symlinks=True)
def recover_packaging_data(self):
# Move the opkglib back
if os.path.exists(self.saved_opkg_dir):
if os.path.exists(self.opkg_dir):
bb.utils.remove(self.opkg_dir, True)
bb.note('Recover packaging data')
shutil.copytree(self.saved_opkg_dir,
self.opkg_dir,
symlinks=True)
class DpkgPM(PackageManager):
def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None):
super(DpkgPM, self).__init__(d)
self.target_rootfs = target_rootfs
self.deploy_dir = self.d.getVar('DEPLOY_DIR_DEB', True)
if apt_conf_dir is None:
self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt")
else:
self.apt_conf_dir = apt_conf_dir
self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
self.apt_get_cmd = bb.utils.which(os.getenv('PATH'), "apt-get")
self.apt_args = d.getVar("APT_ARGS", True)
self.all_arch_list = archs.split()
all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
self.all_arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in self.all_arch_list)
self._create_configs(archs, base_archs)
self.indexer = DpkgIndexer(self.d, self.deploy_dir)
"""
This function will change a package's status in the /var/lib/dpkg/status file.
If 'packages' is None then the status_tag will be applied to all
packages.
"""
def mark_packages(self, status_tag, packages=None):
status_file = self.target_rootfs + "/var/lib/dpkg/status"
with open(status_file, "r") as sf:
with open(status_file + ".tmp", "w+") as tmp_sf:
if packages is None:
tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
r"Package: \1\n\2Status: \3%s" % status_tag,
sf.read()))
else:
if type(packages).__name__ != "list":
raise TypeError("'packages' should be a list object")
status = sf.read()
for pkg in packages:
status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
status)
tmp_sf.write(status)
os.rename(status_file + ".tmp", status_file)
"""
Run the pre/post installs for package "package_name". If package_name is
None, then run all pre/post install scriptlets.
"""
def run_pre_post_installs(self, package_name=None):
info_dir = self.target_rootfs + "/var/lib/dpkg/info"
suffixes = [(".preinst", "Preinstall"), (".postinst", "Postinstall")]
status_file = self.target_rootfs + "/var/lib/dpkg/status"
installed_pkgs = []
with open(status_file, "r") as status:
for line in status.read().split('\n'):
m = re.match("^Package: (.*)", line)
if m is not None:
installed_pkgs.append(m.group(1))
if package_name is not None and not package_name in installed_pkgs:
return
os.environ['D'] = self.target_rootfs
os.environ['OFFLINE_ROOT'] = self.target_rootfs
os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR', True),
"intercept_scripts")
os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE', True)
failed_pkgs = []
for pkg_name in installed_pkgs:
for suffix in suffixes:
p_full = os.path.join(info_dir, pkg_name + suffix[0])
if os.path.exists(p_full):
try:
bb.note("Executing %s for package: %s ..." %
(suffix[1].lower(), pkg_name))
subprocess.check_output(p_full, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
bb.note("%s for package %s failed with %d:\n%s" %
(suffix[1], pkg_name, e.returncode, e.output))
failed_pkgs.append(pkg_name)
break
if len(failed_pkgs):
self.mark_packages("unpacked", failed_pkgs)
def update(self):
os.environ['APT_CONFIG'] = self.apt_conf_file
self.deploy_dir_lock()
cmd = "%s update" % self.apt_get_cmd
try:
subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
bb.fatal("Unable to update the package index files. Command '%s' "
"returned %d:\n%s" % (e.cmd, e.returncode, e.output))
self.deploy_dir_unlock()
def install(self, pkgs, attempt_only=False):
if attempt_only and len(pkgs) == 0:
return
os.environ['APT_CONFIG'] = self.apt_conf_file
cmd = "%s %s install --force-yes --allow-unauthenticated %s" % \
(self.apt_get_cmd, self.apt_args, ' '.join(pkgs))
try:
bb.note("Installing the following packages: %s" % ' '.join(pkgs))
subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
(bb.fatal, bb.note)[attempt_only]("Unable to install packages. "
"Command '%s' returned %d:\n%s" %
(cmd, e.returncode, e.output))
# rename *.dpkg-new files/dirs
for root, dirs, files in os.walk(self.target_rootfs):
for dir in dirs:
new_dir = re.sub("\.dpkg-new", "", dir)
if dir != new_dir:
os.rename(os.path.join(root, dir),
os.path.join(root, new_dir))
for file in files:
new_file = re.sub("\.dpkg-new", "", file)
if file != new_file:
os.rename(os.path.join(root, file),
os.path.join(root, new_file))
def remove(self, pkgs, with_dependencies=True):
if with_dependencies:
os.environ['APT_CONFIG'] = self.apt_conf_file
cmd = "%s purge %s" % (self.apt_get_cmd, ' '.join(pkgs))
else:
cmd = "%s --admindir=%s/var/lib/dpkg --instdir=%s" \
" -P --force-depends %s" % \
(bb.utils.which(os.getenv('PATH'), "dpkg"),
self.target_rootfs, self.target_rootfs, ' '.join(pkgs))
try:
subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
bb.fatal("Unable to remove packages. Command '%s' "
"returned %d:\n%s" % (e.cmd, e.returncode, e.output))
def write_index(self):
self.deploy_dir_lock()
result = self.indexer.write_index()
self.deploy_dir_unlock()
if result is not None:
bb.fatal(result)
def insert_feeds_uris(self):
if self.feed_uris == "":
return
sources_conf = os.path.join("%s/etc/apt/sources.list"
% self.target_rootfs)
arch_list = []
for arch in self.all_arch_list:
if not os.path.exists(os.path.join(self.deploy_dir, arch)):
continue
arch_list.append(arch)
with open(sources_conf, "w+") as sources_file:
for uri in self.feed_uris.split():
for arch in arch_list:
bb.note('Note: adding dpkg channel at (%s)' % uri)
sources_file.write("deb %s/deb/%s ./\n" %
(uri, arch))
def _create_configs(self, archs, base_archs):
base_archs = re.sub("_", "-", base_archs)
if os.path.exists(self.apt_conf_dir):
bb.utils.remove(self.apt_conf_dir, True)
bb.utils.mkdirhier(self.apt_conf_dir)
bb.utils.mkdirhier(self.apt_conf_dir + "/lists/partial/")
bb.utils.mkdirhier(self.apt_conf_dir + "/apt.conf.d/")
arch_list = []
for arch in self.all_arch_list:
if not os.path.exists(os.path.join(self.deploy_dir, arch)):
continue
arch_list.append(arch)
with open(os.path.join(self.apt_conf_dir, "preferences"), "w+") as prefs_file:
priority = 801
for arch in arch_list:
prefs_file.write(
"Package: *\n"
"Pin: release l=%s\n"
"Pin-Priority: %d\n\n" % (arch, priority))
priority += 5
pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE', True) or ""
for pkg in pkg_exclude.split():
prefs_file.write(
"Package: %s\n"
"Pin: release *\n"
"Pin-Priority: -1\n\n" % pkg)
arch_list.reverse()
with open(os.path.join(self.apt_conf_dir, "sources.list"), "w+") as sources_file:
for arch in arch_list:
sources_file.write("deb file:%s/ ./\n" %
os.path.join(self.deploy_dir, arch))
base_arch_list = base_archs.split()
multilib_variants = self.d.getVar("MULTILIB_VARIANTS", True);
for variant in multilib_variants.split():
if variant == "lib32":
base_arch_list.append("i386")
elif variant == "lib64":
base_arch_list.append("amd64")
with open(self.apt_conf_file, "w+") as apt_conf:
with open(self.d.expand("${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample")) as apt_conf_sample:
for line in apt_conf_sample.read().split("\n"):
match_arch = re.match(" Architecture \".*\";$", line)
architectures = ""
if match_arch:
for base_arch in base_arch_list:
architectures += "\"%s\";" % base_arch
apt_conf.write(" Architectures {%s};\n" % architectures);
apt_conf.write(" Architecture \"%s\";\n" % base_archs)
else:
line = re.sub("#ROOTFS#", self.target_rootfs, line)
line = re.sub("#APTCONF#", self.apt_conf_dir, line)
apt_conf.write(line + "\n")
target_dpkg_dir = "%s/var/lib/dpkg" % self.target_rootfs
bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "info"))
bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "updates"))
if not os.path.exists(os.path.join(target_dpkg_dir, "status")):
open(os.path.join(target_dpkg_dir, "status"), "w+").close()
if not os.path.exists(os.path.join(target_dpkg_dir, "available")):
open(os.path.join(target_dpkg_dir, "available"), "w+").close()
def remove_packaging_data(self):
bb.utils.remove(os.path.join(self.target_rootfs,
self.d.getVar('opkglibdir', True)), True)
bb.utils.remove(self.target_rootfs + "/var/lib/dpkg/", True)
def fix_broken_dependencies(self):
os.environ['APT_CONFIG'] = self.apt_conf_file
cmd = "%s %s -f install" % (self.apt_get_cmd, self.apt_args)
try:
subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
bb.fatal("Cannot fix broken dependencies. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output))
def list_installed(self, format=None):
return DpkgPkgsList(self.d, self.target_rootfs).list()
def generate_index_files(d):
classes = d.getVar('PACKAGE_CLASSES', True).replace("package_", "").split()
indexer_map = {
"rpm": (RpmIndexer, d.getVar('DEPLOY_DIR_RPM', True)),
"ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK', True)),
"deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB', True))
}
result = None
for pkg_class in classes:
if not pkg_class in indexer_map:
continue
if os.path.exists(indexer_map[pkg_class][1]):
result = indexer_map[pkg_class][0](d, indexer_map[pkg_class][1]).write_index()
if result is not None:
bb.fatal(result)
if __name__ == "__main__":
"""
We should be able to run this as a standalone script, from outside bitbake
environment.
"""
"""
TBD
"""
| [] | [] | [
"APT_CONFIG",
"OFFLINE_ROOT",
"IPKG_OFFLINE_ROOT",
"INTERCEPT_DIR",
"RPM_ETCRPM",
"OPKG_OFFLINE_ROOT",
"NATIVE_ROOT",
"D",
"PATH"
] | [] | ["APT_CONFIG", "OFFLINE_ROOT", "IPKG_OFFLINE_ROOT", "INTERCEPT_DIR", "RPM_ETCRPM", "OPKG_OFFLINE_ROOT", "NATIVE_ROOT", "D", "PATH"] | python | 9 | 0 | |
app/email.py | import os
from flask import render_template
from flask_mail import Message
from app import create_app
from app import mail
def send_email(recipient, subject, template, **kwargs):
try:
app = os.getenv('APP_NAME', 'FLASK')
msg = Message(
subject + ' ' + app,
sender=os.getenv('MAIL_DEFAULT_SENDER', '[email protected]'),
recipients=[recipient])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
mail.send(msg)
return True
except Exception as e:
print('Failed to send email: ' + str(e))
return False
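# Usage sketch (recipient, subject and template name are hypothetical; the
# template is expected to exist as both <template>.txt and <template>.html):
#   send_email('[email protected]', 'Welcome', 'email/welcome', name='Alice')
# Returns True on success, or False if rendering or sending raised an exception.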
| [] | [] | [
"APP_NAME",
"MAIL_DEFAULT_SENDER"
] | [] | ["APP_NAME", "MAIL_DEFAULT_SENDER"] | python | 2 | 0 | |
Development/models/DWT2.py | import os
import cv2
import math
import pywt
import numpy as np
from utils import mse
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist, cifar10
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # for tensor flow warning
# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
class DWT(layers.Layer):
def __init__(self, name='haar', **kwargs):
super(DWT, self).__init__(**kwargs)
self._name = self.name + "_" + name
# get filter coeffs from 3rd party lib
wavelet = pywt.Wavelet(name)
self.dec_len = wavelet.dec_len
        # decomposition filter low pass and high pass coeffs
db2_lpf = wavelet.dec_lo
db2_hpf = wavelet.dec_hi
        # convert filters into tensors and reshape for convolution math
db2_lpf = tf.constant(db2_lpf[::-1])
self.db2_lpf = tf.reshape(db2_lpf, (1, wavelet.dec_len, 1, 1))
db2_hpf = tf.constant(db2_hpf[::-1])
self.db2_hpf = tf.reshape(db2_hpf, (1, wavelet.dec_len, 1, 1))
self.conv_type = "VALID"
self.border_padd = "SYMMETRIC"
def build(self, input_shape):
# filter dims should be bigger if input is not gray scale
if input_shape[-1] != 1:
self.db2_lpf = tf.repeat(self.db2_lpf, input_shape[-1], axis=-1)
self.db2_hpf = tf.repeat(self.db2_hpf, input_shape[-1], axis=-1)
def call(self, inputs, training=None, mask=None):
        # border padding: symmetric, add columns
inputs_pad = tf.pad(inputs, [[0, 0], [0, 0], [self.dec_len-1, self.dec_len-1], [0, 0]], self.border_padd)
# approximation conv only rows
a = tf.nn.conv2d(
inputs_pad, self.db2_lpf, padding=self.conv_type, strides=[1, 1, 1, 1],
)
# details conv only rows
d = tf.nn.conv2d(
inputs_pad, self.db2_hpf, padding=self.conv_type, strides=[1, 1, 1, 1],
)
# ds - down sample
a_ds = a[:, :, 1:a.shape[2]:2, :]
d_ds = d[:, :, 1:d.shape[2]:2, :]
        # border padding: symmetric, add rows
a_ds_pad = tf.pad(a_ds, [[0, 0], [self.dec_len-1, self.dec_len-1], [0, 0], [0, 0]], self.border_padd)
d_ds_pad = tf.pad(d_ds, [[0, 0], [self.dec_len-1, self.dec_len-1], [0, 0], [0, 0]], self.border_padd)
        # convolution is done on the rows so we need to
        # transpose the matrix in order to convolve the columns
a_ds_pad = tf.transpose(a_ds_pad, perm=[0, 2, 1, 3])
d_ds_pad = tf.transpose(d_ds_pad, perm=[0, 2, 1, 3])
# aa approximation approximation
aa = tf.nn.conv2d(
a_ds_pad, self.db2_lpf, padding=self.conv_type, strides=[1, 1, 1, 1],
)
# ad approximation details
ad = tf.nn.conv2d(
a_ds_pad, self.db2_hpf, padding=self.conv_type, strides=[1, 1, 1, 1],
)
        # da details approximation
da = tf.nn.conv2d(
d_ds_pad, self.db2_lpf, padding=self.conv_type, strides=[1, 1, 1, 1],
)
# dd details details
dd = tf.nn.conv2d(
d_ds_pad, self.db2_hpf, padding=self.conv_type, strides=[1, 1, 1, 1],
)
# transpose back the matrix
aa = tf.transpose(aa, perm=[0, 2, 1, 3])
ad = tf.transpose(ad, perm=[0, 2, 1, 3])
da = tf.transpose(da, perm=[0, 2, 1, 3])
dd = tf.transpose(dd, perm=[0, 2, 1, 3])
# down sample
ll = aa[:, 1:aa.shape[1]:2, :, :]
lh = ad[:, 1:ad.shape[1]:2, :, :]
hl = da[:, 1:da.shape[1]:2, :, :]
hh = dd[:, 1:dd.shape[1]:2, :, :]
        # concatenate all outputs into one tensor
x = tf.concat([ll, lh, hl, hh], axis=-1)
return x
class IDWT(layers.Layer):
def __init__(self, name='haar', **kwargs):
super(IDWT, self).__init__(**kwargs)
self._name = self.name + "_" + name
self.pad_type = "VALID"
self.border_pad = "SYMMETRIC"
# get filter coeffs from 3rd party lib
wavelet = pywt.Wavelet(name)
self.rec_len = wavelet.rec_len
        # reconstruction filter low pass and high pass coeffs
db2_lpf = wavelet.rec_lo
db2_hpf = wavelet.rec_hi
        # convert filters into tensors and reshape for convolution math
db2_lpf = tf.constant(db2_lpf[::-1])
self.db2_lpf = tf.reshape(db2_lpf, (1, wavelet.rec_len, 1, 1))
db2_hpf = tf.constant(db2_hpf[::-1])
self.db2_hpf = tf.reshape(db2_hpf, (1, wavelet.rec_len, 1, 1))
def upsampler2d(self, x):
"""
up sampling with zero insertion between rows and columns
:param x: 4 dim tensor (?, w, h, ch)
:return: up sampled tensor with shape (?, 2*w, 2*h, ch)
"""
# create zero like tensor
zero_tensor = tf.zeros_like(x)
# stack both tensors
stack_rows = tf.stack([x, zero_tensor], axis=3)
# reshape for zero insertion between the rows
stack_rows = tf.reshape(stack_rows, shape=[-1, x.shape[1], x.shape[2]*2, x.shape[3]])
# transpose in order to insert zeros for the columns
stack_rows = tf.transpose(stack_rows, perm=[0, 2, 1, 3])
# create zero like tensor but now like the padded one
zero_tensor_1 = tf.zeros_like(stack_rows)
# stack both tensors
stack_rows_cols = tf.stack([stack_rows, zero_tensor_1], axis=3)
# reshape for zero insertion between the columns
us_padded = tf.reshape(stack_rows_cols, shape=[-1, x.shape[1]*2, x.shape[2]*2, x.shape[3]])
# transpose back to normal
us_padded = tf.transpose(us_padded, perm=[0, 2, 1, 3])
return us_padded
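    # Illustrative sketch (not part of the original module): for a 2x2
    # single-channel input, upsampler2d interleaves zeros after every row and
    # column, doubling both spatial dimensions, e.g.
    #   x = tf.constant([[1., 2.], [3., 4.]], shape=(1, 2, 2, 1))
    #   IDWT().upsampler2d(x)[0, :, :, 0] -> [[1., 0., 2., 0.],
    #                                         [0., 0., 0., 0.],
    #                                         [3., 0., 4., 0.],
    #                                         [0., 0., 0., 0.]]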
def call(self, inputs, training=None, mask=None):
# border padding for convolution with low pass and high pass filters
x = tf.pad(inputs,
[[0, 0], [self.rec_len-1, self.rec_len-1], [self.rec_len-1, self.rec_len-1], [0, 0]],
self.border_pad)
# convert to float32
# x = tf.cast(x, tf.float32)
# GPU works with float 32
# CPU can work with 64 but need to add extra flag
# convert to float64
# x = tf.cast(x, tf.float64)
# extract approximation and details from input tensor
        # TODO: what if tensor shape is bigger than 4?
# and expand the dims for the up sampling
ll = tf.expand_dims(x[:, :, :, 0], axis=-1)
lh = tf.expand_dims(x[:, :, :, 1], axis=-1)
hl = tf.expand_dims(x[:, :, :, 2], axis=-1)
hh = tf.expand_dims(x[:, :, :, 3], axis=-1)
ll_us_pad = self.upsampler2d(ll)
lh_us_pad = self.upsampler2d(lh)
hl_us_pad = self.upsampler2d(hl)
hh_us_pad = self.upsampler2d(hh)
# convolution for the rows
# transpose for the column convolution
# convolution for the column
# transpose back to normal
ll_conv_lpf = tf.nn.conv2d(ll_us_pad, self.db2_lpf, padding=self.pad_type, strides=[1, 1, 1, 1], )
ll_conv_lpf_tr = tf.transpose(ll_conv_lpf, perm=[0, 2, 1, 3])
ll_conv_lpf_lpf = tf.nn.conv2d(ll_conv_lpf_tr, self.db2_lpf, padding=self.pad_type, strides=[1, 1, 1, 1], )
ll_conv_lpf_lpf_tr = tf.transpose(ll_conv_lpf_lpf, perm=[0, 2, 1, 3])
lh_conv_lpf = tf.nn.conv2d(lh_us_pad, self.db2_lpf, padding=self.pad_type, strides=[1, 1, 1, 1], )
lh_conv_lpf_tr = tf.transpose(lh_conv_lpf, perm=[0, 2, 1, 3])
lh_conv_lpf_hpf = tf.nn.conv2d(lh_conv_lpf_tr, self.db2_lpf, padding=self.pad_type, strides=[1, 1, 1, 1], )
lh_conv_lpf_hpf_tr = tf.transpose(lh_conv_lpf_hpf, perm=[0, 2, 1, 3])
hl_conv_hpf = tf.nn.conv2d(hl_us_pad, self.db2_hpf, padding=self.pad_type, strides=[1, 1, 1, 1], )
hl_conv_hpf_tr = tf.transpose(hl_conv_hpf, perm=[0, 2, 1, 3])
hl_conv_hpf_lpf = tf.nn.conv2d(hl_conv_hpf_tr, self.db2_lpf, padding=self.pad_type, strides=[1, 1, 1, 1], )
hl_conv_hpf_lpf_tr = tf.transpose(hl_conv_hpf_lpf, perm=[0, 2, 1, 3])
hh_conv_hpf = tf.nn.conv2d(hh_us_pad, self.db2_hpf, padding=self.pad_type, strides=[1, 1, 1, 1], )
hh_conv_hpf_tr = tf.transpose(hh_conv_hpf, perm=[0, 2, 1, 3])
hh_conv_hpf_hpf = tf.nn.conv2d(hh_conv_hpf_tr, self.db2_hpf, padding=self.pad_type, strides=[1, 1, 1, 1], )
hh_conv_hpf_hpf_tr = tf.transpose(hh_conv_hpf_hpf, perm=[0, 2, 1, 3])
# add all together
reconstructed = tf.add_n([ll_conv_lpf_lpf_tr,
lh_conv_lpf_hpf_tr,
hl_conv_hpf_lpf_tr,
hh_conv_hpf_hpf_tr])
        # crop the padded part
crop = (self.rec_len -1)*2
return reconstructed[:, crop-1:-crop, crop-1:-crop, :]
if __name__ == "__main__":
# (x_train, y_train), (x_test, y_test) = cifar10.load_data()
# x_train = x_train.astype("float32")
# x_test = x_test.astype("float32")
# # x_train = cv2.imread("../input/LennaGrey.png", 0)
# frog = tf.expand_dims(
# x_train[0, :, :, :], 0, name=None
# )
# print("frog shape", frog.shape)
# model = keras.Sequential()
# model.add(keras.Input(shape=(256, 256, 4)))
# model.add(IDWT())
# model.summary()
name = "db2"
img = cv2.imread("../input/LennaGrey.png",0)
img_ex1 = np.expand_dims(img, axis=-1)
img_ex2 = np.expand_dims(img_ex1, axis=0)
model = keras.Sequential()
model.add(layers.InputLayer(input_shape=img_ex1.shape))
model.add(DWT(name=name))
# model.summary()
coeffs = model.predict(img_ex2)
LL = coeffs[0, ..., 0]
LH = coeffs[0, ..., 1]
HL = coeffs[0, ..., 2]
HH = coeffs[0, ..., 3]
model = keras.Sequential()
model.add(layers.InputLayer(input_shape=coeffs[0].shape))
model.add(IDWT(name=name))
model.summary()
my_recon = model.predict(coeffs)
img_my_rec = my_recon[0, :, :, 0]
coeffs2 = pywt.wavedec2(img, name,level=1)
LL2 = coeffs2[0]
LH2 = coeffs2[1][0]
HL2 = coeffs2[1][1]
HH2 = coeffs2[1][2]
recon_pywt = pywt.waverec2(coeffs2, name)
img_pywt_rec = recon_pywt
print("LL mse ", mse.mse(LL, LL2))
print("LH mse ", mse.mse(LH, LH2))
print("HL mse ", mse.mse(HL, HL2))
print("HH mse ", mse.mse(HH, HH2))
print("img mse ", mse.mse(img_pywt_rec, img_my_rec))
difference = cv2.absdiff(np.int32(img_my_rec), np.int32(img_pywt_rec))
_, mask = cv2.threshold(difference.astype("uint8"), 0, 255, cv2.THRESH_BINARY)
cv2.imshow("diff", mask)
cv2.waitKey(0)
pass
# a = model.predict(frog, steps=1)
# #
# approx = tf.image.convert_image_dtype(a[0, ..., 0], dtype=tf.float32)
# with tf.Session() as sess:
# img = sess.run(approx)
# # pass
# #
# img = np.clip(img, 0, 255)
# img = np.ceil(img)
# img = img.astype("uint8")
# with open(r"D:\TEMP\LL_python_layer.raw", "wb") as outfile:
# outfile.write(img) # Write it
# model = models.WaveletCifar10CNN.WaveletCNN((32,32,3), 10)
# model.summary() | [] | [] | [
"CUDA_VISIBLE_DEVICES",
"TF_CPP_MIN_LOG_LEVEL"
] | [] | ["CUDA_VISIBLE_DEVICES", "TF_CPP_MIN_LOG_LEVEL"] | python | 2 | 0 | |
main.py | import os
from dotenv import load_dotenv
from app.utils.cache import Cache
from app import ApplicationFactory
load_dotenv()
with open('./AppleMusicAuthKey.p8', 'r') as f:
os.environ['APPLE_KEY'] = f.read()
TITLE = 'Sharify'
DESCRIPTION = ''
DEBUG = os.environ.get('APP_DEBUG') or False
Cache.instance().init()
app = ApplicationFactory(TITLE, DESCRIPTION).create(debug=DEBUG)
| [] | [] | [
"APP_DEBUG",
"APPLE_KEY"
] | [] | ["APP_DEBUG", "APPLE_KEY"] | python | 2 | 0 | |
registry/memory_test.go | package registry
import (
"fmt"
"os"
"testing"
"time"
)
var (
testData = map[string][]*Service{
"foo": {
{
Name: "foo",
Version: "1.0.0",
Nodes: []*Node{
{
Id: "foo-1.0.0-123",
Address: "localhost:9999",
},
{
Id: "foo-1.0.0-321",
Address: "localhost:9999",
},
},
},
{
Name: "foo",
Version: "1.0.1",
Nodes: []*Node{
{
Id: "foo-1.0.1-321",
Address: "localhost:6666",
},
},
},
{
Name: "foo",
Version: "1.0.3",
Nodes: []*Node{
{
Id: "foo-1.0.3-345",
Address: "localhost:8888",
},
},
},
},
"bar": {
{
Name: "bar",
Version: "default",
Nodes: []*Node{
{
Id: "bar-1.0.0-123",
Address: "localhost:9999",
},
{
Id: "bar-1.0.0-321",
Address: "localhost:9999",
},
},
},
{
Name: "bar",
Version: "latest",
Nodes: []*Node{
{
Id: "bar-1.0.1-321",
Address: "localhost:6666",
},
},
},
},
}
)
func TestMemoryRegistry(t *testing.T) {
m := NewMemoryRegistry()
fn := func(k string, v []*Service) {
services, err := m.GetService(k)
if err != nil {
t.Errorf("Unexpected error getting service %s: %v", k, err)
}
if len(services) != len(v) {
t.Errorf("Expected %d services for %s, got %d", len(v), k, len(services))
}
for _, service := range v {
var seen bool
for _, s := range services {
if s.Version == service.Version {
seen = true
break
}
}
if !seen {
t.Errorf("expected to find version %s", service.Version)
}
}
}
// register data
for _, v := range testData {
serviceCount := 0
for _, service := range v {
if err := m.Register(service); err != nil {
t.Errorf("Unexpected register error: %v", err)
}
serviceCount++
// after the service has been registered we should be able to query it
services, err := m.GetService(service.Name)
if err != nil {
t.Errorf("Unexpected error getting service %s: %v", service.Name, err)
}
if len(services) != serviceCount {
t.Errorf("Expected %d services for %s, got %d", serviceCount, service.Name, len(services))
}
}
}
// using test data
for k, v := range testData {
fn(k, v)
}
services, err := m.ListServices()
if err != nil {
t.Errorf("Unexpected error when listing services: %v", err)
}
totalServiceCount := 0
for _, testSvc := range testData {
for range testSvc {
totalServiceCount++
}
}
if len(services) != totalServiceCount {
t.Errorf("Expected total service count: %d, got: %d", totalServiceCount, len(services))
}
// deregister
for _, v := range testData {
for _, service := range v {
if err := m.Deregister(service); err != nil {
t.Errorf("Unexpected deregister error: %v", err)
}
}
}
// after all the service nodes have been deregistered we should not get any results
for _, v := range testData {
for _, service := range v {
services, err := m.GetService(service.Name)
if err != ErrNotFound {
t.Errorf("Expected error: %v, got: %v", ErrNotFound, err)
}
if len(services) != 0 {
t.Errorf("Expected %d services for %s, got %d", 0, service.Name, len(services))
}
}
}
}
func TestMemoryRegistryTTL(t *testing.T) {
m := NewMemoryRegistry()
for _, v := range testData {
for _, service := range v {
if err := m.Register(service, RegisterTTL(time.Millisecond)); err != nil {
t.Fatal(err)
}
}
}
time.Sleep(ttlPruneTime * 2)
for name := range testData {
svcs, err := m.GetService(name)
if err != nil {
t.Fatal(err)
}
for _, svc := range svcs {
if len(svc.Nodes) > 0 {
t.Fatalf("Service %q still has nodes registered", name)
}
}
}
}
func TestMemoryRegistryTTLConcurrent(t *testing.T) {
concurrency := 1000
waitTime := ttlPruneTime * 2
m := NewMemoryRegistry()
for _, v := range testData {
for _, service := range v {
if err := m.Register(service, RegisterTTL(waitTime/2)); err != nil {
t.Fatal(err)
}
}
}
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
t.Logf("test will wait %v, then check TTL timeouts", waitTime)
}
errChan := make(chan error, concurrency)
syncChan := make(chan struct{})
for i := 0; i < concurrency; i++ {
go func() {
<-syncChan
for name := range testData {
svcs, err := m.GetService(name)
if err != nil {
errChan <- err
return
}
for _, svc := range svcs {
if len(svc.Nodes) > 0 {
errChan <- fmt.Errorf("Service %q still has nodes registered", name)
return
}
}
}
errChan <- nil
}()
}
time.Sleep(waitTime)
close(syncChan)
for i := 0; i < concurrency; i++ {
if err := <-errChan; err != nil {
t.Fatal(err)
}
}
}
| [
"\"IN_TRAVIS_CI\""
] | [] | [
"IN_TRAVIS_CI"
] | [] | ["IN_TRAVIS_CI"] | go | 1 | 0 | |
backend/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'demoapp_33978.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
tests/stores/test_mongolike.py | import os
import pytest
import mongomock.collection
import pymongo.collection
from datetime import datetime
from maggma.core import StoreError
from maggma.stores import MongoStore, MemoryStore, JSONStore, MongoURIStore
from maggma.validators import JSONSchemaValidator
@pytest.fixture
def mongostore():
store = MongoStore("maggma_test", "test")
store.connect()
yield store
store._collection.drop()
@pytest.fixture
def memorystore():
store = MemoryStore()
store.connect()
return store
@pytest.fixture
def jsonstore(test_dir):
files = []
for f in ["a.json", "b.json"]:
files.append(test_dir / "test_set" / f)
return JSONStore(files)
def test_mongostore_connect():
mongostore = MongoStore("maggma_test", "test")
assert mongostore._collection is None
mongostore.connect()
assert isinstance(mongostore._collection, pymongo.collection.Collection)
def test_mongostore_connect_via_ssh():
mongostore = MongoStore("maggma_test", "test")
class fake_pipe:
remote_bind_address = ("localhost", 27017)
local_bind_address = ("localhost", 37017)
server = fake_pipe()
mongostore.connect(ssh_tunnel=server)
assert isinstance(mongostore._collection, pymongo.collection.Collection)
def test_mongostore_query(mongostore):
mongostore._collection.insert_one({"a": 1, "b": 2, "c": 3})
assert mongostore.query_one(properties=["a"])["a"] == 1
assert mongostore.query_one(properties=["a"])["a"] == 1
assert mongostore.query_one(properties=["b"])["b"] == 2
assert mongostore.query_one(properties=["c"])["c"] == 3
def test_mongostore_count(mongostore):
mongostore._collection.insert_one({"a": 1, "b": 2, "c": 3})
assert mongostore.count() == 1
mongostore._collection.insert_one({"aa": 1, "b": 2, "c": 3})
assert mongostore.count() == 2
assert mongostore.count({"a": 1}) == 1
def test_mongostore_distinct(mongostore):
mongostore._collection.insert_one({"a": 1, "b": 2, "c": 3})
mongostore._collection.insert_one({"a": 4, "d": 5, "e": 6, "g": {"h": 1}})
assert set(mongostore.distinct("a")) == {1, 4}
# Test list distinct functionality
mongostore._collection.insert_one({"a": 4, "d": 6, "e": 7})
mongostore._collection.insert_one({"a": 4, "d": 6, "g": {"h": 2}})
# Test distinct subdocument functionality
ghs = mongostore.distinct("g.h")
assert set(ghs) == {1, 2}
# Test when key doesn't exist
assert mongostore.distinct("blue") == []
# Test when null is a value
mongostore._collection.insert_one({"i": None})
assert mongostore.distinct("i") == [None]
def test_mongostore_update(mongostore):
mongostore.update({"e": 6, "d": 4}, key="e")
assert (
mongostore.query_one(criteria={"d": {"$exists": 1}}, properties=["d"])["d"] == 4
)
mongostore.update([{"e": 7, "d": 8, "f": 9}], key=["d", "f"])
assert mongostore.query_one(criteria={"d": 8, "f": 9}, properties=["e"])["e"] == 7
mongostore.update([{"e": 11, "d": 8, "f": 9}], key=["d", "f"])
assert mongostore.query_one(criteria={"d": 8, "f": 9}, properties=["e"])["e"] == 11
test_schema = {
"type": "object",
"properties": {"e": {"type": "integer"}},
"required": ["e"],
}
mongostore.validator = JSONSchemaValidator(schema=test_schema)
mongostore.update({"e": 100, "d": 3}, key="e")
# Non strict update
mongostore.update({"e": "abc", "d": 3}, key="e")
def test_mongostore_groupby(mongostore):
mongostore.update(
[
{"e": 7, "d": 9, "f": 9},
{"e": 7, "d": 9, "f": 10},
{"e": 8, "d": 9, "f": 11},
{"e": 9, "d": 10, "f": 12},
],
key="f",
)
data = list(mongostore.groupby("d"))
assert len(data) == 2
grouped_by_9 = [g[1] for g in data if g[0]["d"] == 9][0]
assert len(grouped_by_9) == 3
grouped_by_10 = [g[1] for g in data if g[0]["d"] == 10][0]
assert len(grouped_by_10) == 1
data = list(mongostore.groupby(["e", "d"]))
assert len(data) == 3
def test_mongostore_remove_docs(mongostore):
mongostore._collection.insert_one({"a": 1, "b": 2, "c": 3})
mongostore._collection.insert_one({"a": 4, "d": 5, "e": 6, "g": {"h": 1}})
mongostore.remove_docs({"a": 1})
assert len(list(mongostore.query({"a": 4}))) == 1
assert len(list(mongostore.query({"a": 1}))) == 0
def test_mongostore_from_db_file(mongostore, db_json):
ms = MongoStore.from_db_file(db_json)
ms.connect()
assert ms._collection.full_name == "maggma_tests.tmp"
def test_mongostore_from_collection(mongostore, db_json):
ms = MongoStore.from_db_file(db_json)
ms.connect()
other_ms = MongoStore.from_collection(ms._collection)
assert ms._collection.full_name == other_ms._collection.full_name
assert ms.database == other_ms.database
def test_mongostore_name(mongostore):
assert mongostore.name == "mongo://localhost/maggma_test/test"
def test_ensure_index(mongostore):
assert mongostore.ensure_index("test_key")
# TODO: How to check for exception?
def test_mongostore_last_updated(mongostore):
assert mongostore.last_updated == datetime.min
start_time = datetime.utcnow()
mongostore._collection.insert_one({mongostore.key: 1, "a": 1})
with pytest.raises(StoreError) as cm:
mongostore.last_updated
assert cm.match(mongostore.last_updated_field)
mongostore.update(
[{mongostore.key: 1, "a": 1, mongostore.last_updated_field: datetime.utcnow()}]
)
assert mongostore.last_updated > start_time
def test_mongostore_newer_in(mongostore):
target = MongoStore("maggma_test", "test_target")
target.connect()
# make sure docs are newer in mongostore then target and check updated_keys
target.update(
[
{mongostore.key: i, mongostore.last_updated_field: datetime.utcnow()}
for i in range(10)
]
)
# Update docs in source
mongostore.update(
[
{mongostore.key: i, mongostore.last_updated_field: datetime.utcnow()}
for i in range(10)
]
)
assert len(target.newer_in(mongostore)) == 10
assert len(target.newer_in(mongostore, exhaustive=True)) == 10
assert len(mongostore.newer_in(target)) == 0
target._collection.drop()
# Memory store tests
def test_memory_store_connect():
memorystore = MemoryStore()
assert memorystore._collection is None
memorystore.connect()
assert isinstance(memorystore._collection, mongomock.collection.Collection)
with pytest.warns(UserWarning, match="SSH Tunnel not needed for MemoryStore"):
class fake_pipe:
remote_bind_address = ("localhost", 27017)
local_bind_address = ("localhost", 37017)
server = fake_pipe()
memorystore.connect(ssh_tunnel=server)
def test_groupby(memorystore):
memorystore.update(
[
{"e": 7, "d": 9, "f": 9},
{"e": 7, "d": 9, "f": 10},
{"e": 8, "d": 9, "f": 11},
{"e": 9, "d": 10, "f": 12},
],
key="f",
)
data = list(memorystore.groupby("d"))
assert len(data) == 2
grouped_by_9 = [g[1] for g in data if g[0]["d"] == 9][0]
assert len(grouped_by_9) == 3
grouped_by_10 = [g[1] for g in data if g[0]["d"] == 10][0]
assert len(grouped_by_10) == 1
data = list(memorystore.groupby(["e", "d"]))
assert len(data) == 3
memorystore.update(
[
{"e": {"d": 9}, "f": 9},
{"e": {"d": 9}, "f": 10},
{"e": {"d": 9}, "f": 11},
{"e": {"d": 10}, "f": 12},
],
key="f",
)
data = list(memorystore.groupby("e.d"))
assert len(data) == 2
def test_json_store_load(jsonstore, test_dir):
jsonstore.connect()
assert len(list(jsonstore.query())) == 20
jsonstore = JSONStore(test_dir / "test_set" / "c.json.gz")
jsonstore.connect()
assert len(list(jsonstore.query())) == 20
def test_eq(mongostore, memorystore, jsonstore):
assert mongostore == mongostore
assert memorystore == memorystore
assert jsonstore == jsonstore
assert mongostore != memorystore
assert mongostore != jsonstore
assert memorystore != jsonstore
@pytest.mark.skipif(
"mongodb+srv" not in os.environ.get("MONGODB_SRV_URI", ""),
reason="requires special mongodb+srv URI",
)
def test_mongo_uri():
uri = os.environ["MONGODB_SRV_URI"]
store = MongoURIStore(uri, database="mp_core", collection_name="xas")
store.connect()
is_name = store.name is uri
    # This is to try and keep the secret safe
assert is_name
with pytest.warns(UserWarning, match="SSH Tunnel not needed for MongoURIStore"):
class fake_pipe:
remote_bind_address = ("localhost", 27017)
local_bind_address = ("localhost", 37017)
server = fake_pipe()
store.connect(ssh_tunnel=server)
| [] | [] | [
"MONGODB_SRV_URI"
] | [] | ["MONGODB_SRV_URI"] | python | 1 | 0 | |
src/main/java/com/github/sergueik/utils/ChromePagePerformanceUtil.java | package com.github.sergueik.utils;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.regex.Pattern;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import org.openqa.selenium.By;
import org.openqa.selenium.JavascriptExecutor;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.chrome.ChromeDriver;
import org.openqa.selenium.edge.EdgeDriver;
import org.openqa.selenium.support.ui.ExpectedCondition;
import org.openqa.selenium.support.ui.ExpectedConditions;
import org.openqa.selenium.support.ui.WebDriverWait;
/**
 * Page timing Javascript utilities supported by Chrome browser, and partially, by Firefox, IE and Edge
* @author: Serguei Kouzmine ([email protected])
*/
public class ChromePagePerformanceUtil {
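    // Illustrative usage (sketch only, not part of the original source):
    //
    //   ChromePagePerformanceUtil helper = ChromePagePerformanceUtil.getInstance();
    //   double loadTime = helper.getLoadTime("https://www.wikipedia.org");
    //   Map<String, Double> timers = helper.getPageEventTimers();
    //
    // The URL is an arbitrary example; the call assumes the matching browser
    // driver binary is present in the Downloads folder resolved below.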
static String browser = "chrome";
public static void setBrowser(String browser) {
ChromePagePerformanceUtil.browser = browser;
}
private static String osName = CommonUtils.getOSName();
private static String performanceTimerScript = String.format(
"%s\nreturn window.timing.getTimes();",
getScriptContent("performance_script.js"));
private static String performanceNetworkScript = String.format(
"%s\nreturn window.timing.getNetwork({stringify:true});",
getScriptContent("performance_script.js"));
private static final String browserDriverPath = osName.contains("windows")
? String.format("%s/Downloads", System.getenv("USERPROFILE"))
: String.format("%s/Downloads", System.getenv("HOME"));
private static final Map<String, String> browserDrivers = new HashMap<>();
static {
browserDrivers.put("chrome",
osName.contains("windows") ? "chromedriver.exe" : "chromedriver");
browserDrivers.put("firefox",
osName.contains("windows") ? "geckodriver.exe" : "geckodriver");
browserDrivers.put("edge", "MicrosoftWebDriver.exe");
}
private static final Map<String, String> browserDriverProperties = new HashMap<>();
static {
browserDriverProperties.put("chrome", "webdriver.chrome.driver");
browserDriverProperties.put("firefox", "webdriver.gecko.driver");
browserDriverProperties.put("edge", "webdriver.edge.driver");
}
    private static final String simplePerformanceTimingsScript = "var performance = window.performance; var timings = performance.timing;"
+ "return timings;";
private Map<String, Double> pageElementTimers;
public Map<String, Double> getPageElementTimers() {
return pageElementTimers;
}
private Map<String, Double> pageEventTimers;
public Map<String, Double> getPageEventTimers() {
return pageEventTimers;
}
private static boolean debug = false;
public void setDebug(boolean debug) {
ChromePagePerformanceUtil.debug = debug;
}
private int flexibleWait = 30;
public int getFlexibleWait() {
return flexibleWait;
}
public void setFlexibleWait(int flexibleWait) {
this.flexibleWait = flexibleWait;
}
private static ChromePagePerformanceUtil ourInstance = new ChromePagePerformanceUtil();
public static ChromePagePerformanceUtil getInstance() {
return ourInstance;
}
private ChromePagePerformanceUtil() {
}
public double getLoadTime(WebDriver driver, String endUrl) {
WebDriverWait wait = new WebDriverWait(driver, flexibleWait);
driver.navigate().to(endUrl);
waitPageToLoad(driver, wait);
setTimer(driver);
return calculateLoadTime();
}
public double getLoadTime(WebDriver driver, By navigator) {
WebDriverWait wait = new WebDriverWait(driver, flexibleWait);
wait.until(ExpectedConditions.presenceOfElementLocated(navigator)).click();
waitPageToLoad(driver, wait);
setTimer(driver);
return calculateLoadTime();
}
public double getLoadTime(WebDriver driver, String endUrl, By navigator) {
WebDriverWait wait = new WebDriverWait(driver, flexibleWait);
driver.navigate().to(endUrl);
wait.until(ExpectedConditions.presenceOfElementLocated(navigator)).click();
waitPageToLoad(driver, wait);
setTimer(driver);
setTimerNew(driver);
return calculateLoadTime();
}
public double getLoadTime(String endUrl) {
WebDriver driver = null;
System.setProperty(browserDriverProperties.get(browser),
osName.contains("windows")
? new File(String.format("%s/%s", browserDriverPath,
browserDrivers.get(browser))).getAbsolutePath()
: String.format("%s/%s", browserDriverPath,
browserDrivers.get(browser)));
System.err.println("browser: " + browser);
if (browser.contains("edge")) {
// http://www.automationtestinghub.com/selenium-3-launch-microsoft-edge-with-microsoftwebdriver/
// This version of MicrosoftWebDriver.exe is not compatible with the
// installed version of Windows 10.
// observed with Windows 10 build 15063 (10.0.15063.0),
// MicrosoftWebDriver.exe build 17134 (10.0.17134.1)).
//
try {
driver = new EdgeDriver();
} catch (Exception e) {
System.err.println("Exception (ignord): " + e.toString());
}
} else {
driver = new ChromeDriver();
}
WebDriverWait wait = new WebDriverWait(driver, flexibleWait);
driver.navigate().to(endUrl);
waitPageToLoad(driver, wait);
setTimer(driver);
// setTimerNew(driver);
return calculateLoadTime();
}
public double getLoadTime(String endUrl, By by) {
WebDriver driver = new ChromeDriver();
WebDriverWait wait = new WebDriverWait(driver, flexibleWait);
driver.navigate().to(endUrl);
if (by != null) {
wait.until(ExpectedConditions.presenceOfElementLocated(by)).click();
}
waitPageToLoad(driver, wait);
setTimer(driver);
return calculateLoadTime();
}
private void waitPageToLoad(WebDriver driver, WebDriverWait wait) {
wait.until(new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
return ((JavascriptExecutor) driver)
.executeScript("return document.readyState").toString()
.equals("complete");
}
});
}
private void setTimer(WebDriver driver) {
String result = ((JavascriptExecutor) driver)
.executeScript(
performanceTimerScript /* simplePerformanceTimingsScript */)
.toString();
if (result == null) {
throw new RuntimeException("result is null");
}
if (debug) {
System.err.println("Processing result: " + result);
}
this.pageEventTimers = createDateMap(result);
}
private double calculateLoadTime() {
return pageEventTimers.get("unloadEventStart");
}
// Example data:
// payload = "[{redirectCount=0, encodedBodySize=64518, unloadEventEnd=0,
// responseEnd=4247.699999992619, domainLookupEnd=2852.7999999932945,
// unloadEventStart=0, domContentLoadedEventStart=4630.699999994249,
// type=navigate, decodedBodySize=215670, duration=5709.000000002561,
// redirectStart=0, connectEnd=3203.5000000032596, toJSON={},
// requestStart=3205.499999996391, initiatorType=beacon}]";
// TODO: use org.json
public static Map<String, Double> createDateMap(String payload) {
Map<String, Double> eventData = new HashMap<>();
Date currDate = new Date();
payload = payload.substring(1, payload.length() - 1);
String[] pairs = payload.split(",");
for (String pair : pairs) {
String[] values = pair.split("=");
if (values[0].trim().toLowerCase().compareTo("tojson") != 0) {
if (debug) {
System.err.println("Collecting: " + pair);
}
eventData.put(values[0].trim(),
((currDate.getTime() - Long.valueOf(values[1]))) / 1000.0);
}
}
return eventData;
}
// for simple calculation
// compute the difference between
// Load Event End and Navigation Event Start as
// Page Load Time
// origin:
// https://github.com/janaavula/Selenium-Response-Time/blob/master/src/navtimer/Navigation.java
public static long timerOperation(WebDriver driver, String comment) {
JavascriptExecutor js = (JavascriptExecutor) driver;
        long loadEventEnd = (Long) js
                .executeScript("return window.performance.timing.loadEventEnd;");
        long navigationStart = (Long) js
                .executeScript("return window.performance.timing.navigationStart;");
// System.out.println("Navigation start is " + (navigationStart) + " milli
// seconds.");
// System.out.println("Load Event end is " + (loadEventEnd) + " milli
// seconds.");
long pageLoadTime = loadEventEnd - navigationStart;
if (debug)
System.err.println(comment + " Load Time is " + pageLoadTime + " ms");
return pageLoadTime;
}
private Map<String, Double> createDateMapFromJSON(String payload)
throws JSONException {
if (debug) {
System.err.println("payload: " + payload);
}
List<Map<String, String>> result = new ArrayList<>();
// select columns to collect
        Pattern columnSelectionPattern = Pattern.compile("(?:name|duration)");
// ignore page events
List<String> events = new ArrayList<>(Arrays.asList(new String[] {
"first-contentful-paint", "first-paint", "intentmedia.all.end",
"intentmedia.all.start", "intentmedia.core.fetch.page.request",
"intentmedia.core.fetch.page.response", "intentmedia.core.init.end",
"intentmedia.core.init.start", "intentmedia.core.newPage.end",
"intentmedia.core.newPage.start", "intentmedia.core.scriptLoader.end",
"intentmedia.core.scriptLoader.start",
"intentmedia.sca.fetch.config.request",
"intentmedia.sca.fetch.config.response" }));
Pattern nameSelectionPattern = Pattern
.compile(String.format("(?:%s)", String.join("|", events)));
JSONArray jsonData = new JSONArray(payload);
for (int row = 0; row < jsonData.length(); row++) {
JSONObject jsonObj = new JSONObject(jsonData.get(row).toString());
// assertThat(jsonObj, notNullValue());
Iterator<String> dataKeys = jsonObj.keys();
Map<String, String> dataRow = new HashMap<>();
while (dataKeys.hasNext()) {
String dataKey = dataKeys.next();
                if (columnSelectionPattern.matcher(dataKey).find()) {
dataRow.put(dataKey, jsonObj.get(dataKey).toString());
}
}
// only collect page elements, skip events
if (!nameSelectionPattern.matcher(dataRow.get("name")).find()) {
result.add(dataRow);
}
}
assertTrue(result.size() > 0);
System.err.println(String.format("Added %d rows", result.size()));
if (debug) {
for (Map<String, String> resultRow : result) {
Set<String> dataKeys = resultRow.keySet();
for (String dataKey : dataKeys) {
System.err.println(dataKey + " = " + resultRow.get(dataKey));
}
}
}
Map<String, Double> pageObjectTimers = new HashMap<>();
for (Map<String, String> row : result) {
try {
pageObjectTimers.put(row.get("name"),
java.lang.Double.parseDouble(row.get("duration")) / 1000.0);
} catch (NumberFormatException e) {
pageObjectTimers.put(row.get("name"), 0.0);
}
}
if (debug) {
Set<String> names = pageObjectTimers.keySet();
for (String name : names) {
System.err.println(name + " = " + pageObjectTimers.get(name));
}
}
return pageObjectTimers;
}
private void setTimerNew(WebDriver driver) {
this.pageElementTimers = createDateMapFromJSON(((JavascriptExecutor) driver)
.executeScript(performanceNetworkScript).toString());
}
protected static String getScriptContent(String scriptName) {
try {
final InputStream stream = ChromePagePerformanceUtil.class
.getClassLoader().getResourceAsStream(scriptName);
final byte[] bytes = new byte[stream.available()];
stream.read(bytes);
return new String(bytes, "UTF-8");
} catch (IOException e) {
throw new RuntimeException(scriptName);
}
}
}
| [
"\"USERPROFILE\"",
"\"HOME\""
] | [] | [
"HOME",
"USERPROFILE"
] | [] | ["HOME", "USERPROFILE"] | java | 2 | 0 | |
ms2ldaviz/fixfs.py | import os
import sys
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ms2ldaviz.settings")
import django
django.setup()
import numpy as np
import bisect
import jsonpickle
from basicviz.models import Experiment,Feature,FeatureInstance,Mass2Motif,Mass2MotifInstance,Alpha,Document,BVFeatureSet
if __name__ == '__main__':
es = Experiment.objects.all()
for e in es:
        if e.featureset is None:
            print(e)
docs = Document.objects.filter(experiment = e)
if len(docs) > 0:
doc = docs[0]
fl = []
fl = FeatureInstance.objects.filter(document = doc)
if len(fl)>0:
fs = fl[0].feature.featureset
if fs:
e.featureset = fs
e.save()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
vote_project/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "vote_project.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
EOD_api/test_EOD_api.py | import os
import re
import datetime
import unittest
from io import StringIO
from unittest.mock import patch
import pandas as pd
import EOD_api as eod
TOKEN = os.environ["EOD_TOKEN"]
def date_parser(string):
date_pattern = re.compile("([0-9]{4}-[0-9]{2}-[0-9]{2})[ ]", re.VERBOSE)
return date_pattern.sub(r"\1T", string)
class TestGetEod(unittest.TestCase):
# @classmethod
# def setUp(cls):
# pass
# def tearDown(cls):
# pass
def test_idempotent__addtickers(self):
d1 = eod.OhlcvIntraday(
["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", intraday_frec="5m"
).add_tickers(["MSFT.US"])
d2 = (
eod.OhlcvIntraday(
["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", intraday_frec="5m"
)
.add_tickers(["MSFT.US"])
.add_tickers(["MSFT.US"])
)
self.assertEqual(d1, d2)
def test_idempotent_truncate_dates(self):
d1 = eod.Fundamental(
["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17"
).truncate_dates("2020-10-14", "2020-10-16")
d2 = (
eod.Fundamental(["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17")
.truncate_dates("2020-10-14", "2020-10-16")
.truncate_dates("2020-10-14", "2020-10-16")
)
self.assertEqual(d1, d2)
def test_idempotent_remove_tickers(self):
d1 = eod.Fundamental(
["AAPL.US", "MSFT.US"], TOKEN, "2020-10-13", "2020-10-17"
).remove_tickers(["MSFT.US"])
d2 = (
eod.Fundamental(["AAPL.US", "MSFT.US"], TOKEN, "2020-10-13", "2020-10-17")
.remove_tickers(["MSFT.US"])
.remove_tickers(["MSFT.US"])
)
self.assertEqual(d1, d2)
def test_add_remove(self):
d1 = eod.OhlcvIntraday(["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", "1m")
d2 = (
eod.OhlcvIntraday(["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", "1m")
.add_tickers(["MSFT.US"])
.remove_tickers(["MSFT.US"])
)
self.assertEqual(d1, d2)
def test_remove_all_tickers(self):
with self.assertRaises(Exception):
eod.Ohlcv(["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17").remove_tickers(
["AAPL.US"]
).retrieve_data()
def test_misspelled_input(self):
with self.assertRaises(Exception):
eod.OhlcvIntraday(
["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", intraday_frec="Daoly"
)
def test_ohlcv_data_format_hasnt_changed(
self,
    ):  # Change from before formatting to after formatting
expected_aapl = pd.read_csv(
StringIO(
"""
Date Open High Low Close Adjusted_close Volume
2020-10-13 125.27 125.390 119.65 121.10 120.7110 262330500.0
2020-10-14 121.00 123.030 119.62 121.19 120.8008 151062297.0
2020-10-15 118.72 121.200 118.15 120.71 120.3223 112559203.0
2020-10-16 121.28 121.548 118.81 119.02 118.6377 115393797.0
275 NaN NaN NaN NaN NaN NaN
"""
),
sep="\\s+",
)
url = "https://eodhistoricaldata.com/api/eod/AAPL.US?api_token={}&from=2020-10-13&to=2020-10-17&period=d".format(
TOKEN
)
actual = pd.read_csv(
url,
usecols=[
"Date",
"Volume",
"Open",
"Close",
"High",
"Low",
"Adjusted_close",
],
)
with patch.object(pd, "read_csv") as mock_read:
mock_read.autospec = True
mock_read.return_value = expected_aapl
expected = pd.read_csv(
url,
usecols=[
"Date",
"Volume",
"Open",
"Close",
"High",
"Low",
"Adjusted_close",
],
)
pd.testing.assert_frame_equal(actual, expected, rtol=5e-3)
def test_index_formatting(self):
expected_aapl = pd.read_csv(
StringIO(
"""
Date Open High Low Close Adjusted_close Volume
2020-10-13 125.27 125.390 119.65 121.10 120.7110 262330500.0
2020-10-14 121.00 123.030 119.62 121.19 120.8008 151062297.0
2020-10-15 118.72 121.200 118.15 120.71 120.3223 112559203.0
2020-10-16 121.28 121.548 118.81 119.02 118.6377 115393797.0
275 NaN NaN NaN NaN NaN NaN
"""
),
sep="\\s+",
)
expected_aapl_formatted = pd.read_csv(
StringIO(
date_parser(
"""
Stock Date Open High Low Close Adjusted_close Volume
AAPL.US 2020-10-13 00:00:00+00:00 125.27 125.390 119.65 121.10 120.7110 262330500.0
AAPL.US 2020-10-14 00:00:00+00:00 121.00 123.030 119.62 121.19 120.8008 151062297.0
AAPL.US 2020-10-15 00:00:00+00:00 118.72 121.200 118.15 120.71 120.3223 112559203.0
AAPL.US 2020-10-16 00:00:00+00:00 121.28 121.548 118.81 119.02 118.6377 115393797.0
"""
)
),
sep="\\s+",
index_col=[0, 1],
converters={"Date": lambda col: datetime.datetime.fromisoformat(col)},
)
with patch.object(pd, "read_csv") as mock_read:
mock_read.autospec = True
mock_read.return_value = expected_aapl
formatted_mock = eod.Ohlcv(
["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17"
).retrieve_data()
pd.testing.assert_frame_equal(
formatted_mock, expected_aapl_formatted, rtol=5e-3
)
# TODO? Write more tests:
# Check that the data is concated/merged/joined properly, particularly when the indexes come with Nans
# Check except clauses
# Check duplicate df values
# Assert errors with wrong args
# etc
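    # One commented-out sketch for the list above ("assert errors with wrong
    # args"); it assumes the constructors reject an end date earlier than the
    # start date, which the original suite never asserts, so treat it as
    # illustrative only:
    # def test_reversed_date_range(self):
    #     with self.assertRaises(Exception):
    #         eod.Ohlcv(["AAPL.US"], TOKEN, "2020-10-17", "2020-10-13").retrieve_data()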
# expected_ohlcv_concatted = pd.read_csv( StringIO( date_parser( """
# Stock Date Gmtoffset Datetime Open High Low Close Volume Returns
# BP.LSE 2020-10-13 00:00:00+00:00 NaN NaN NaN NaN NaN NaN NaN NaN
# BP.LSE 2020-10-14 00:00:00+00:00 0.0 2020-10-13 15:25:00 213.649993 214.000000 213.550003 213.856994 1210380.0 -0.001601
# BP.LSE 2020-10-15 00:00:00+00:00 0.0 2020-10-14 15:25:00 213.000000 213.149993 212.600006 212.649993 1182246.0 0.019660
# BP.LSE 2020-10-16 00:00:00+00:00 0.0 2020-10-15 15:25:00 207.149993 207.199996 206.500000 206.850006 1626720.0 -0.013826
# AAPL.US 2020-10-13 00:00:00+00:00 NaN NaN NaN NaN NaN NaN NaN NaN
# AAPL.US 2020-10-14 00:00:00+00:00 0.0 2020-10-13 19:55:00 121.139999 121.279998 121.029998 121.050003 4585723.0 0.003648
# AAPL.US 2020-10-15 00:00:00+00:00 0.0 2020-10-14 19:55:00 121.580001 121.709999 121.139999 121.180000 3420583.0 0.015419
# AAPL.US 2020-10-16 00:00:00+00:00 0.0 2020-10-15 19:55:00 120.790000 120.849998 120.580001 120.699996 3436603.0 -0.003550
# MSFT.US 2020-10-13 00:00:00+00:00 NaN NaN NaN NaN NaN NaN NaN NaN
# MSFT.US 2020-10-14 00:00:00+00:00 0.0 2020-10-13 19:55:00 223.320007 223.389999 222.750000 222.830001 1457493.0 0.000651
# MSFT.US 2020-10-15 00:00:00+00:00 0.0 2020-10-14 19:55:00 221.199996 221.414993 220.600006 220.759994 1122912.0 0.012377
# MSFT.US 2020-10-16 00:00:00+00:00 0.0 2020-10-15 19:55:00 219.639999 219.880004 219.490005 219.660003 1201342.0 -0.003900
# """ ) ), sep="\\s+", index_col=[0,1,2], converters = {'Date' : lambda col: datetime.datetime.fromisoformat( col ) \
# , 'Datetime' : lambda col: pd.to_datetime(col, format='%Y-%m-%dT%H:%M:%S', utc=True) } )
if __name__ == "__main__":
unittest.main()
| [] | [] | [
"EOD_TOKEN"
] | [] | ["EOD_TOKEN"] | python | 1 | 0 | |
Data/Juliet-Java/Juliet-Java-v103/000/129/096/CWE190_Integer_Overflow__int_Environment_multiply_17.java | /* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE190_Integer_Overflow__int_Environment_multiply_17.java
Label Definition File: CWE190_Integer_Overflow__int.label.xml
Template File: sources-sinks-17.tmpl.java
*/
/*
* @description
* CWE: 190 Integer Overflow
* BadSource: Environment Read data from an environment variable
* GoodSource: A hardcoded non-zero, non-min, non-max, even number
* Sinks: multiply
* GoodSink: Ensure there will not be an overflow before multiplying data by 2
* BadSink : If data is positive, multiply by 2, which can cause an overflow
* Flow Variant: 17 Control flow: for loops
*
* */
import java.util.logging.Level;
public class CWE190_Integer_Overflow__int_Environment_multiply_17 extends AbstractTestCase
{
public void bad() throws Throwable
{
int data;
/* We need to have one source outside of a for loop in order
* to prevent the Java compiler from generating an error because
* data is uninitialized
*/
data = Integer.MIN_VALUE; /* Initialize data */
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
{
String stringNumber = System.getenv("ADD");
if (stringNumber != null) // avoid NPD incidental warnings
{
try
{
data = Integer.parseInt(stringNumber.trim());
}
catch(NumberFormatException exceptNumberFormat)
{
IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat);
}
}
}
for (int j = 0; j < 1; j++)
{
if(data > 0) /* ensure we won't have an underflow */
{
/* POTENTIAL FLAW: if (data*2) > Integer.MAX_VALUE, this will overflow */
int result = (int)(data * 2);
IO.writeLine("result: " + result);
}
}
}
/* goodG2B() - use goodsource and badsink */
private void goodG2B() throws Throwable
{
int data;
/* FIX: Use a hardcoded number that won't cause underflow, overflow, divide by zero, or loss-of-precision issues */
data = 2;
for (int j = 0; j < 1; j++)
{
if(data > 0) /* ensure we won't have an underflow */
{
/* POTENTIAL FLAW: if (data*2) > Integer.MAX_VALUE, this will overflow */
int result = (int)(data * 2);
IO.writeLine("result: " + result);
}
}
}
/* goodB2G() - use badsource and goodsink*/
private void goodB2G() throws Throwable
{
int data;
data = Integer.MIN_VALUE; /* Initialize data */
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
{
String stringNumber = System.getenv("ADD");
if (stringNumber != null) // avoid NPD incidental warnings
{
try
{
data = Integer.parseInt(stringNumber.trim());
}
catch(NumberFormatException exceptNumberFormat)
{
IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat);
}
}
}
for (int k = 0; k < 1; k++)
{
if(data > 0) /* ensure we won't have an underflow */
{
/* FIX: Add a check to prevent an overflow from occurring */
if (data < (Integer.MAX_VALUE/2))
{
int result = (int)(data * 2);
IO.writeLine("result: " + result);
}
else
{
IO.writeLine("data value is too large to perform multiplication.");
}
}
}
}
public void good() throws Throwable
{
goodG2B();
goodB2G();
}
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
public static void main(String[] args) throws ClassNotFoundException,
InstantiationException, IllegalAccessException
{
mainFromParent(args);
}
}
| [
"\"ADD\"",
"\"ADD\""
] | [] | [
"ADD"
] | [] | ["ADD"] | java | 1 | 0 | |
research/object_detection/export_inference_graph.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Tool to export an object detection model for inference.
Prepares an object detection tensorflow graph for inference using model
configuration and a trained checkpoint. Outputs inference
graph, associated checkpoint files, a frozen inference graph and a
SavedModel (https://tensorflow.github.io/serving/serving_basic.html).
The inference graph contains one of three input nodes depending on the user
specified option.
* `image_tensor`: Accepts a uint8 4-D tensor of shape [None, None, None, 3]
* `encoded_image_string_tensor`: Accepts a 1-D string tensor of shape [None]
containing encoded PNG or JPEG images. Image resolutions are expected to be
the same if more than 1 image is provided.
* `tf_example`: Accepts a 1-D string tensor of shape [None] containing
serialized TFExample protos. Image resolutions are expected to be the same
if more than 1 image is provided.
and the following output nodes returned by the model.postprocess(..):
* `num_detections`: Outputs float32 tensors of the form [batch]
that specifies the number of valid boxes per image in the batch.
* `detection_boxes`: Outputs float32 tensors of the form
[batch, num_boxes, 4] containing detected boxes.
* `detection_scores`: Outputs float32 tensors of the form
[batch, num_boxes] containing class scores for the detections.
* `detection_classes`: Outputs float32 tensors of the form
[batch, num_boxes] containing classes for the detections.
* `detection_masks`: Outputs float32 tensors of the form
[batch, num_boxes, mask_height, mask_width] containing predicted instance
masks for each box if its present in the dictionary of postprocessed
tensors returned by the model.
Notes:
* This tool uses `use_moving_averages` from eval_config to decide which
weights to freeze.
Example Usage:
--------------
python export_inference_graph \
--input_type image_tensor \
--pipeline_config_path path/to/ssd_inception_v2.config \
--trained_checkpoint_prefix path/to/model.ckpt \
--output_directory path/to/exported_model_directory
The expected output would be in the directory
path/to/exported_model_directory (which is created if it does not exist)
with contents:
- inference_graph.pbtxt
- model.ckpt.data-00000-of-00001
- model.ckpt.info
- model.ckpt.meta
- frozen_inference_graph.pb
+ saved_model (a directory)
Config overrides (see the `config_override` flag) are text protobufs
(also of type pipeline_pb2.TrainEvalPipelineConfig) which are used to override
certain fields in the provided pipeline_config_path. These are useful for
making small changes to the inference graph that differ from the training or
eval config.
Example Usage (in which we change the second stage post-processing score
threshold to be 0.5):
python export_inference_graph \
--input_type image_tensor \
--pipeline_config_path path/to/ssd_inception_v2.config \
--trained_checkpoint_prefix path/to/model.ckpt \
--output_directory path/to/exported_model_directory \
--config_override " \
model{ \
faster_rcnn { \
second_stage_post_processing { \
batch_non_max_suppression { \
score_threshold: 0.5 \
} \
} \
} \
}"
"""
import tensorflow as tf
from google.protobuf import text_format
from object_detection import exporter
from object_detection.protos import pipeline_pb2
import os
slim = tf.contrib.slim
flags = tf.app.flags
flags.DEFINE_string('input_type', 'image_tensor', 'Type of input node. Can be '
'one of [`image_tensor`, `encoded_image_string_tensor`, '
'`tf_example`]')
flags.DEFINE_string('input_shape', None,
'If input_type is `image_tensor`, this can explicitly set '
'the shape of this input tensor to a fixed size. The '
'dimensions are to be provided as a comma-separated list '
'of integers. A value of -1 can be used for unknown '
'dimensions. If not specified, for an `image_tensor, the '
'default shape will be partially specified as '
'`[None, None, None, 3]`.')
flags.DEFINE_string('pipeline_config_path', None,
'Path to a pipeline_pb2.TrainEvalPipelineConfig config '
'file.')
flags.DEFINE_string('trained_checkpoint_prefix', None,
'Path to trained checkpoint, typically of the form '
'path/to/model.ckpt')
flags.DEFINE_string('output_directory', None, 'Path to write outputs.')
flags.DEFINE_string('config_override', '',
'pipeline_pb2.TrainEvalPipelineConfig '
'text proto to override pipeline_config_path.')
flags.DEFINE_boolean('write_inference_graph', False,
'If true, writes inference graph to disk.')
flags.DEFINE_string('gpuid', '0',
'Which GPU device to use. Separated by commas. Default is 0.')
tf.app.flags.mark_flag_as_required('pipeline_config_path')
tf.app.flags.mark_flag_as_required('trained_checkpoint_prefix')
tf.app.flags.mark_flag_as_required('output_directory')
FLAGS = flags.FLAGS
def main(_):
os.environ['CUDA_VISIBLE_DEVICES'] = str(FLAGS.gpuid)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f:
text_format.Merge(f.read(), pipeline_config)
text_format.Merge(FLAGS.config_override, pipeline_config)
if FLAGS.input_shape:
input_shape = [
int(dim) if dim != '-1' else None
for dim in FLAGS.input_shape.split(',')
]
else:
input_shape = None
exporter.export_inference_graph(
FLAGS.input_type, pipeline_config, FLAGS.trained_checkpoint_prefix,
FLAGS.output_directory, input_shape=input_shape,
write_inference_graph=FLAGS.write_inference_graph)
if __name__ == '__main__':
tf.app.run() | [] | [] | [
"CUDA_VISIBLE_DEVICES"
] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
internal/provider/resource_rediscloud_subscription_test.go | package provider
import (
"context"
"flag"
"fmt"
"os"
"regexp"
"strconv"
"testing"
"github.com/RedisLabs/rediscloud-go-api/redis"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
)
var contractFlag = flag.Bool("contract", false,
"Add this flag '-contract' to run tests for contract associated accounts")
func TestAccResourceRedisCloudSubscription_createWithDatabase(t *testing.T) {
name := acctest.RandomWithPrefix(testResourcePrefix)
password := acctest.RandString(20)
resourceName := "rediscloud_subscription.example"
testCloudAccountName := os.Getenv("AWS_TEST_CLOUD_ACCOUNT_NAME")
var subId int
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t); testAccAwsPreExistingCloudAccountPreCheck(t) },
ProviderFactories: providerFactories,
CheckDestroy: testAccCheckSubscriptionDestroy,
Steps: []resource.TestStep{
{
Config: fmt.Sprintf(testAccResourceRedisCloudSubscriptionOneDb, testCloudAccountName, name, 1, password),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr(resourceName, "name", name),
resource.TestCheckResourceAttr(resourceName, "cloud_provider.0.provider", "AWS"),
resource.TestCheckResourceAttr(resourceName, "cloud_provider.0.region.0.preferred_availability_zones.#", "1"),
resource.TestCheckResourceAttrSet(resourceName, "cloud_provider.0.region.0.networks.0.networking_subnet_id"),
resource.TestCheckResourceAttr(resourceName, "database.#", "1"),
resource.TestMatchResourceAttr(resourceName, "database.0.db_id", regexp.MustCompile("^[1-9][0-9]*$")),
resource.TestCheckResourceAttrSet(resourceName, "database.0.password"),
resource.TestCheckResourceAttr(resourceName, "database.0.name", "tf-database"),
resource.TestCheckResourceAttr(resourceName, "database.0.memory_limit_in_gb", "1"),
func(s *terraform.State) error {
r := s.RootModule().Resources[resourceName]
var err error
subId, err = strconv.Atoi(r.Primary.ID)
if err != nil {
return err
}
client := testProvider.Meta().(*apiClient)
sub, err := client.client.Subscription.Get(context.TODO(), subId)
if err != nil {
return err
}
if redis.StringValue(sub.Name) != name {
return fmt.Errorf("unexpected name value: %s", redis.StringValue(sub.Name))
}
listDb := client.client.Database.List(context.TODO(), subId)
if listDb.Next() != true {
return fmt.Errorf("no database found: %s", listDb.Err())
}
if listDb.Err() != nil {
return listDb.Err()
}
return nil
},
),
},
{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
},
},
})
}
func TestAccResourceRedisCloudSubscription_addUpdateDeleteDatabase(t *testing.T) {
if testing.Short() {
t.Skip("Requires manual execution over CI execution")
}
name := acctest.RandomWithPrefix(testResourcePrefix)
password := acctest.RandString(20)
password2 := acctest.RandString(20)
resourceName := "rediscloud_subscription.example"
testCloudAccountName := os.Getenv("AWS_TEST_CLOUD_ACCOUNT_NAME")
var subId int
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t); testAccAwsPreExistingCloudAccountPreCheck(t) },
ProviderFactories: providerFactories,
CheckDestroy: testAccCheckSubscriptionDestroy,
Steps: []resource.TestStep{
{
Config: fmt.Sprintf(testAccResourceRedisCloudSubscriptionOneDb, testCloudAccountName, name, 1, password),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr(resourceName, "name", name),
resource.TestCheckResourceAttr(resourceName, "cloud_provider.0.provider", "AWS"),
resource.TestCheckResourceAttr(resourceName, "cloud_provider.0.region.0.preferred_availability_zones.#", "1"),
resource.TestCheckResourceAttrSet(resourceName, "cloud_provider.0.region.0.networks.0.networking_subnet_id"),
resource.TestCheckResourceAttr(resourceName, "database.#", "1"),
resource.TestMatchResourceAttr(resourceName, "database.0.db_id", regexp.MustCompile("^[1-9][0-9]*$")),
resource.TestCheckResourceAttrSet(resourceName, "database.0.password"),
resource.TestCheckResourceAttr(resourceName, "database.0.name", "tf-database"),
resource.TestCheckResourceAttr(resourceName, "database.0.memory_limit_in_gb", "1"),
func(s *terraform.State) error {
r := s.RootModule().Resources[resourceName]
var err error
subId, err = strconv.Atoi(r.Primary.ID)
if err != nil {
return err
}
client := testProvider.Meta().(*apiClient)
sub, err := client.client.Subscription.Get(context.TODO(), subId)
if err != nil {
return err
}
if redis.StringValue(sub.Name) != name {
return fmt.Errorf("unexpected name value: %s", redis.StringValue(sub.Name))
}
listDb := client.client.Database.List(context.TODO(), subId)
if listDb.Next() != true {
return fmt.Errorf("no database found: %s", listDb.Err())
}
if listDb.Err() != nil {
return listDb.Err()
}
return nil
},
),
},
{
Config: fmt.Sprintf(testAccResourceRedisCloudSubscriptionTwoDbs, testCloudAccountName, name, 2, password, password2),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr(resourceName, "name", name),
resource.TestCheckResourceAttr(resourceName, "database.#", "2"),
resource.TestMatchTypeSetElemNestedAttrs(resourceName, "database.*", map[string]*regexp.Regexp{
"db_id": regexp.MustCompile("^[1-9][0-9]*$"),
"name": regexp.MustCompile("tf-database"),
"protocol": regexp.MustCompile("redis"),
"memory_limit_in_gb": regexp.MustCompile("2"),
}),
resource.TestMatchTypeSetElemNestedAttrs(resourceName, "database.*", map[string]*regexp.Regexp{
"db_id": regexp.MustCompile("^[1-9][0-9]*$"),
"name": regexp.MustCompile("tf-database-2"),
"protocol": regexp.MustCompile("memcached"),
"memory_limit_in_gb": regexp.MustCompile("2"),
}),
func(s *terraform.State) error {
r := s.RootModule().Resources[resourceName]
subId, err := strconv.Atoi(r.Primary.ID)
if err != nil {
return err
}
client := testProvider.Meta().(*apiClient)
nameId, err := getDatabaseNameIdMap(context.TODO(), subId, client)
if err != nil {
return err
}
if _, ok := nameId["tf-database"]; !ok {
return fmt.Errorf("first database doesn't exist")
}
if _, ok := nameId["tf-database-2"]; !ok {
return fmt.Errorf("second database doesn't exist")
}
return nil
},
),
},
{
Config: fmt.Sprintf(testAccResourceRedisCloudSubscriptionOneDb, testCloudAccountName, name, 2, password),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr(resourceName, "name", name),
resource.TestCheckResourceAttr(resourceName, "database.#", "1"),
resource.TestMatchTypeSetElemNestedAttrs(resourceName, "database.*", map[string]*regexp.Regexp{
"db_id": regexp.MustCompile("^[1-9][0-9]*$"),
"name": regexp.MustCompile("tf-database"),
"protocol": regexp.MustCompile("redis"),
"memory_limit_in_gb": regexp.MustCompile("2"),
}),
func(s *terraform.State) error {
r := s.RootModule().Resources[resourceName]
subId, err := strconv.Atoi(r.Primary.ID)
if err != nil {
return err
}
client := testProvider.Meta().(*apiClient)
nameId, err := getDatabaseNameIdMap(context.TODO(), subId, client)
if err != nil {
return err
}
if _, ok := nameId["tf-database"]; !ok {
return fmt.Errorf("first database doesn't exist")
}
if _, ok := nameId["tf-database-2"]; ok {
return fmt.Errorf("second database still exists")
}
return nil
},
),
},
{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
},
},
})
}
func TestAccResourceRedisCloudSubscription_AddAdditionalDatabaseWithModule(t *testing.T) {
if testing.Short() {
t.Skip("Requires manual execution over CI execution")
}
name := acctest.RandomWithPrefix(testResourcePrefix)
password := acctest.RandString(20)
password2 := acctest.RandString(20)
resourceName := "rediscloud_subscription.example"
testCloudAccountName := os.Getenv("AWS_TEST_CLOUD_ACCOUNT_NAME")
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t); testAccAwsPreExistingCloudAccountPreCheck(t) },
ProviderFactories: providerFactories,
CheckDestroy: testAccCheckSubscriptionDestroy,
Steps: []resource.TestStep{
{
Config: fmt.Sprintf(testAccResourceRedisCloudSubscriptionOneDb, testCloudAccountName, name, 1, password),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr(resourceName, "name", name),
resource.TestCheckResourceAttr(resourceName, "database.#", "1"),
resource.TestMatchResourceAttr(resourceName, "database.0.db_id", regexp.MustCompile("^[1-9][0-9]*$")),
resource.TestCheckResourceAttr(resourceName, "database.0.name", "tf-database"),
),
},
{
Config: fmt.Sprintf(testAccResourceRedisCloudSubscriptionTwoDbWithModule, testCloudAccountName, name, 2, password, password2),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr(resourceName, "name", name),
resource.TestCheckResourceAttr(resourceName, "database.#", "2"),
resource.TestMatchResourceAttr(resourceName, "database.1.db_id", regexp.MustCompile("^[1-9][0-9]*$")),
resource.TestCheckResourceAttr(resourceName, "database.1.name", "tf-database-2"),
resource.TestCheckResourceAttr(resourceName, "database.1.module.#", "1"),
resource.TestCheckResourceAttr(resourceName, "database.1.module.0.name", "RediSearch"),
),
},
},
})
}
func TestAccResourceRedisCloudSubscription_AddManageDatabaseReplication(t *testing.T) {
if testing.Short() {
t.Skip("Requires manual execution over CI execution")
}
originResourceName := "rediscloud_subscription.origin"
originSubName := acctest.RandomWithPrefix(testResourcePrefix)
originDatabaseName := "tf-database-origin"
originDatabasePassword := acctest.RandString(20)
replicaResourceName := "rediscloud_subscription.replica"
replicaSubName := acctest.RandomWithPrefix(testResourcePrefix)
replicaDatabaseName := "tf-database-replica"
replicaDatabasePassword := acctest.RandString(20)
testCloudAccountName := os.Getenv("AWS_TEST_CLOUD_ACCOUNT_NAME")
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t); testAccAwsPreExistingCloudAccountPreCheck(t) },
ProviderFactories: providerFactories,
CheckDestroy: testAccCheckSubscriptionDestroy,
Steps: []resource.TestStep{
{
Config: fmt.Sprintf(testAccResourceRedisCloudSubscriptionsWithReplicaDB, testCloudAccountName, originSubName, originDatabaseName, originDatabasePassword, replicaSubName, replicaDatabaseName, replicaDatabasePassword),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr(originResourceName, "name", originSubName),
resource.TestCheckResourceAttr(originResourceName, "database.#", "1"),
resource.TestCheckResourceAttr(originResourceName, "database.0.name", originDatabaseName),
resource.TestCheckResourceAttr(replicaResourceName, "name", replicaSubName),
resource.TestCheckResourceAttr(replicaResourceName, "database.#", "1"),
resource.TestCheckResourceAttr(replicaResourceName, "database.0.name", replicaDatabaseName),
resource.TestCheckResourceAttr(replicaResourceName, "database.0.replica_of.#", "1"),
),
},
{
Config: fmt.Sprintf(testAccResourceRedisCloudSubscriptionsWithoutReplicaDB, testCloudAccountName, originSubName, originDatabaseName, originDatabasePassword, replicaSubName, replicaDatabaseName, replicaDatabasePassword),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr(replicaResourceName, "name", replicaSubName),
resource.TestCheckResourceAttr(replicaResourceName, "database.#", "1"),
resource.TestCheckResourceAttr(replicaResourceName, "database.0.name", replicaDatabaseName),
resource.TestCheckResourceAttr(replicaResourceName, "database.0.replica_of.#", "0"),
),
},
},
})
}
func TestAccResourceRedisCloudSubscription_createUpdateContractPayment(t *testing.T) {
if !*contractFlag {
t.Skip("The '-contract' parameter wasn't provided in the test command.")
}
name := acctest.RandomWithPrefix(testResourcePrefix)
updatedName := fmt.Sprintf("%v-updatedName", name)
password := acctest.RandString(20)
resourceName := "rediscloud_subscription.example"
testCloudAccountName := os.Getenv("AWS_TEST_CLOUD_ACCOUNT_NAME")
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t); testAccAwsPreExistingCloudAccountPreCheck(t) },
ProviderFactories: providerFactories,
CheckDestroy: testAccCheckSubscriptionDestroy,
Steps: []resource.TestStep{
{
Config: fmt.Sprintf(testAccResourceRedisCloudSubscriptionContractPayment, testCloudAccountName, name, 1, password),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr(resourceName, "name", name),
resource.TestCheckResourceAttr(resourceName, "cloud_provider.0.provider", "AWS"),
resource.TestCheckResourceAttr(resourceName, "cloud_provider.0.region.0.preferred_availability_zones.#", "1"),
resource.TestCheckResourceAttrSet(resourceName, "cloud_provider.0.region.0.networks.0.networking_subnet_id"),
resource.TestCheckResourceAttr(resourceName, "database.#", "1"),
resource.TestMatchResourceAttr(resourceName, "database.0.db_id", regexp.MustCompile("^[1-9][0-9]*$")),
resource.TestCheckResourceAttrSet(resourceName, "database.0.password"),
resource.TestCheckResourceAttr(resourceName, "database.0.name", "tf-database"),
resource.TestCheckResourceAttr(resourceName, "database.0.memory_limit_in_gb", "1"),
resource.TestCheckResourceAttrSet(resourceName, "payment_method_id"),
),
},
{
Config: fmt.Sprintf(testAccResourceRedisCloudSubscriptionContractPayment, testCloudAccountName, updatedName, 1, password),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttrSet(resourceName, "payment_method_id"),
resource.TestCheckResourceAttr(resourceName, "name", updatedName),
),
},
},
})
}
func testAccCheckSubscriptionDestroy(s *terraform.State) error {
client := testProvider.Meta().(*apiClient)
for _, r := range s.RootModule().Resources {
if r.Type != "rediscloud_subscription" {
continue
}
subId, err := strconv.Atoi(r.Primary.ID)
if err != nil {
return err
}
subs, err := client.client.Subscription.List(context.TODO())
if err != nil {
return err
}
for _, sub := range subs {
if redis.IntValue(sub.ID) == subId {
return fmt.Errorf("subscription %d still exists", subId)
}
}
}
return nil
}
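// Note (sketch): CheckDestroy runs only after the final test step has destroyed the
// resources; the helper above fails the test if a subscription ID recorded in the
// Terraform state is still returned by the Subscription.List API call.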
const testAccResourceRedisCloudSubscriptionOneDb = `
data "rediscloud_payment_method" "card" {
card_type = "Visa"
}
data "rediscloud_cloud_account" "account" {
exclude_internal_account = true
provider_type = "AWS"
name = "%s"
}
resource "rediscloud_subscription" "example" {
name = "%s"
payment_method_id = data.rediscloud_payment_method.card.id
memory_storage = "ram"
allowlist {
cidrs = ["192.168.0.0/16"]
}
cloud_provider {
provider = data.rediscloud_cloud_account.account.provider_type
cloud_account_id = data.rediscloud_cloud_account.account.id
region {
region = "eu-west-1"
networking_deployment_cidr = "10.0.0.0/24"
preferred_availability_zones = ["eu-west-1a"]
}
}
database {
name = "tf-database"
protocol = "redis"
memory_limit_in_gb = %d
support_oss_cluster_api = true
data_persistence = "none"
replication = false
throughput_measurement_by = "operations-per-second"
password = "%s"
throughput_measurement_value = 10000
source_ips = ["10.0.0.0/8"]
}
}
`
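// Illustrative note: the %s/%d verbs in the HCL template above are filled in by
// fmt.Sprintf in each test step, e.g.
//   fmt.Sprintf(testAccResourceRedisCloudSubscriptionOneDb, testCloudAccountName, name, 1, password)
// substitutes the cloud account name, the subscription name, the database
// memory_limit_in_gb and the database password, in that order.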
const testAccResourceRedisCloudSubscriptionTwoDbs = `
data "rediscloud_payment_method" "card" {
card_type = "Visa"
}
data "rediscloud_cloud_account" "account" {
exclude_internal_account = true
provider_type = "AWS"
name = "%s"
}
resource "rediscloud_subscription" "example" {
name = "%s"
payment_method_id = data.rediscloud_payment_method.card.id
memory_storage = "ram"
allowlist {
cidrs = ["192.168.0.0/16"]
}
cloud_provider {
provider = data.rediscloud_cloud_account.account.provider_type
cloud_account_id = data.rediscloud_cloud_account.account.id
region {
region = "eu-west-1"
networking_deployment_cidr = "10.0.0.0/24"
preferred_availability_zones = ["eu-west-1a"]
}
}
database {
name = "tf-database"
protocol = "redis"
memory_limit_in_gb = %d
support_oss_cluster_api = true
data_persistence = "none"
replication = false
throughput_measurement_by = "operations-per-second"
password = "%s"
throughput_measurement_value = 10000
source_ips = ["10.0.0.0/8"]
}
database {
name = "tf-database-2"
protocol = "memcached"
memory_limit_in_gb = 2
data_persistence = "none"
replication = false
throughput_measurement_by = "number-of-shards"
throughput_measurement_value = 2
password = "%s"
}
}
`
const testAccResourceRedisCloudSubscriptionTwoDbWithModule = `
data "rediscloud_payment_method" "card" {
card_type = "Visa"
}
data "rediscloud_cloud_account" "account" {
exclude_internal_account = true
provider_type = "AWS"
name = "%s"
}
resource "rediscloud_subscription" "example" {
name = "%s"
payment_method_id = data.rediscloud_payment_method.card.id
memory_storage = "ram"
allowlist {
cidrs = ["192.168.0.0/16"]
}
cloud_provider {
provider = data.rediscloud_cloud_account.account.provider_type
cloud_account_id = data.rediscloud_cloud_account.account.id
region {
region = "eu-west-1"
networking_deployment_cidr = "10.0.0.0/24"
preferred_availability_zones = ["eu-west-1a"]
}
}
database {
name = "tf-database"
protocol = "redis"
memory_limit_in_gb = %d
support_oss_cluster_api = true
data_persistence = "none"
replication = false
throughput_measurement_by = "operations-per-second"
password = "%s"
throughput_measurement_value = 10000
source_ips = ["10.0.0.0/8"]
}
database {
name = "tf-database-2"
protocol = "redis"
memory_limit_in_gb = 1
support_oss_cluster_api = true
data_persistence = "none"
replication = false
throughput_measurement_by = "operations-per-second"
password = "%s"
throughput_measurement_value = 10000
source_ips = ["10.0.0.0/8"]
module {
name = "RediSearch"
}
}
}
`
const testAccResourceRedisCloudSubscriptionsWithReplicaDB = `
locals {
test_cloud_account_name = "%s"
origin_sub_name = "%s"
origin_db_name = "%s"
origin_db_password = "%s"
replica_sub_name = "%s"
replica_db_name = "%s"
replica_db_password = "%s"
}
data "rediscloud_payment_method" "card" {
card_type = "Visa"
}
data "rediscloud_cloud_account" "account" {
exclude_internal_account = true
provider_type = "AWS"
name = local.test_cloud_account_name
}
resource "rediscloud_subscription" "origin" {
name = local.origin_sub_name
payment_method_id = data.rediscloud_payment_method.card.id
memory_storage = "ram"
persistent_storage_encryption = false
cloud_provider {
provider = data.rediscloud_cloud_account.account.provider_type
cloud_account_id = data.rediscloud_cloud_account.account.id
region {
region = "eu-west-2"
networking_deployment_cidr = "10.0.0.0/24"
preferred_availability_zones = []
}
}
database {
name = local.origin_db_name
protocol = "redis"
memory_limit_in_gb = 1
data_persistence = "none"
throughput_measurement_by = "operations-per-second"
throughput_measurement_value = 10000
password = local.origin_db_password
}
}
resource "rediscloud_subscription" "replica" {
name = local.replica_sub_name
payment_method_id = data.rediscloud_payment_method.card.id
memory_storage = "ram"
persistent_storage_encryption = false
cloud_provider {
provider = data.rediscloud_cloud_account.account.provider_type
cloud_account_id = data.rediscloud_cloud_account.account.id
region {
region = "eu-west-2"
networking_deployment_cidr = "10.0.0.0/24"
preferred_availability_zones = []
}
}
database {
name = local.replica_db_name
protocol = "redis"
memory_limit_in_gb = 1
data_persistence = "none"
throughput_measurement_by = "operations-per-second"
throughput_measurement_value = 10000
password = local.replica_db_password
replica_of = [ {for d in rediscloud_subscription.origin.database : d.name => "redis://${d.public_endpoint}"}[local.origin_db_name] ]
}
}
`
const testAccResourceRedisCloudSubscriptionsWithoutReplicaDB = `
locals {
test_cloud_account_name = "%s"
origin_sub_name = "%s"
origin_db_name = "%s"
origin_db_password = "%s"
replica_sub_name = "%s"
replica_db_name = "%s"
replica_db_password = "%s"
}
data "rediscloud_payment_method" "card" {
card_type = "Visa"
}
data "rediscloud_cloud_account" "account" {
exclude_internal_account = true
provider_type = "AWS"
name = local.test_cloud_account_name
}
resource "rediscloud_subscription" "origin" {
name = local.origin_sub_name
payment_method_id = data.rediscloud_payment_method.card.id
memory_storage = "ram"
persistent_storage_encryption = false
cloud_provider {
provider = data.rediscloud_cloud_account.account.provider_type
cloud_account_id = data.rediscloud_cloud_account.account.id
region {
region = "eu-west-2"
networking_deployment_cidr = "10.0.0.0/24"
preferred_availability_zones = []
}
}
database {
name = local.origin_db_name
protocol = "redis"
memory_limit_in_gb = 1
data_persistence = "none"
throughput_measurement_by = "operations-per-second"
throughput_measurement_value = 10000
password = local.origin_db_password
}
}
resource "rediscloud_subscription" "replica" {
name = local.replica_sub_name
payment_method_id = data.rediscloud_payment_method.card.id
memory_storage = "ram"
persistent_storage_encryption = false
cloud_provider {
provider = data.rediscloud_cloud_account.account.provider_type
cloud_account_id = data.rediscloud_cloud_account.account.id
region {
region = "eu-west-2"
networking_deployment_cidr = "10.0.0.0/24"
preferred_availability_zones = []
}
}
database {
name = local.replica_db_name
protocol = "redis"
memory_limit_in_gb = 1
data_persistence = "none"
throughput_measurement_by = "operations-per-second"
throughput_measurement_value = 10000
password = local.replica_db_password
}
}
`
const testAccResourceRedisCloudSubscriptionContractPayment = `
data "rediscloud_cloud_account" "account" {
exclude_internal_account = true
provider_type = "AWS"
name = "%s"
}
resource "rediscloud_subscription" "example" {
name = "%s"
memory_storage = "ram"
allowlist {
cidrs = ["192.168.0.0/16"]
}
cloud_provider {
provider = data.rediscloud_cloud_account.account.provider_type
cloud_account_id = data.rediscloud_cloud_account.account.id
region {
region = "eu-west-1"
networking_deployment_cidr = "10.0.0.0/24"
preferred_availability_zones = ["eu-west-1a"]
}
}
database {
name = "tf-database"
protocol = "redis"
memory_limit_in_gb = %d
support_oss_cluster_api = true
data_persistence = "none"
replication = false
throughput_measurement_by = "operations-per-second"
password = "%s"
throughput_measurement_value = 10000
source_ips = ["10.0.0.0/8"]
}
}
`
| [
"\"AWS_TEST_CLOUD_ACCOUNT_NAME\"",
"\"AWS_TEST_CLOUD_ACCOUNT_NAME\"",
"\"AWS_TEST_CLOUD_ACCOUNT_NAME\"",
"\"AWS_TEST_CLOUD_ACCOUNT_NAME\"",
"\"AWS_TEST_CLOUD_ACCOUNT_NAME\""
] | [] | [
"AWS_TEST_CLOUD_ACCOUNT_NAME"
] | [] | ["AWS_TEST_CLOUD_ACCOUNT_NAME"] | go | 1 | 0 | |
send_mail.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/12/13 14:29
# @Author : Alvin
# @File : send_mail.py
import os
# DJANGO_SETTINGS_MODULE must point at the project settings before Django settings are
# accessed, so configure it ahead of importing the mail helpers.
os.environ['DJANGO_SETTINGS_MODULE'] = 'devops.settings'
# from django.core.mail import send_mail
from django.core.mail import EmailMultiAlternatives
if __name__ == '__main__':
# send_mail(
# 'devops test email',
# 'This is a test email from DevOps!',
# '[email protected]',
# ['[email protected]'],
# )
subject, from_email, to = 'devops test email', '[email protected]', '[email protected]'
text_content = 'This is a test email from DevOps! http://127.0.0.1:8000/login'
html_content = '<p>This is a test email from DevOps! <a href="http://127.0.0.1:8000/login" target=blank>http://127.0.0.1:8000/login</a></p>'
msg = EmailMultiAlternatives(subject, text_content, from_email, [to])
msg.attach_alternative(html_content, "text/html")
msg.send() | [] | [] | [
"DJANGO_SETTINGS_MODULE"
] | [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
fetch_packages.py | #!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import os
import errno
import re
import shutil
import subprocess
import sys, getopt
import platform
from time import sleep
_RETRIES = 5
_OPT_VERBOSE = None
_OPT_DRY_RUN = None
_PACKAGE_CACHE='/tmp/cache/' + os.environ['USER'] + '/webui_third_party'
_NODE_MODULES='./node_modules'
_TMP_NODE_MODULES=_PACKAGE_CACHE + '/' + _NODE_MODULES
_TAR_COMMAND = ['tar']
_CACHED_PKG_DISTROS = ('Ubuntu', 'Red Hat', 'CentOS', 'darwin')
from lxml import objectify
def getFilename(pkg, url):
element = pkg.find("local-filename")
if element:
return str(element)
(path, filename) = url.rsplit('/', 1)
m = re.match(r'\w+\?\w+=(.*)', filename)
if m:
filename = m.group(1)
return filename
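# Illustrative examples (URLs are made up, not taken from packages.xml):
#   getFilename(pkg, "http://example.org/files/jquery-1.8.3.tgz") -> "jquery-1.8.3.tgz"
#   getFilename(pkg, "http://example.org/download?file=jquery-1.8.3.tgz") -> "jquery-1.8.3.tgz"
# A <local-filename> element on the package, when present, overrides both cases.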
def setTarCommand():
if isTarGnuVersion():
print 'GNU tar found. We will skip the no-unknown-keyword warning'
global _TAR_COMMAND
_TAR_COMMAND = ['tar', '--warning=no-unknown-keyword']
else:
print 'No GNU tar. will use default tar utility'
def isTarGnuVersion():
cmd = subprocess.Popen(['tar', '--version'],
stdout=subprocess.PIPE)
(output, _) = cmd.communicate()
(first, _) = output.split('\n', 1)
if first.lower().find('gnu') != -1:
return True
return False
def getTarDestination(tgzfile, compress_flag):
cmd = subprocess.Popen( _TAR_COMMAND + [ '--exclude=.*','-' + compress_flag + 'tf', tgzfile],
stdout=subprocess.PIPE)
(output, _) = cmd.communicate()
(first, _) = output.split('\n', 1)
fields = first.split('/')
return fields[0]
def getZipDestination(tgzfile):
cmd = subprocess.Popen(['unzip', '-t', tgzfile],
stdout=subprocess.PIPE)
(output, _) = cmd.communicate()
lines = output.split('\n')
for line in lines:
print line
m = re.search(r'testing:\s+([\w\-\.]+)\/', line)
if m:
return m.group(1)
return None
def getFileDestination(file):
start = file.rfind('/')
if start < 0:
return None
return file[start+1:]
def ApplyPatches(pkg):
stree = pkg.find('patches')
if stree is None:
return
for patch in stree.getchildren():
cmd = ['patch']
if patch.get('strip'):
cmd.append('-p')
cmd.append(patch.get('strip'))
if _OPT_VERBOSE:
print "Patching %s <%s..." % (' '.join(cmd), str(patch))
if not _OPT_DRY_RUN:
fp = open(str(patch), 'r')
proc = subprocess.Popen(cmd, stdin = fp)
proc.communicate()
#def VarSubst(cmdstr, filename):
# return re.sub(r'\${filename}', filename, cmdstr)
def GetOSDistro():
distro = ''
if sys.platform == 'darwin':
return sys.platform
else:
try:
return platform.linux_distribution()[0]
except:
pass
return distro
def DownloadPackage(url, ccfile, pkg):
md5 = pkg.md5
pkg.ccfile = ccfile
if url.find('$distro') != -1:
# Platform specific package download
distro = GetOSDistro()
if distro == '':
md5 = md5.other
# Remove the $distro from the url and try
url = url.replace('/$distro', '')
# Change the pkg format to npm download the dependencies
if pkg.format == 'npm-cached':
pkg.format = 'npm'
else:
# check if we have the distro in our cache
found = False
for cached_pkg in _CACHED_PKG_DISTROS:
if cached_pkg in distro:
distro = cached_pkg
found = True
break
if found == False:
# Remove the $distro from the url and try
url = url.replace('/$distro', '')
# Change the pkg format to npm download the dependencies
md5 = md5.other
if pkg.format == 'npm-cached':
pkg.format = 'npm'
else:
distro = distro.lower().replace(" ", "")
url = url.replace('$distro', distro)
md5 = md5[distro]
pkg.distro = distro
# Change the ccfile, add distro before the package name
idx = ccfile.rfind("/")
pkgCachePath = ccfile[:idx] + "/" + distro
pkg.pkgCachePath = pkgCachePath
pkg.ccfile = pkgCachePath + "/" + ccfile[idx + 1:]
ccfile = pkg.ccfile.text
# Now create the directory
try:
os.makedirs(pkgCachePath)
except OSError:
pass
print url
#Check if the package already exists
if os.path.isfile(ccfile):
md5sum = FindMd5sum(ccfile)
if md5sum == md5:
return pkg
else:
os.remove(ccfile)
retry_count = 0
while True:
subprocess.call(['wget', '--no-check-certificate', '-O', ccfile, url])
md5sum = FindMd5sum(ccfile)
if _OPT_VERBOSE:
print "Calculated md5sum: %s" % md5sum
print "Expected md5sum: %s" % md5
if md5sum == md5:
return pkg
elif retry_count <= _RETRIES:
os.remove(ccfile)
retry_count += 1
sleep(1)
continue
else:
raise RuntimeError("MD5sum %s, expected(%s) doesn't match for the "
"downloaded package %s" % (md5sum, md5, ccfile))
return pkg
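# Sketch of the $distro handling above (the host distro is an assumed example): with
# url = "http://example.org/$distro/pkg.tgz" on an Ubuntu host the url becomes
# "http://example.org/ubuntu/pkg.tgz", md5['ubuntu'] is verified and the file is cached
# under <cache>/ubuntu/pkg.tgz; on a distro not listed in _CACHED_PKG_DISTROS the
# '/$distro' segment is dropped, md5.other is used and 'npm-cached' falls back to 'npm'.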
def ProcessPackage(pkg):
print "Processing %s ..." % (pkg['name'])
url = str(pkg['url'])
filename = getFilename(pkg, url)
ccfile = _PACKAGE_CACHE + '/' + filename
installArguments = pkg.find('install-arguments')
if pkg.format == 'npm-cached':
try:
shutil.rmtree(str(_NODE_MODULES + '/' + pkg['name']))
except OSError as exc:
pass
try:
os.makedirs(_NODE_MODULES)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
print 'mkdirs of ' + _NODE_MODULES + ' failed.. Exiting..'
return
#ccfile = _NODE_MODULES + '/' + filename
pkg = DownloadPackage(url, ccfile, pkg)
#
# Determine the name of the directory created by the package.
# unpack-directory means that we 'cd' to the given directory before
# unpacking.
#
ccfile = pkg.ccfile.text
dest = None
unpackdir = pkg.find('unpack-directory')
if unpackdir:
dest = str(unpackdir)
else:
if pkg.format == 'tgz':
dest = getTarDestination(ccfile, 'z')
elif pkg.format == 'npm-cached':
dest = _NODE_MODULES + '/' + getTarDestination(ccfile, 'z')
elif pkg.format == 'tbz':
dest = getTarDestination(ccfile, 'j')
elif pkg.format == 'zip':
dest = getZipDestination(ccfile)
elif pkg.format == 'npm':
dest = getTarDestination(ccfile, 'z')
elif pkg.format == 'file':
dest = getFileDestination(ccfile)
#
# clean directory before unpacking and applying patches
#
rename = pkg.find('rename')
if rename and pkg.format == 'npm-cached':
rename = _NODE_MODULES + '/' + str(rename)
if rename and os.path.isdir(str(rename)):
if not _OPT_DRY_RUN:
shutil.rmtree(str(rename))
elif dest and os.path.isdir(dest):
if _OPT_VERBOSE:
print "Clean directory %s" % dest
if not _OPT_DRY_RUN:
shutil.rmtree(dest)
if unpackdir:
try:
os.makedirs(str(unpackdir))
except OSError as exc:
pass
cmd = None
if pkg.format == 'tgz':
cmd = _TAR_COMMAND + ['-zxvf', ccfile]
elif pkg.format == 'tbz':
cmd = _TAR_COMMAND + ['-jxvf', ccfile]
elif pkg.format == 'zip':
cmd = ['unzip', '-o', ccfile]
elif pkg.format == 'npm':
newDir = _PACKAGE_CACHE
if 'distro' in pkg:
newDir = newDir + pkg.distro
cmd = ['npm', 'install', ccfile, '--prefix', newDir]
if installArguments:
cmd.append(str(installArguments))
elif pkg.format == 'file':
cmd = ['cp', '-af', ccfile, dest]
elif pkg.format == 'npm-cached':
cmd = _TAR_COMMAND + ['-zxvf', ccfile, '-C', _NODE_MODULES]
else:
print 'Unexpected format: %s' % (pkg.format)
return
print 'Issuing command: %s' % (cmd)
if not _OPT_DRY_RUN:
cd = None
if unpackdir:
cd = str(unpackdir)
if pkg.format == 'npm':
try:
os.makedirs(_NODE_MODULES)
os.makedirs(newDir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
print 'mkdirs of ' + _NODE_MODULES + ' ' + newDir + ' failed.. Exiting..'
return
npmCmd = ['cp', '-af', newDir + "/" + _NODE_MODULES + '/' + pkg['name'],
'./node_modules/']
if os.path.exists(newDir + '/' + pkg['name']):
cmd = npmCmd
else:
try:
p = subprocess.Popen(cmd, cwd = cd)
ret = p.wait()
if ret != 0:
sys.exit('Terminating: ProcessPackage with return code: %d' % ret);
cmd = npmCmd
except OSError:
print ' '.join(cmd) + ' could not be executed, bailing out!'
return
p = subprocess.Popen(cmd, cwd = cd)
ret = p.wait()
if ret != 0:
sys.exit('Terminating: ProcessPackage with return code: %d' % ret);
if rename and dest:
os.rename(dest, str(rename))
ApplyPatches(pkg)
def FindMd5sum(anyfile):
if sys.platform == 'darwin':
cmd = ['md5', '-r']
else:
cmd = ['md5sum']
cmd.append(anyfile)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
stdout, stderr = proc.communicate()
md5sum = stdout.split()[0]
return md5sum
def main(filename):
tree = objectify.parse(filename)
root = tree.getroot()
#Check which version of tar is used and skip warning messages.
setTarCommand()
for object in root.iterchildren():
if object.tag == 'package':
ProcessPackage(object)
if __name__ == '__main__':
try:
opts,args = getopt.getopt(sys.argv[1:],"f:",["file="])
except getopt.GetoptError:
raise RuntimeError("Error in parsing the options/arguments")
xmlfile = None
for opt,arg in opts:
if opt in ("-f","--file"):
xmlfile = arg
os.chdir(os.path.dirname(os.path.realpath(__file__)))
try:
os.makedirs(_PACKAGE_CACHE)
except OSError:
pass
if xmlfile is None:
main('packages.xml')
else:
main(xmlfile)
| [] | [] | [
"USER"
] | [] | ["USER"] | python | 1 | 0 | |
api/client/hijack.go | package client
import (
"crypto/tls"
"fmt"
"io"
"net"
"net/http"
"net/http/httputil"
"os"
"runtime"
"strings"
"github.com/dotcloud/docker/api"
"github.com/dotcloud/docker/dockerversion"
"github.com/dotcloud/docker/pkg/term"
"github.com/dotcloud/docker/utils"
)
func (cli *DockerCli) dial() (net.Conn, error) {
if cli.tlsConfig != nil && cli.proto != "unix" {
return tls.Dial(cli.proto, cli.addr, cli.tlsConfig)
}
return net.Dial(cli.proto, cli.addr)
}
func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer) error {
defer func() {
if started != nil {
close(started)
}
}()
req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), nil)
if err != nil {
return err
}
req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
req.Header.Set("Content-Type", "plain/text")
req.Host = cli.addr
dial, err := cli.dial()
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
}
return err
}
clientconn := httputil.NewClientConn(dial, nil)
defer clientconn.Close()
// Server hijacks the connection, error 'connection closed' expected
clientconn.Do(req)
rwc, br := clientconn.Hijack()
defer rwc.Close()
if started != nil {
started <- rwc
}
var receiveStdout chan error
var oldState *term.State
if in != nil && setRawTerminal && cli.isTerminal && os.Getenv("NORAW") == "" {
oldState, err = term.SetRawTerminal(cli.terminalFd)
if err != nil {
return err
}
defer term.RestoreTerminal(cli.terminalFd, oldState)
}
if stdout != nil || stderr != nil {
receiveStdout = utils.Go(func() (err error) {
defer func() {
if in != nil {
if setRawTerminal && cli.isTerminal {
term.RestoreTerminal(cli.terminalFd, oldState)
}
// For some reason this Close call blocks on darwin..
// As the client exits right after, simply discard the close
// until we find a better solution.
if runtime.GOOS != "darwin" {
in.Close()
}
}
}()
// When TTY is ON, use regular copy
if setRawTerminal && stdout != nil {
_, err = io.Copy(stdout, br)
} else {
_, err = utils.StdCopy(stdout, stderr, br)
}
utils.Debugf("[hijack] End of stdout")
return err
})
}
sendStdin := utils.Go(func() error {
if in != nil {
io.Copy(rwc, in)
utils.Debugf("[hijack] End of stdin")
}
if tcpc, ok := rwc.(*net.TCPConn); ok {
if err := tcpc.CloseWrite(); err != nil {
utils.Debugf("Couldn't send EOF: %s\n", err)
}
} else if unixc, ok := rwc.(*net.UnixConn); ok {
if err := unixc.CloseWrite(); err != nil {
utils.Debugf("Couldn't send EOF: %s\n", err)
}
}
// Discard errors due to pipe interruption
return nil
})
if stdout != nil || stderr != nil {
if err := <-receiveStdout; err != nil {
utils.Debugf("Error receiveStdout: %s", err)
return err
}
}
if !cli.isTerminal {
if err := <-sendStdin; err != nil {
utils.Debugf("Error sendStdin: %s", err)
return err
}
}
return nil
}
| [
"\"NORAW\""
] | [] | [
"NORAW"
] | [] | ["NORAW"] | go | 1 | 0 | |
zk/tests/test_zk.py | # (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import os
from distutils.version import LooseVersion # pylint: disable=E0611,E0401
import pytest
# project
from datadog_checks.zk import ZookeeperCheck
import conftest
def test_check(aggregator, spin_up_zk, get_instance):
"""
Collect ZooKeeper metrics.
"""
zk_check = ZookeeperCheck(conftest.CHECK_NAME, {}, {})
zk_check.check(get_instance)
zk_check.check(get_instance)
# Test metrics
for mname in conftest.STAT_METRICS:
aggregator.assert_metric(mname, tags=["mode:standalone", "mytag"])
zk_version = os.environ.get("ZK_VERSION") or "3.4.10"
if zk_version and LooseVersion(zk_version) > LooseVersion("3.4.0"):
for mname in conftest.MNTR_METRICS:
aggregator.assert_metric(mname, tags=["mode:standalone", "mytag"])
# Test service checks
aggregator.assert_service_check("zookeeper.ruok", status=zk_check.OK)
aggregator.assert_service_check("zookeeper.mode", status=zk_check.OK)
expected_mode = get_instance['expected_mode']
mname = "zookeeper.instances.{}".format(expected_mode)
aggregator.assert_metric(mname, value=1)
aggregator.assert_all_metrics_covered()
def test_wrong_expected_mode(aggregator, spin_up_zk, get_invalid_mode_instance):
"""
Raise a 'critical' service check when ZooKeeper is not in the expected mode.
"""
zk_check = ZookeeperCheck(conftest.CHECK_NAME, {}, {})
zk_check.check(get_invalid_mode_instance)
# Test service checks
aggregator.assert_service_check("zookeeper.mode", status=zk_check.CRITICAL)
def test_error_state(aggregator, spin_up_zk, get_conn_failure_config):
"""
Raise a 'critical' service check when ZooKeeper is in an error state.
Report status as down.
"""
zk_check = ZookeeperCheck(conftest.CHECK_NAME, {}, {})
with pytest.raises(Exception):
zk_check.check(get_conn_failure_config)
aggregator.assert_service_check("zookeeper.ruok", status=zk_check.CRITICAL)
aggregator.assert_metric("zookeeper.instances", tags=["mode:down"], count=1)
expected_mode = get_conn_failure_config['expected_mode']
mname = "zookeeper.instances.{}".format(expected_mode)
aggregator.assert_metric(mname, value=1, count=1)
| [] | [] | [
"ZK_VERSION"
] | [] | ["ZK_VERSION"] | python | 1 | 0 | |
contrib/spendfrom/spendfrom.py | #!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
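# Example (sketch): a bitcoin.conf containing
#   rpcuser=me
#   rpcpassword=secret  # trailing comments are stripped by readline() above
# is parsed as if it started with an [all] section, so this returns
# {'rpcuser': 'me', 'rpcpassword': 'secret'}.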
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 12337 if testnet else 2337
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
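# Example (sketch): with inputs carrying amounts 0.4, 0.3 and 0.5 BTC (in that order)
# and needed = Decimal("0.6"), the greedy loop above selects the first two outputs
# (0.7 BTC in total) and returns them together with change of 0.1 BTC.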
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
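# For example (sketch): an amount of Decimal("1.5") reaches bitcoind as float 1.5, which
# json.dumps() can encode; the check_json_precision() call at startup guards against a
# json library that would lose precision on such casts.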
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
fee = total_in - total_out  # implied miner fee
if fee > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| [] | [] | [
"APPDATA"
] | [] | ["APPDATA"] | python | 1 | 0 | |
SABnzbd.py | #!/usr/bin/python3 -OO
# Copyright 2007-2020 The SABnzbd-Team <team@sabnzbd.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys
if sys.hexversion < 0x03060000:
print("Sorry, requires Python 3.6 or above")
print("You can read more at: https://sabnzbd.org/python3")
sys.exit(1)
import logging
import logging.handlers
import importlib.util
import traceback
import getopt
import signal
import socket
import platform
import subprocess
import ssl
import time
import re
from typing import List, Dict, Any
try:
import Cheetah
import feedparser
import configobj
import cherrypy
import portend
import cryptography
import chardet
except ImportError as e:
print("Not all required Python modules are available, please check requirements.txt")
print("Missing module:", e.name)
print("You can read more at: https://sabnzbd.org/python3")
print("If you still experience problems, remove all .pyc files in this folder and subfolders")
sys.exit(1)
import sabnzbd
import sabnzbd.lang
import sabnzbd.interface
from sabnzbd.constants import *
import sabnzbd.newsunpack
from sabnzbd.misc import (
check_latest_version,
exit_sab,
split_host,
create_https_certificates,
windows_variant,
ip_extract,
set_serv_parms,
get_serv_parms,
get_from_url,
upload_file_to_sabnzbd,
probablyipv4,
)
from sabnzbd.filesystem import get_ext, real_path, long_path, globber_full, remove_file
from sabnzbd.panic import panic_tmpl, panic_port, panic_host, panic, launch_a_browser
import sabnzbd.config as config
import sabnzbd.cfg
import sabnzbd.downloader
import sabnzbd.notifier as notifier
import sabnzbd.zconfig
from sabnzbd.getipaddress import localipv4, publicipv4, ipv6
try:
import win32api
import win32serviceutil
import win32evtlogutil
import win32event
import win32service
import win32ts
import pywintypes
win32api.SetConsoleCtrlHandler(sabnzbd.sig_handler, True)
from sabnzbd.utils.apireg import get_connection_info, set_connection_info, del_connection_info
except ImportError:
if sabnzbd.WIN32:
print("Sorry, requires Python module PyWin32.")
sys.exit(1)
# Global for this module, signaling loglevel change
LOG_FLAG = False
def guard_loglevel():
""" Callback function for guarding loglevel """
global LOG_FLAG
LOG_FLAG = True
def warning_helpful(*args, **kwargs):
""" Wrapper to ignore helpful warnings if desired """
if sabnzbd.cfg.helpfull_warnings():
return logging.warning(*args, **kwargs)
return logging.info(*args, **kwargs)
logging.warning_helpful = warning_helpful
class GUIHandler(logging.Handler):
"""Logging handler collects the last warnings/errors/exceptions
to be displayed in the web-gui
"""
def __init__(self, size):
""" Initializes the handler """
logging.Handler.__init__(self)
self._size: int = size
self.store: List[Dict[str, Any]] = []
def emit(self, record: logging.LogRecord):
""" Emit a record by adding it to our private queue """
parsed_msg = record.msg % record.args
if record.levelno == logging.WARNING:
sabnzbd.notifier.send_notification(T("Warning"), parsed_msg, "warning")
else:
sabnzbd.notifier.send_notification(T("Error"), parsed_msg, "error")
# Append traceback, if available
warning = {"type": record.levelname, "text": parsed_msg, "time": int(time.time())}
if record.exc_info:
warning["text"] = "%s\n%s" % (warning["text"], traceback.format_exc())
# Loose the oldest record
if len(self.store) >= self._size:
self.store.pop(0)
self.store.append(warning)
def clear(self):
self.store = []
def count(self):
return len(self.store)
def content(self):
""" Return an array with last records """
return self.store
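# Usage sketch (the exact wiring elsewhere in SABnzbd may differ):
#   gui_log = GUIHandler(20)
#   logging.getLogger().addHandler(gui_log)
# keeps only the 20 most recent warning/error records, which the web interface can then
# fetch through gui_log.content().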
def print_help():
print()
print(("Usage: %s [-f <configfile>] <other options>" % sabnzbd.MY_NAME))
print()
print("Options marked [*] are stored in the config file")
print()
print("Options:")
print(" -f --config-file <ini> Location of config file")
print(" -s --server <srv:port> Listen on server:port [*]")
print(" -t --templates <templ> Template directory [*]")
print()
print(" -l --logging <-1..2> Set logging level (-1=off, 0= least, 2= most) [*]")
print(" -w --weblogging Enable cherrypy access logging")
print()
print(" -b --browser <0..1> Auto browser launch (0= off, 1= on) [*]")
if sabnzbd.WIN32:
print(" -d --daemon Use when run as a service")
else:
print(" -d --daemon Fork daemon process")
print(" --pid <path> Create a PID file in the given folder (full path)")
print(" --pidfile <path> Create a PID file with the given name (full path)")
print()
print(" -h --help Print this message")
print(" -v --version Print version information")
print(" -c --clean Remove queue, cache and logs")
print(" -p --pause Start in paused mode")
print(" --repair Add orphaned jobs from the incomplete folder to the queue")
print(" --repair-all Try to reconstruct the queue from the incomplete folder")
print(" with full data reconstruction")
print(" --https <port> Port to use for HTTPS server")
print(" --ipv6_hosting <0|1> Listen on IPv6 address [::1] [*]")
print(" --no-login Start with username and password reset")
print(" --log-all Log all article handling (for developers)")
print(" --disable-file-log Logging is only written to console")
print(" --new Run a new instance of SABnzbd")
print()
print("NZB (or related) file:")
print(" NZB or compressed NZB file, with extension .nzb, .zip, .rar, .7z, .gz, or .bz2")
print()
def print_version():
print(
(
"""
%s-%s
Copyright (C) 2007-2020 The SABnzbd-Team <team@sabnzbd.org>
SABnzbd comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it
under certain conditions. It is licensed under the
GNU GENERAL PUBLIC LICENSE Version 2 or (at your option) any later version.
"""
% (sabnzbd.MY_NAME, sabnzbd.__version__)
)
)
def daemonize():
""" Daemonize the process, based on various StackOverflow answers """
try:
pid = os.fork()
if pid > 0:
sys.exit(0)
except OSError:
print("fork() failed")
sys.exit(1)
os.chdir(sabnzbd.DIR_PROG)
os.setsid()
# Make sure I can read my own files and shut out others
prev = os.umask(0)
os.umask(prev and int("077", 8))
try:
pid = os.fork()
if pid > 0:
sys.exit(0)
except OSError:
print("fork() failed")
sys.exit(1)
# Flush I/O buffers
sys.stdout.flush()
sys.stderr.flush()
# Get log file path and remove the log file if it got too large
log_path = os.path.join(sabnzbd.cfg.log_dir.get_path(), DEF_LOG_ERRFILE)
if os.path.exists(log_path) and os.path.getsize(log_path) > sabnzbd.cfg.log_size():
remove_file(log_path)
# Replace file descriptors for stdin, stdout, and stderr
with open("/dev/null", "rb", 0) as f:
os.dup2(f.fileno(), sys.stdin.fileno())
with open(log_path, "ab", 0) as f:
os.dup2(f.fileno(), sys.stdout.fileno())
with open(log_path, "ab", 0) as f:
os.dup2(f.fileno(), sys.stderr.fileno())
def abort_and_show_error(browserhost, cherryport, err=""):
""" Abort program because of CherryPy troubles """
logging.error(T("Failed to start web-interface") + " : " + str(err))
if not sabnzbd.DAEMON:
if "49" in err:
panic_host(browserhost, cherryport)
else:
panic_port(browserhost, cherryport)
sabnzbd.halt()
exit_sab(2)
def identify_web_template(key, defweb, wdir):
""" Determine a correct web template set, return full template path """
if wdir is None:
try:
wdir = fix_webname(key())
except:
wdir = ""
if not wdir:
wdir = defweb
if key:
key.set(wdir)
if not wdir:
# No default value defined, accept empty path
return ""
full_dir = real_path(sabnzbd.DIR_INTERFACES, wdir)
full_main = real_path(full_dir, DEF_MAIN_TMPL)
if not os.path.exists(full_main):
logging.warning_helpful(T("Cannot find web template: %s, trying standard template"), full_main)
full_dir = real_path(sabnzbd.DIR_INTERFACES, DEF_STDINTF)
full_main = real_path(full_dir, DEF_MAIN_TMPL)
if not os.path.exists(full_main):
logging.exception("Cannot find standard template: %s", full_dir)
panic_tmpl(full_dir)
exit_sab(1)
logging.info("Template location for %s is %s", defweb, full_dir)
return real_path(full_dir, "templates")
def check_template_scheme(color, web_dir):
""" Check existence of color-scheme """
if color and os.path.exists(os.path.join(web_dir, "static", "stylesheets", "colorschemes", color + ".css")):
return color
elif color and os.path.exists(os.path.join(web_dir, "static", "stylesheets", "colorschemes", color)):
return color
else:
return ""
def fix_webname(name):
if name:
xname = name.title()
else:
xname = ""
if xname in ("Default",):
return "Glitter"
elif xname in ("Glitter", "Plush"):
return xname
elif xname in ("Wizard",):
return name.lower()
elif xname in ("Config",):
return "Glitter"
else:
return name
def get_user_profile_paths(vista_plus):
""" Get the default data locations on Windows"""
if sabnzbd.DAEMON:
# In daemon mode, do not try to access the user profile
# just assume that everything defaults to the program dir
sabnzbd.DIR_LCLDATA = sabnzbd.DIR_PROG
sabnzbd.DIR_HOME = sabnzbd.DIR_PROG
if sabnzbd.WIN32:
# Ignore Win32 "logoff" signal
# This should work, but it doesn't
# Instead the signal_handler will ignore the "logoff" signal
# signal.signal(5, signal.SIG_IGN)
pass
return
elif sabnzbd.WIN32:
try:
from win32com.shell import shell, shellcon
path = shell.SHGetFolderPath(0, shellcon.CSIDL_LOCAL_APPDATA, None, 0)
sabnzbd.DIR_LCLDATA = os.path.join(path, DEF_WORKDIR)
sabnzbd.DIR_HOME = os.environ["USERPROFILE"]
except:
try:
if vista_plus:
root = os.environ["AppData"]
user = os.environ["USERPROFILE"]
sabnzbd.DIR_LCLDATA = "%s\\%s" % (root.replace("\\Roaming", "\\Local"), DEF_WORKDIR)
sabnzbd.DIR_HOME = user
else:
root = os.environ["USERPROFILE"]
sabnzbd.DIR_LCLDATA = "%s\\%s" % (root, DEF_WORKDIR)
sabnzbd.DIR_HOME = root
except:
pass
# Long-path everything
sabnzbd.DIR_LCLDATA = long_path(sabnzbd.DIR_LCLDATA)
sabnzbd.DIR_HOME = long_path(sabnzbd.DIR_HOME)
return
elif sabnzbd.DARWIN:
home = os.environ.get("HOME")
if home:
sabnzbd.DIR_LCLDATA = "%s/Library/Application Support/SABnzbd" % home
sabnzbd.DIR_HOME = home
return
else:
# Unix/Linux
home = os.environ.get("HOME")
if home:
sabnzbd.DIR_LCLDATA = "%s/.%s" % (home, DEF_WORKDIR)
sabnzbd.DIR_HOME = home
return
# Nothing worked
panic("Cannot access the user profile.", "Please start with sabnzbd.ini file in another location")
exit_sab(2)
def print_modules():
""" Log all detected optional or external modules """
if sabnzbd.decoder.SABYENC_ENABLED:
# Yes, we have SABYenc, and it's the correct version, so it's enabled
logging.info("SABYenc module (v%s)... found!", sabnzbd.decoder.SABYENC_VERSION)
else:
# Something wrong with SABYenc, so let's determine and print what:
if sabnzbd.decoder.SABYENC_VERSION:
# We have a VERSION, thus a SABYenc module, but it's not the correct version
logging.error(
T("SABYenc disabled: no correct version found! (Found v%s, expecting v%s)"),
sabnzbd.decoder.SABYENC_VERSION,
sabnzbd.constants.SABYENC_VERSION_REQUIRED,
)
else:
# No SABYenc module at all
logging.error(
T("SABYenc module... NOT found! Expecting v%s - https://sabnzbd.org/sabyenc"),
sabnzbd.constants.SABYENC_VERSION_REQUIRED,
)
# Do not allow downloading
sabnzbd.NO_DOWNLOADING = True
logging.info("Cryptography module (v%s)... found!", cryptography.__version__)
if sabnzbd.WIN32 and sabnzbd.newsunpack.MULTIPAR_COMMAND:
logging.info("MultiPar binary... found (%s)", sabnzbd.newsunpack.MULTIPAR_COMMAND)
elif sabnzbd.newsunpack.PAR2_COMMAND:
logging.info("par2 binary... found (%s)", sabnzbd.newsunpack.PAR2_COMMAND)
else:
logging.error(T("par2 binary... NOT found!"))
# Do not allow downloading
sabnzbd.NO_DOWNLOADING = True
if sabnzbd.newsunpack.RAR_COMMAND:
logging.info("UNRAR binary... found (%s)", sabnzbd.newsunpack.RAR_COMMAND)
# Report problematic unrar
if sabnzbd.newsunpack.RAR_PROBLEM:
have_str = "%.2f" % (float(sabnzbd.newsunpack.RAR_VERSION) / 100)
want_str = "%.2f" % (float(sabnzbd.constants.REC_RAR_VERSION) / 100)
logging.warning_helpful(
T("Your UNRAR version is %s, we recommend version %s or higher.<br />"), have_str, want_str
)
elif not (sabnzbd.WIN32 or sabnzbd.DARWIN):
logging.info("UNRAR binary version %.2f", (float(sabnzbd.newsunpack.RAR_VERSION) / 100))
else:
logging.error(T("unrar binary... NOT found"))
# Do not allow downloading
sabnzbd.NO_DOWNLOADING = True
# If available, we prefer 7zip over unzip
if sabnzbd.newsunpack.SEVEN_COMMAND:
logging.info("7za binary... found (%s)", sabnzbd.newsunpack.SEVEN_COMMAND)
else:
logging.info(T("7za binary... NOT found!"))
if sabnzbd.newsunpack.ZIP_COMMAND:
logging.info("unzip binary... found (%s)", sabnzbd.newsunpack.ZIP_COMMAND)
else:
logging.info(T("unzip binary... NOT found!"))
if not sabnzbd.WIN32:
if sabnzbd.newsunpack.NICE_COMMAND:
logging.info("nice binary... found (%s)", sabnzbd.newsunpack.NICE_COMMAND)
else:
logging.info("nice binary... NOT found!")
if sabnzbd.newsunpack.IONICE_COMMAND:
logging.info("ionice binary... found (%s)", sabnzbd.newsunpack.IONICE_COMMAND)
else:
logging.info("ionice binary... NOT found!")
# Show fatal warning
if sabnzbd.NO_DOWNLOADING:
logging.error(T("Essential modules are missing, downloading cannot start."))
def all_localhosts():
""" Return all unique values of localhost in order of preference """
ips = ["127.0.0.1"]
try:
# Check whether IPv6 is available and enabled
info = socket.getaddrinfo("::1", None)
af, socktype, proto, _canonname, _sa = info[0]
s = socket.socket(af, socktype, proto)
s.close()
except socket.error:
return ips
try:
info = socket.getaddrinfo("localhost", None)
except socket.error:
# localhost does not resolve
return ips
ips = []
for item in info:
item = item[4][0]
# Avoid problems on strange Linux settings
if not isinstance(item, str):
continue
# Only return IPv6 when enabled
if item not in ips and ("::1" not in item or sabnzbd.cfg.ipv6_hosting()):
ips.append(item)
return ips
def check_resolve(host):
""" Return True if 'host' resolves """
try:
socket.getaddrinfo(host, None)
except socket.error:
# Does not resolve
return False
return True
def get_webhost(cherryhost, cherryport, https_port):
"""Determine the webhost address and port,
return (host, port, browserhost)
"""
if cherryhost == "0.0.0.0" and not check_resolve("127.0.0.1"):
cherryhost = ""
elif cherryhost == "::" and not check_resolve("::1"):
cherryhost = ""
if cherryhost is None:
cherryhost = sabnzbd.cfg.cherryhost()
else:
sabnzbd.cfg.cherryhost.set(cherryhost)
# Get IP address, but discard APIPA/IPV6
# If only APIPA's or IPV6 are found, fall back to localhost
ipv4 = ipv6 = False
localhost = hostip = "localhost"
try:
info = socket.getaddrinfo(socket.gethostname(), None)
except socket.error:
# Hostname does not resolve
try:
# Valid user defined name?
info = socket.getaddrinfo(cherryhost, None)
except socket.error:
if cherryhost not in LOCALHOSTS:
cherryhost = "0.0.0.0"
try:
info = socket.getaddrinfo(localhost, None)
except socket.error:
info = socket.getaddrinfo("127.0.0.1", None)
localhost = "127.0.0.1"
for item in info:
ip = str(item[4][0])
if ip.startswith("169.254."):
pass # Automatic Private IP Addressing (APIPA)
elif ":" in ip:
ipv6 = True
elif "." in ip and not ipv4:
ipv4 = True
hostip = ip
# A blank host will use the local ip address
if cherryhost == "":
if ipv6 and ipv4:
# To protect Firefox users, use numeric IP
cherryhost = hostip
browserhost = hostip
else:
cherryhost = socket.gethostname()
browserhost = cherryhost
# 0.0.0.0 will listen on all ipv4 interfaces (no ipv6 addresses)
elif cherryhost == "0.0.0.0":
# Just take the gamble for this
cherryhost = "0.0.0.0"
browserhost = localhost
# :: will listen on all ipv6 interfaces (no ipv4 addresses)
elif cherryhost in ("::", "[::]"):
cherryhost = cherryhost.strip("[").strip("]")
# Assume '::1' == 'localhost'
browserhost = localhost
# IPV6 address
elif "[" in cherryhost or ":" in cherryhost:
browserhost = cherryhost
# IPV6 numeric address
elif cherryhost.replace(".", "").isdigit():
# IPV4 numerical
browserhost = cherryhost
elif cherryhost == localhost:
cherryhost = localhost
browserhost = localhost
else:
# If on Vista and/or APIPA, use numerical IP, to help FireFoxers
if ipv6 and ipv4:
cherryhost = hostip
browserhost = cherryhost
# Some systems don't like brackets in numerical ipv6
if sabnzbd.DARWIN:
cherryhost = cherryhost.strip("[]")
else:
try:
socket.getaddrinfo(cherryhost, None)
except socket.error:
cherryhost = cherryhost.strip("[]")
if ipv6 and ipv4 and browserhost not in LOCALHOSTS:
sabnzbd.AMBI_LOCALHOST = True
logging.info("IPV6 has priority on this system, potential Firefox issue")
if ipv6 and ipv4 and cherryhost == "" and sabnzbd.WIN32:
logging.warning_helpful(T("Please be aware the 0.0.0.0 hostname will need an IPv6 address for external access"))
if cherryhost == "localhost" and not sabnzbd.WIN32 and not sabnzbd.DARWIN:
# On the Ubuntu family, localhost leads to problems for CherryPy
ips = ip_extract()
if "127.0.0.1" in ips and "::1" in ips:
cherryhost = "127.0.0.1"
if ips[0] != "127.0.0.1":
browserhost = "127.0.0.1"
# This is to please Chrome on macOS
if cherryhost == "localhost" and sabnzbd.DARWIN:
cherryhost = "127.0.0.1"
browserhost = "localhost"
if cherryport is None:
cherryport = sabnzbd.cfg.cherryport.get_int()
else:
sabnzbd.cfg.cherryport.set(str(cherryport))
if https_port is None:
https_port = sabnzbd.cfg.https_port.get_int()
else:
sabnzbd.cfg.https_port.set(str(https_port))
        # If the HTTPS port was specified, assume HTTPS should be enabled as well
sabnzbd.cfg.enable_https.set(True)
if cherryport == https_port and sabnzbd.cfg.enable_https():
sabnzbd.cfg.enable_https.set(False)
# Should have a translated message, but that's not available yet
logging.error(T("HTTP and HTTPS ports cannot be the same"))
return cherryhost, cherryport, browserhost, https_port
def attach_server(host, port, cert=None, key=None, chain=None):
""" Define and attach server, optionally HTTPS """
if sabnzbd.cfg.ipv6_hosting() or "::1" not in host:
http_server = cherrypy._cpserver.Server()
http_server.bind_addr = (host, port)
if cert and key:
http_server.ssl_module = "builtin"
http_server.ssl_certificate = cert
http_server.ssl_private_key = key
http_server.ssl_certificate_chain = chain
http_server.subscribe()
def is_sabnzbd_running(url):
""" Return True when there's already a SABnzbd instance running. """
try:
url = "%s&mode=version" % url
# Do this without certificate verification, few installations will have that
prev = sabnzbd.set_https_verification(False)
ver = get_from_url(url)
sabnzbd.set_https_verification(prev)
return ver and (re.search(r"\d+\.\d+\.", ver) or ver.strip() == sabnzbd.__version__)
except:
return False
def find_free_port(host, currentport):
""" Return a free port, 0 when nothing is free """
n = 0
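    # Try up to 10 candidate ports, stepping by 5 and staying below the dynamic/private port range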
while n < 10 and currentport <= 49151:
try:
portend.free(host, currentport, timeout=0.025)
return currentport
except:
currentport += 5
n += 1
return 0
def check_for_sabnzbd(url, upload_nzbs, allow_browser=True):
"""Check for a running instance of sabnzbd on this port
allow_browser==True|None will launch the browser, False will not.
"""
if allow_browser is None:
allow_browser = True
if is_sabnzbd_running(url):
# Upload any specified nzb files to the running instance
if upload_nzbs:
prev = sabnzbd.set_https_verification(False)
for f in upload_nzbs:
upload_file_to_sabnzbd(url, f)
sabnzbd.set_https_verification(prev)
else:
# Launch the web browser and quit since sabnzbd is already running
# Trim away everything after the final slash in the URL
url = url[: url.rfind("/") + 1]
launch_a_browser(url, force=allow_browser)
exit_sab(0)
return True
return False
def evaluate_inipath(path):
"""Derive INI file path from a partial path.
Full file path: if file does not exist the name must contain a dot
but not a leading dot.
foldername is enough, the standard name will be appended.
"""
path = os.path.normpath(os.path.abspath(path))
inipath = os.path.join(path, DEF_INI_FILE)
if os.path.isdir(path):
return inipath
elif os.path.isfile(path) or os.path.isfile(path + ".bak"):
return path
else:
_dirpart, name = os.path.split(path)
if name.find(".") < 1:
return inipath
else:
return path
def commandline_handler():
"""Split win32-service commands are true parameters
Returns:
service, sab_opts, serv_opts, upload_nzbs
"""
service = ""
sab_opts = []
serv_opts = [os.path.normpath(os.path.abspath(sys.argv[0]))]
upload_nzbs = []
# macOS binary: get rid of the weird -psn_0_123456 parameter
for arg in sys.argv:
if arg.startswith("-psn_"):
sys.argv.remove(arg)
break
# Ugly hack to remove the extra "SABnzbd*" parameter the Windows binary
# gets when it's restarted
if len(sys.argv) > 1 and "sabnzbd" in sys.argv[1].lower() and not sys.argv[1].startswith("-"):
slice_start = 2
else:
slice_start = 1
# Prepend options from env-variable to options
info = os.environ.get("SABnzbd", "").split()
info.extend(sys.argv[slice_start:])
try:
opts, args = getopt.getopt(
info,
"phdvncwl:s:f:t:b:2:",
[
"pause",
"help",
"daemon",
"nobrowser",
"clean",
"logging=",
"weblogging",
"server=",
"templates",
"ipv6_hosting=",
"template2",
"browser=",
"config-file=",
"force",
"disable-file-log",
"version",
"https=",
"autorestarted",
"repair",
"repair-all",
"log-all",
"no-login",
"pid=",
"new",
"console",
"pidfile=",
# Below Win32 Service options
"password=",
"username=",
"startup=",
"perfmonini=",
"perfmondll=",
"interactive",
"wait=",
],
)
except getopt.GetoptError:
print_help()
exit_sab(2)
# Check for Win32 service commands
if args and args[0] in ("install", "update", "remove", "start", "stop", "restart", "debug"):
service = args[0]
serv_opts.extend(args)
if not service:
# Get and remove any NZB file names
for entry in args:
if get_ext(entry) in VALID_NZB_FILES + VALID_ARCHIVES:
upload_nzbs.append(os.path.abspath(entry))
for opt, arg in opts:
if opt in ("password", "username", "startup", "perfmonini", "perfmondll", "interactive", "wait"):
# Service option, just collect
if service:
serv_opts.append(opt)
if arg:
serv_opts.append(arg)
else:
if opt == "-f":
arg = os.path.normpath(os.path.abspath(arg))
sab_opts.append((opt, arg))
return service, sab_opts, serv_opts, upload_nzbs
def get_f_option(opts):
""" Return value of the -f option """
    for opt, arg in opts:
        if opt == "-f":
            return arg
    return None
def main():
global LOG_FLAG
import sabnzbd # Due to ApplePython bug
autobrowser = None
autorestarted = False
sabnzbd.MY_FULLNAME = sys.argv[0]
sabnzbd.MY_NAME = os.path.basename(sabnzbd.MY_FULLNAME)
fork = False
pause = False
inifile = None
cherryhost = None
cherryport = None
https_port = None
cherrypylogging = None
clean_up = False
logging_level = None
no_file_log = False
web_dir = None
vista_plus = False
win64 = False
repair = 0
no_login = False
sabnzbd.RESTART_ARGS = [sys.argv[0]]
pid_path = None
pid_file = None
new_instance = False
ipv6_hosting = None
_service, sab_opts, _serv_opts, upload_nzbs = commandline_handler()
for opt, arg in sab_opts:
if opt == "--servicecall":
sabnzbd.MY_FULLNAME = arg
elif opt in ("-d", "--daemon"):
if not sabnzbd.WIN32:
fork = True
autobrowser = False
sabnzbd.DAEMON = True
sabnzbd.RESTART_ARGS.append(opt)
elif opt in ("-f", "--config-file"):
inifile = arg
sabnzbd.RESTART_ARGS.append(opt)
sabnzbd.RESTART_ARGS.append(arg)
elif opt in ("-h", "--help"):
print_help()
exit_sab(0)
elif opt in ("-t", "--templates"):
web_dir = arg
elif opt in ("-s", "--server"):
(cherryhost, cherryport) = split_host(arg)
elif opt in ("-n", "--nobrowser"):
autobrowser = False
elif opt in ("-b", "--browser"):
try:
autobrowser = bool(int(arg))
except ValueError:
autobrowser = True
elif opt == "--autorestarted":
autorestarted = True
elif opt in ("-c", "--clean"):
clean_up = True
elif opt in ("-w", "--weblogging"):
cherrypylogging = True
elif opt in ("-l", "--logging"):
try:
logging_level = int(arg)
except:
logging_level = -2
if logging_level < -1 or logging_level > 2:
print_help()
exit_sab(1)
elif opt in ("-v", "--version"):
print_version()
exit_sab(0)
elif opt in ("-p", "--pause"):
pause = True
elif opt == "--https":
https_port = int(arg)
sabnzbd.RESTART_ARGS.append(opt)
sabnzbd.RESTART_ARGS.append(arg)
elif opt == "--repair":
repair = 1
pause = True
elif opt == "--repair-all":
repair = 2
pause = True
elif opt == "--log-all":
sabnzbd.LOG_ALL = True
elif opt == "--disable-file-log":
no_file_log = True
elif opt == "--no-login":
no_login = True
elif opt == "--pid":
pid_path = arg
sabnzbd.RESTART_ARGS.append(opt)
sabnzbd.RESTART_ARGS.append(arg)
elif opt == "--pidfile":
pid_file = arg
sabnzbd.RESTART_ARGS.append(opt)
sabnzbd.RESTART_ARGS.append(arg)
elif opt == "--new":
new_instance = True
elif opt == "--ipv6_hosting":
ipv6_hosting = arg
sabnzbd.MY_FULLNAME = os.path.normpath(os.path.abspath(sabnzbd.MY_FULLNAME))
sabnzbd.MY_NAME = os.path.basename(sabnzbd.MY_FULLNAME)
sabnzbd.DIR_PROG = os.path.dirname(sabnzbd.MY_FULLNAME)
sabnzbd.DIR_INTERFACES = real_path(sabnzbd.DIR_PROG, DEF_INTERFACES)
sabnzbd.DIR_LANGUAGE = real_path(sabnzbd.DIR_PROG, DEF_LANGUAGE)
org_dir = os.getcwd()
# Need console logging for SABnzbd.py and SABnzbd-console.exe
console_logging = (not hasattr(sys, "frozen")) or (sabnzbd.MY_NAME.lower().find("-console") > 0)
console_logging = console_logging and not sabnzbd.DAEMON
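    # Indexed with logging_level + 1: -1=FATAL, 0=WARNING, 1=INFO, 2=DEBUG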
LOGLEVELS = (logging.FATAL, logging.WARNING, logging.INFO, logging.DEBUG)
# Setup primary logging to prevent default console logging
gui_log = GUIHandler(MAX_WARNINGS)
gui_log.setLevel(logging.WARNING)
format_gui = "%(asctime)s\n%(levelname)s\n%(message)s"
gui_log.setFormatter(logging.Formatter(format_gui))
sabnzbd.GUIHANDLER = gui_log
# Create logger
logger = logging.getLogger("")
logger.setLevel(logging.WARNING)
logger.addHandler(gui_log)
# Detect Windows variant
if sabnzbd.WIN32:
vista_plus, win64 = windows_variant()
sabnzbd.WIN64 = win64
if inifile:
# INI file given, simplest case
inifile = evaluate_inipath(inifile)
else:
# No ini file given, need profile data
get_user_profile_paths(vista_plus)
# Find out where INI file is
inifile = os.path.abspath(os.path.join(sabnzbd.DIR_LCLDATA, DEF_INI_FILE))
# Long-path notation on Windows to be sure
inifile = long_path(inifile)
    # If the INI file is at a non-standard location, use that location as $HOME
if sabnzbd.DIR_LCLDATA != os.path.dirname(inifile):
sabnzbd.DIR_HOME = os.path.dirname(inifile)
# All system data dirs are relative to the place we found the INI file
sabnzbd.DIR_LCLDATA = os.path.dirname(inifile)
if not os.path.exists(inifile) and not os.path.exists(inifile + ".bak") and not os.path.exists(sabnzbd.DIR_LCLDATA):
try:
os.makedirs(sabnzbd.DIR_LCLDATA)
except IOError:
panic('Cannot create folder "%s".' % sabnzbd.DIR_LCLDATA, "Check specified INI file location.")
exit_sab(1)
sabnzbd.cfg.set_root_folders(sabnzbd.DIR_HOME, sabnzbd.DIR_LCLDATA)
res, msg = config.read_config(inifile)
if not res:
panic(msg, "Specify a correct file or delete this file.")
exit_sab(1)
# Set root folders for HTTPS server file paths
sabnzbd.cfg.set_root_folders2()
if ipv6_hosting is not None:
sabnzbd.cfg.ipv6_hosting.set(ipv6_hosting)
# Determine web host address
cherryhost, cherryport, browserhost, https_port = get_webhost(cherryhost, cherryport, https_port)
enable_https = sabnzbd.cfg.enable_https()
    # When running as a daemon, just check and bail out if the port is in use
if sabnzbd.DAEMON:
if enable_https and https_port:
try:
portend.free(cherryhost, https_port, timeout=0.05)
except IOError:
abort_and_show_error(browserhost, cherryport)
except:
abort_and_show_error(browserhost, cherryport, "49")
try:
portend.free(cherryhost, cherryport, timeout=0.05)
except IOError:
abort_and_show_error(browserhost, cherryport)
except:
abort_and_show_error(browserhost, cherryport, "49")
# Windows instance is reachable through registry
url = None
if sabnzbd.WIN32 and not new_instance:
url = get_connection_info()
if url and check_for_sabnzbd(url, upload_nzbs, autobrowser):
exit_sab(0)
# SSL
if enable_https:
port = https_port or cherryport
try:
portend.free(browserhost, port, timeout=0.05)
except IOError as error:
if str(error) == "Port not bound.":
pass
else:
if not url:
url = "https://%s:%s%s/api?" % (browserhost, port, sabnzbd.cfg.url_base())
if new_instance or not check_for_sabnzbd(url, upload_nzbs, autobrowser):
# Bail out if we have fixed our ports after first start-up
if sabnzbd.cfg.fixed_ports():
abort_and_show_error(browserhost, cherryport)
# Find free port to bind
newport = find_free_port(browserhost, port)
if newport > 0:
# Save the new port
if https_port:
https_port = newport
sabnzbd.cfg.https_port.set(newport)
else:
# In case HTTPS == HTTP port
cherryport = newport
sabnzbd.cfg.cherryport.set(newport)
except:
# Something else wrong, probably badly specified host
abort_and_show_error(browserhost, cherryport, "49")
# NonSSL check if there's no HTTPS or we only use 1 port
if not (enable_https and not https_port):
try:
portend.free(browserhost, cherryport, timeout=0.05)
except IOError as error:
if str(error) == "Port not bound.":
pass
else:
if not url:
url = "http://%s:%s%s/api?" % (browserhost, cherryport, sabnzbd.cfg.url_base())
if new_instance or not check_for_sabnzbd(url, upload_nzbs, autobrowser):
# Bail out if we have fixed our ports after first start-up
if sabnzbd.cfg.fixed_ports():
abort_and_show_error(browserhost, cherryport)
# Find free port to bind
port = find_free_port(browserhost, cherryport)
if port > 0:
sabnzbd.cfg.cherryport.set(port)
cherryport = port
except:
# Something else wrong, probably badly specified host
abort_and_show_error(browserhost, cherryport, "49")
# We found a port, now we never check again
sabnzbd.cfg.fixed_ports.set(True)
# Logging-checks
logdir = sabnzbd.cfg.log_dir.get_path()
if fork and not logdir:
print("Error: I refuse to fork without a log directory!")
sys.exit(1)
if clean_up:
xlist = globber_full(logdir)
for x in xlist:
if RSS_FILE_NAME not in x:
try:
os.remove(x)
except:
pass
# Prevent the logger from raising exceptions
# primarily to reduce the fallout of Python issue 4749
logging.raiseExceptions = 0
# Log-related constants we always need
if logging_level is None:
logging_level = sabnzbd.cfg.log_level()
else:
sabnzbd.cfg.log_level.set(logging_level)
sabnzbd.LOGFILE = os.path.join(logdir, DEF_LOG_FILE)
logformat = "%(asctime)s::%(levelname)s::[%(module)s:%(lineno)d] %(message)s"
logger.setLevel(LOGLEVELS[logging_level + 1])
try:
if not no_file_log:
rollover_log = logging.handlers.RotatingFileHandler(
sabnzbd.LOGFILE, "a+", sabnzbd.cfg.log_size(), sabnzbd.cfg.log_backups()
)
rollover_log.setFormatter(logging.Formatter(logformat))
logger.addHandler(rollover_log)
except IOError:
print("Error:")
print("Can't write to logfile")
exit_sab(2)
# Fork on non-Windows processes
if fork and not sabnzbd.WIN32:
daemonize()
else:
if console_logging:
console = logging.StreamHandler()
console.setLevel(LOGLEVELS[logging_level + 1])
console.setFormatter(logging.Formatter(logformat))
logger.addHandler(console)
if no_file_log:
logging.info("Console logging only")
# Start SABnzbd
logging.info("--------------------------------")
logging.info("%s-%s", sabnzbd.MY_NAME, sabnzbd.__version__)
# See if we can get version from git when running an unknown revision
if sabnzbd.__baseline__ == "unknown":
try:
sabnzbd.__baseline__ = sabnzbd.misc.run_command(["git", "rev-parse", "--short", "HEAD"]).strip()
except:
pass
logging.info("Commit: %s", sabnzbd.__baseline__)
logging.info("Full executable path = %s", sabnzbd.MY_FULLNAME)
if sabnzbd.WIN32:
suffix = ""
if win64:
suffix = "(win64)"
try:
logging.info("Platform = %s %s", platform.platform(), suffix)
except:
logging.info("Platform = %s <unknown>", suffix)
else:
logging.info("Platform = %s", os.name)
logging.info("Python-version = %s", sys.version)
logging.info("Arguments = %s", sabnzbd.CMDLINE)
if sabnzbd.DOCKER:
logging.info("Running inside a docker container")
else:
logging.info("Not inside a docker container")
# Find encoding; relevant for external processing activities
logging.info("Preferred encoding = %s", sabnzbd.encoding.CODEPAGE)
    # On Linux/FreeBSD/Unix a "UTF-8" locale is strongly advised:
if not sabnzbd.WIN32 and not sabnzbd.DARWIN and not ("utf-8" in sabnzbd.encoding.CODEPAGE.lower()):
logging.warning_helpful(
T(
"SABnzbd was started with encoding %s, this should be UTF-8. Expect problems with Unicoded file and directory names in downloads."
),
sabnzbd.encoding.CODEPAGE,
)
# SSL Information
logging.info("SSL version = %s", ssl.OPENSSL_VERSION)
# Load (extra) certificates if supplied by certifi
# This is optional and provided in the binaries
if importlib.util.find_spec("certifi") is not None:
import certifi
try:
os.environ["SSL_CERT_FILE"] = certifi.where()
logging.info("Certifi version: %s", certifi.__version__)
logging.info("Loaded additional certificates from: %s", os.environ["SSL_CERT_FILE"])
except:
# Sometimes the certificate file is blocked
logging.warning(T("Could not load additional certificates from certifi package"))
logging.info("Traceback: ", exc_info=True)
# Extra startup info
if sabnzbd.cfg.log_level() > 1:
# List the number of certificates available (can take up to 1.5 seconds)
ctx = ssl.create_default_context()
logging.debug("Available certificates: %s", repr(ctx.cert_store_stats()))
mylocalipv4 = localipv4()
if mylocalipv4:
logging.debug("My local IPv4 address = %s", mylocalipv4)
else:
logging.debug("Could not determine my local IPv4 address")
mypublicipv4 = publicipv4()
if mypublicipv4:
logging.debug("My public IPv4 address = %s", mypublicipv4)
else:
logging.debug("Could not determine my public IPv4 address")
myipv6 = ipv6()
if myipv6:
logging.debug("My IPv6 address = %s", myipv6)
else:
logging.debug("Could not determine my IPv6 address")
# Measure and log system performance measured by pystone and - if possible - CPU model
from sabnzbd.utils.getperformance import getpystone, getcpu
pystoneperf = getpystone()
if pystoneperf:
logging.debug("CPU Pystone available performance = %s", pystoneperf)
else:
logging.debug("CPU Pystone available performance could not be calculated")
cpumodel = getcpu() # Linux only
if cpumodel:
logging.debug("CPU model = %s", cpumodel)
logging.info("Using INI file %s", inifile)
if autobrowser is not None:
sabnzbd.cfg.autobrowser.set(autobrowser)
sabnzbd.initialize(pause, clean_up, repair=repair)
os.chdir(sabnzbd.DIR_PROG)
sabnzbd.WEB_DIR = identify_web_template(sabnzbd.cfg.web_dir, DEF_STDINTF, fix_webname(web_dir))
sabnzbd.WEB_DIR_CONFIG = identify_web_template(None, DEF_STDCONFIG, "")
sabnzbd.WIZARD_DIR = os.path.join(sabnzbd.DIR_INTERFACES, "wizard")
sabnzbd.WEB_COLOR = check_template_scheme(sabnzbd.cfg.web_color(), sabnzbd.WEB_DIR)
sabnzbd.cfg.web_color.set(sabnzbd.WEB_COLOR)
# Handle the several tray icons
if sabnzbd.cfg.win_menu() and not sabnzbd.DAEMON:
if sabnzbd.WIN32:
import sabnzbd.sabtray
sabnzbd.WINTRAY = sabnzbd.sabtray.SABTrayThread()
elif sabnzbd.LINUX_POWER and os.environ.get("DISPLAY"):
try:
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
import sabnzbd.sabtraylinux
sabnzbd.LINUXTRAY = sabnzbd.sabtraylinux.StatusIcon()
except:
logging.info("python3-gi not found, no SysTray.")
# Find external programs
sabnzbd.newsunpack.find_programs(sabnzbd.DIR_PROG)
print_modules()
# HTTPS certificate generation
https_cert = sabnzbd.cfg.https_cert.get_path()
https_key = sabnzbd.cfg.https_key.get_path()
https_chain = sabnzbd.cfg.https_chain.get_path()
if not (sabnzbd.cfg.https_chain() and os.path.exists(https_chain)):
https_chain = None
if enable_https:
# If either the HTTPS certificate or key do not exist, make some self-signed ones.
if not (https_cert and os.path.exists(https_cert)) or not (https_key and os.path.exists(https_key)):
create_https_certificates(https_cert, https_key)
if not (os.path.exists(https_cert) and os.path.exists(https_key)):
logging.warning(T("Disabled HTTPS because of missing CERT and KEY files"))
enable_https = False
sabnzbd.cfg.enable_https.set(False)
# So the cert and key files do exist, now let's check if they are valid:
trialcontext = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
try:
trialcontext.load_cert_chain(https_cert, https_key)
logging.info("HTTPS keys are OK")
except:
logging.warning(T("Disabled HTTPS because of invalid CERT and KEY files"))
logging.info("Traceback: ", exc_info=True)
enable_https = False
sabnzbd.cfg.enable_https.set(False)
# Starting of the webserver
# Determine if this system has multiple definitions for 'localhost'
hosts = all_localhosts()
multilocal = len(hosts) > 1 and cherryhost in ("localhost", "0.0.0.0")
# For 0.0.0.0 CherryPy will always pick IPv4, so make sure the secondary localhost is IPv6
if multilocal and cherryhost == "0.0.0.0" and hosts[1] == "127.0.0.1":
hosts[1] = "::1"
# The Windows binary requires numeric localhost as primary address
if cherryhost == "localhost":
cherryhost = hosts[0]
if enable_https:
if https_port:
# Extra HTTP port for primary localhost
attach_server(cherryhost, cherryport)
if multilocal:
# Extra HTTP port for secondary localhost
attach_server(hosts[1], cherryport)
# Extra HTTPS port for secondary localhost
attach_server(hosts[1], https_port, https_cert, https_key, https_chain)
cherryport = https_port
elif multilocal:
# Extra HTTPS port for secondary localhost
attach_server(hosts[1], cherryport, https_cert, https_key, https_chain)
cherrypy.config.update(
{
"server.ssl_module": "builtin",
"server.ssl_certificate": https_cert,
"server.ssl_private_key": https_key,
"server.ssl_certificate_chain": https_chain,
}
)
elif multilocal:
# Extra HTTP port for secondary localhost
attach_server(hosts[1], cherryport)
if no_login:
sabnzbd.cfg.username.set("")
sabnzbd.cfg.password.set("")
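    # MIME types that CherryPy should gzip-compress when serving responses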
mime_gzip = (
"text/*",
"application/javascript",
"application/x-javascript",
"application/json",
"application/xml",
"application/vnd.ms-fontobject",
"application/font*",
"image/svg+xml",
)
cherrypy.config.update(
{
"server.environment": "production",
"server.socket_host": cherryhost,
"server.socket_port": cherryport,
"server.shutdown_timeout": 0,
"log.screen": False,
"engine.autoreload.on": False,
"tools.encode.on": True,
"tools.gzip.on": True,
"tools.gzip.mime_types": mime_gzip,
"request.show_tracebacks": True,
"error_page.401": sabnzbd.panic.error_page_401,
"error_page.404": sabnzbd.panic.error_page_404,
}
)
# Do we want CherryPy Logging? Cannot be done via the config
if cherrypylogging:
sabnzbd.WEBLOGFILE = os.path.join(logdir, DEF_LOG_CHERRY)
cherrypy.log.screen = True
cherrypy.log.access_log.propagate = True
cherrypy.log.access_file = str(sabnzbd.WEBLOGFILE)
else:
cherrypy.log.access_log.propagate = False
# Force mimetypes (OS might overwrite them)
forced_mime_types = {"css": "text/css", "js": "application/javascript"}
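    # Static-content configurations for the web interface, config pages and wizard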
static = {
"tools.staticdir.on": True,
"tools.staticdir.dir": os.path.join(sabnzbd.WEB_DIR, "static"),
"tools.staticdir.content_types": forced_mime_types,
}
staticcfg = {
"tools.staticdir.on": True,
"tools.staticdir.dir": os.path.join(sabnzbd.WEB_DIR_CONFIG, "staticcfg"),
"tools.staticdir.content_types": forced_mime_types,
}
wizard_static = {
"tools.staticdir.on": True,
"tools.staticdir.dir": os.path.join(sabnzbd.WIZARD_DIR, "static"),
"tools.staticdir.content_types": forced_mime_types,
}
appconfig = {
"/api": {
"tools.auth_basic.on": False,
"tools.response_headers.on": True,
"tools.response_headers.headers": [("Access-Control-Allow-Origin", "*")],
},
"/static": static,
"/wizard/static": wizard_static,
"/favicon.ico": {
"tools.staticfile.on": True,
"tools.staticfile.filename": os.path.join(sabnzbd.WEB_DIR_CONFIG, "staticcfg", "ico", "favicon.ico"),
},
"/staticcfg": staticcfg,
}
# Make available from both URLs
main_page = sabnzbd.interface.MainPage()
cherrypy.Application.relative_urls = "server"
cherrypy.tree.mount(main_page, "/", config=appconfig)
cherrypy.tree.mount(main_page, sabnzbd.cfg.url_base(), config=appconfig)
# Set authentication for CherryPy
sabnzbd.interface.set_auth(cherrypy.config)
logging.info("Starting web-interface on %s:%s", cherryhost, cherryport)
sabnzbd.cfg.log_level.callback(guard_loglevel)
try:
cherrypy.engine.start()
except:
logging.error(T("Failed to start web-interface: "), exc_info=True)
abort_and_show_error(browserhost, cherryport)
# Wait for server to become ready
cherrypy.engine.wait(cherrypy.process.wspbus.states.STARTED)
if sabnzbd.WIN32:
if enable_https:
mode = "s"
else:
mode = ""
api_url = "http%s://%s:%s%s/api?apikey=%s" % (
mode,
browserhost,
cherryport,
sabnzbd.cfg.url_base(),
sabnzbd.cfg.api_key(),
)
# Write URL directly to registry
set_connection_info(api_url)
if pid_path or pid_file:
sabnzbd.pid_file(pid_path, pid_file, cherryport)
# Stop here in case of fatal errors
if sabnzbd.NO_DOWNLOADING:
return
# Start all SABnzbd tasks
logging.info("Starting %s-%s", sabnzbd.MY_NAME, sabnzbd.__version__)
try:
sabnzbd.start()
except:
logging.exception("Failed to start %s-%s", sabnzbd.MY_NAME, sabnzbd.__version__)
sabnzbd.halt()
# Upload any nzb/zip/rar/nzb.gz/nzb.bz2 files from file association
if upload_nzbs:
for upload_nzb in upload_nzbs:
sabnzbd.add_nzbfile(upload_nzb)
# Set URL for browser
if enable_https:
browser_url = "https://%s:%s%s" % (browserhost, cherryport, sabnzbd.cfg.url_base())
else:
browser_url = "http://%s:%s%s" % (browserhost, cherryport, sabnzbd.cfg.url_base())
sabnzbd.BROWSER_URL = browser_url
if not autorestarted:
launch_a_browser(browser_url)
notifier.send_notification("SABnzbd", T("SABnzbd %s started") % sabnzbd.__version__, "startup")
# Now's the time to check for a new version
check_latest_version()
autorestarted = False
    # Bonjour/zeroconf needs an IP address. Let's try to find it.
z_host = localipv4() # IPv4 address of the LAN interface. This is the normal use case
if not z_host:
        # No address found (no network / default route), so fall back to localhost
z_host = "127.0.0.1"
elif probablyipv4(cherryhost) and cherryhost not in LOCALHOSTS + ("0.0.0.0", "::"):
        # A hard-configured cherryhost other than the usual values, so use that (right or wrong)
z_host = cherryhost
logging.debug("bonjour/zeroconf using host: %s", z_host)
sabnzbd.zconfig.set_bonjour(z_host, cherryport)
# Have to keep this running, otherwise logging will terminate
timer = 0
while not sabnzbd.SABSTOP:
time.sleep(3)
# Check for loglevel changes
if LOG_FLAG:
LOG_FLAG = False
level = LOGLEVELS[sabnzbd.cfg.log_level() + 1]
logger.setLevel(level)
if console_logging:
console.setLevel(level)
# 30 sec polling tasks
if timer > 9:
timer = 0
# Keep OS awake (if needed)
sabnzbd.keep_awake()
# Restart scheduler (if needed)
sabnzbd.Scheduler.restart(plan_restart=False)
# Save config (if needed)
config.save_config()
# Check the threads
if not sabnzbd.check_all_tasks():
autorestarted = True
sabnzbd.TRIGGER_RESTART = True
else:
timer += 1
# 3 sec polling tasks
# Check for auto-restart request
# Or special restart cases like Mac and WindowsService
if sabnzbd.TRIGGER_RESTART:
logging.info("Performing triggered restart")
# Shutdown
sabnzbd.shutdown_program()
if sabnzbd.Downloader.paused:
sabnzbd.RESTART_ARGS.append("-p")
if autorestarted:
sabnzbd.RESTART_ARGS.append("--autorestarted")
sys.argv = sabnzbd.RESTART_ARGS
os.chdir(org_dir)
# If macOS frozen restart of app instead of embedded python
if hasattr(sys, "frozen") and sabnzbd.DARWIN:
# [[NSProcessInfo processInfo] processIdentifier]]
# logging.info("%s" % (NSProcessInfo.processInfo().processIdentifier()))
my_pid = os.getpid()
my_name = sabnzbd.MY_FULLNAME.replace("/Contents/MacOS/SABnzbd", "")
my_args = " ".join(sys.argv[1:])
cmd = 'kill -9 %s && open "%s" --args %s' % (my_pid, my_name, my_args)
logging.info("Launching: %s", cmd)
os.system(cmd)
elif sabnzbd.WIN_SERVICE:
# Use external service handler to do the restart
# Wait 5 seconds to clean up
subprocess.Popen("timeout 5 & sc start SABnzbd", shell=True)
else:
cherrypy.engine._do_execv()
config.save_config()
if sabnzbd.WINTRAY:
sabnzbd.WINTRAY.terminate = True
if sabnzbd.WIN32:
del_connection_info()
# Send our final goodbyes!
notifier.send_notification("SABnzbd", T("SABnzbd shutdown finished"), "startup")
logging.info("Leaving SABnzbd")
sys.stderr.flush()
sys.stdout.flush()
sabnzbd.pid_file()
if hasattr(sys, "frozen") and sabnzbd.DARWIN:
try:
AppHelper.stopEventLoop()
except:
            # Failing AppHelper library!
os._exit(0)
elif sabnzbd.WIN_SERVICE:
# Do nothing, let service handle it
pass
else:
os._exit(0)
##############################################################################
# Windows Service Support
##############################################################################
if sabnzbd.WIN32:
import servicemanager
class SABnzbd(win32serviceutil.ServiceFramework):
""" Win32 Service Handler """
_svc_name_ = "SABnzbd"
_svc_display_name_ = "SABnzbd Binary Newsreader"
_svc_deps_ = ["EventLog", "Tcpip"]
_svc_description_ = (
"Automated downloading from Usenet. "
'Set to "automatic" to start the service at system startup. '
"You may need to login with a real user account when you need "
"access to network shares."
)
# Only SABnzbd-console.exe can print to the console, so the service is installed
# from there. But we run SABnzbd.exe so nothing is logged. Logging can cause the
# Windows Service to stop because the output buffers are full.
if hasattr(sys, "frozen"):
_exe_name_ = "SABnzbd.exe"
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
sabnzbd.WIN_SERVICE = self
def SvcDoRun(self):
msg = "SABnzbd-service %s" % sabnzbd.__version__
self.Logger(servicemanager.PYS_SERVICE_STARTED, msg + " has started")
sys.argv = get_serv_parms(self._svc_name_)
main()
self.Logger(servicemanager.PYS_SERVICE_STOPPED, msg + " has stopped")
def SvcStop(self):
sabnzbd.shutdown_program()
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
win32event.SetEvent(self.hWaitStop)
def Logger(self, state, msg):
win32evtlogutil.ReportEvent(
self._svc_display_name_, state, 0, servicemanager.EVENTLOG_INFORMATION_TYPE, (self._svc_name_, msg)
)
def ErrLogger(self, msg, text):
win32evtlogutil.ReportEvent(
self._svc_display_name_,
servicemanager.PYS_SERVICE_STOPPED,
0,
servicemanager.EVENTLOG_ERROR_TYPE,
(self._svc_name_, msg),
text,
)
SERVICE_MSG = """
You may need to set additional Service parameters!
Verify the settings in Windows Services (services.msc).
https://sabnzbd.org/wiki/advanced/sabnzbd-as-a-windows-service
"""
def handle_windows_service():
"""Handle everything for Windows Service
Returns True when any service commands were detected or
when we have started as a service.
"""
# Detect if running as Windows Service (only Vista and above!)
# Adapted from https://stackoverflow.com/a/55248281/5235502
# Only works when run from the exe-files
if hasattr(sys, "frozen") and win32ts.ProcessIdToSessionId(win32api.GetCurrentProcessId()) == 0:
servicemanager.Initialize()
servicemanager.PrepareToHostSingle(SABnzbd)
servicemanager.StartServiceCtrlDispatcher()
return True
# Handle installation and other options
service, sab_opts, serv_opts, _upload_nzbs = commandline_handler()
if service:
if service in ("install", "update"):
# In this case check for required parameters
path = get_f_option(sab_opts)
if not path:
print(("The -f <path> parameter is required.\n" "Use: -f <path> %s" % service))
return True
# First run the service installed, because this will
# set the service key in the Registry
win32serviceutil.HandleCommandLine(SABnzbd, argv=serv_opts)
# Add our own parameter to the Registry
if set_serv_parms(SABnzbd._svc_name_, sab_opts):
print(SERVICE_MSG)
else:
print("ERROR: Cannot set required registry info.")
else:
# Pass the other commands directly
win32serviceutil.HandleCommandLine(SABnzbd)
return bool(service)
##############################################################################
# Platform specific startup code
##############################################################################
if __name__ == "__main__":
# We can only register these in the main thread
signal.signal(signal.SIGINT, sabnzbd.sig_handler)
signal.signal(signal.SIGTERM, sabnzbd.sig_handler)
if sabnzbd.WIN32:
if not handle_windows_service():
main()
elif sabnzbd.DARWIN and sabnzbd.FOUNDATION:
# macOS binary runner
from threading import Thread
from PyObjCTools import AppHelper
from AppKit import NSApplication
from sabnzbd.osxmenu import SABnzbdDelegate
    # Need to run the main application in a separate thread because the eventLoop
# has to be in the main thread. The eventLoop is required for the menu.
# This code is made with trial-and-error, please improve!
class startApp(Thread):
def run(self):
main()
AppHelper.stopEventLoop()
sabApp = startApp()
sabApp.start()
# Initialize the menu
shared_app = NSApplication.sharedApplication()
sabnzbd_menu = SABnzbdDelegate.alloc().init()
shared_app.setDelegate_(sabnzbd_menu)
# Build the menu
sabnzbd_menu.awakeFromNib()
# Run the main eventloop
AppHelper.runEventLoop()
else:
main()
| [] | [] | [
"SSL_CERT_FILE",
"AppData",
"USERPROFILE",
"HOME",
"SABnzbd",
"DISPLAY"
] | [] | ["SSL_CERT_FILE", "AppData", "USERPROFILE", "HOME", "SABnzbd", "DISPLAY"] | python | 6 | 0 | |
executor/executor_test.go | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor_test
import (
"flag"
"fmt"
"math"
"os"
"strconv"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
gofail "github.com/etcd-io/gofail/runtime"
"github.com/golang/protobuf/proto"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
pb "github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/parser"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/executor"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/planner"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/store/mockstore/mocktikv"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/admin"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tidb/util/testkit"
"github.com/pingcap/tidb/util/testleak"
"github.com/pingcap/tidb/util/testutil"
"github.com/pingcap/tidb/util/timeutil"
"github.com/pingcap/tipb/go-tipb"
"golang.org/x/net/context"
)
func TestT(t *testing.T) {
CustomVerboseFlag = true
logLevel := os.Getenv("log_level")
logutil.InitLogger(&logutil.LogConfig{
Level: logLevel,
})
TestingT(t)
}
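// Register the executor test suites with the check framework.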
var _ = Suite(&testSuite{})
var _ = Suite(&testContextOptionSuite{})
var _ = Suite(&testBypassSuite{})
type testSuite struct {
cluster *mocktikv.Cluster
mvccStore mocktikv.MVCCStore
store kv.Storage
domain *domain.Domain
*parser.Parser
ctx *mock.Context
autoIDStep int64
}
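// mockTikv selects whether the tests run against an in-process mock TiKV store.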
var mockTikv = flag.Bool("mockTikv", true, "use mock tikv store in executor test")
func (s *testSuite) SetUpSuite(c *C) {
testleak.BeforeTest()
s.autoIDStep = autoid.GetStep()
autoid.SetStep(5000)
s.Parser = parser.New()
flag.Lookup("mockTikv")
useMockTikv := *mockTikv
if useMockTikv {
s.cluster = mocktikv.NewCluster()
mocktikv.BootstrapWithSingleStore(s.cluster)
s.mvccStore = mocktikv.MustNewMVCCStore()
store, err := mockstore.NewMockTikvStore(
mockstore.WithCluster(s.cluster),
mockstore.WithMVCCStore(s.mvccStore),
)
c.Assert(err, IsNil)
s.store = store
session.SetSchemaLease(0)
session.SetStatsLease(0)
}
d, err := session.BootstrapSession(s.store)
c.Assert(err, IsNil)
d.SetStatsUpdating(true)
s.domain = d
}
func (s *testSuite) TearDownSuite(c *C) {
s.domain.Close()
s.store.Close()
autoid.SetStep(s.autoIDStep)
testleak.AfterTest(c)()
}
func (s *testSuite) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show tables")
for _, tb := range r.Rows() {
tableName := tb[0]
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
func (s *testSuite) TestAdmin(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists admin_test")
tk.MustExec("create table admin_test (c1 int, c2 int, c3 int default 1, index (c1))")
tk.MustExec("insert admin_test (c1) values (1),(2),(NULL)")
ctx := context.Background()
// cancel DDL jobs test
r, err := tk.Exec("admin cancel ddl jobs 1")
c.Assert(err, IsNil, Commentf("err %v", err))
chk := r.NewChunk()
err = r.Next(ctx, chk)
c.Assert(err, IsNil)
row := chk.GetRow(0)
c.Assert(row.Len(), Equals, 2)
c.Assert(row.GetString(0), Equals, "1")
c.Assert(row.GetString(1), Equals, "error: Can't find this job")
r, err = tk.Exec("admin show ddl")
c.Assert(err, IsNil)
chk = r.NewChunk()
err = r.Next(ctx, chk)
c.Assert(err, IsNil)
row = chk.GetRow(0)
c.Assert(row.Len(), Equals, 4)
txn, err := s.store.Begin()
c.Assert(err, IsNil)
ddlInfo, err := admin.GetDDLInfo(txn)
c.Assert(err, IsNil)
c.Assert(row.GetInt64(0), Equals, ddlInfo.SchemaVer)
// TODO: Pass this test.
// rowOwnerInfos := strings.Split(row.Data[1].GetString(), ",")
// ownerInfos := strings.Split(ddlInfo.Owner.String(), ",")
// c.Assert(rowOwnerInfos[0], Equals, ownerInfos[0])
c.Assert(row.GetString(2), Equals, "")
chk = r.NewChunk()
err = r.Next(ctx, chk)
c.Assert(err, IsNil)
c.Assert(chk.NumRows() == 0, IsTrue)
err = txn.Rollback()
c.Assert(err, IsNil)
// show DDL jobs test
r, err = tk.Exec("admin show ddl jobs")
c.Assert(err, IsNil)
chk = r.NewChunk()
err = r.Next(ctx, chk)
c.Assert(err, IsNil)
row = chk.GetRow(0)
c.Assert(row.Len(), Equals, 10)
txn, err = s.store.Begin()
c.Assert(err, IsNil)
historyJobs, err := admin.GetHistoryDDLJobs(txn, admin.DefNumHistoryJobs)
c.Assert(len(historyJobs), Greater, 1)
c.Assert(len(row.GetString(1)), Greater, 0)
c.Assert(err, IsNil)
c.Assert(row.GetInt64(0), Equals, historyJobs[0].ID)
c.Assert(err, IsNil)
r, err = tk.Exec("admin show ddl jobs 20")
c.Assert(err, IsNil)
chk = r.NewChunk()
err = r.Next(ctx, chk)
c.Assert(err, IsNil)
row = chk.GetRow(0)
c.Assert(row.Len(), Equals, 10)
c.Assert(row.GetInt64(0), Equals, historyJobs[0].ID)
c.Assert(err, IsNil)
// show DDL job queries test
tk.MustExec("use test")
tk.MustExec("drop table if exists admin_test2")
tk.MustExec("create table admin_test2 (c1 int, c2 int, c3 int default 1, index (c1))")
result := tk.MustQuery(`admin show ddl job queries 1, 1, 1`)
result.Check(testkit.Rows())
result = tk.MustQuery(`admin show ddl job queries 1, 2, 3, 4`)
result.Check(testkit.Rows())
historyJob, err := admin.GetHistoryDDLJobs(txn, admin.DefNumHistoryJobs)
result = tk.MustQuery(fmt.Sprintf("admin show ddl job queries %d", historyJob[0].ID))
result.Check(testkit.Rows(historyJob[0].Query))
c.Assert(err, IsNil)
// check table test
tk.MustExec("create table admin_test1 (c1 int, c2 int default 1, index (c1))")
tk.MustExec("insert admin_test1 (c1) values (21),(22)")
r, err = tk.Exec("admin check table admin_test, admin_test1")
c.Assert(err, IsNil)
c.Assert(r, IsNil)
// error table name
r, err = tk.Exec("admin check table admin_test_error")
c.Assert(err, NotNil)
// different index values
sctx := tk.Se.(sessionctx.Context)
dom := domain.GetDomain(sctx)
is := dom.InfoSchema()
c.Assert(is, NotNil)
tb, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("admin_test"))
c.Assert(err, IsNil)
c.Assert(tb.Indices(), HasLen, 1)
_, err = tb.Indices()[0].Create(mock.NewContext(), txn, types.MakeDatums(int64(10)), 1)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
r, err_admin := tk.Exec("admin check table admin_test")
c.Assert(err_admin, NotNil)
if config.CheckTableBeforeDrop {
r, err = tk.Exec("drop table admin_test")
c.Assert(err.Error(), Equals, err_admin.Error())
// Drop inconsistency index.
tk.MustExec("alter table admin_test drop index c1")
tk.MustExec("admin check table admin_test")
}
// checksum table test
tk.MustExec("create table checksum_with_index (id int, count int, PRIMARY KEY(id), KEY(count))")
tk.MustExec("create table checksum_without_index (id int, count int, PRIMARY KEY(id))")
r, err = tk.Exec("admin checksum table checksum_with_index, checksum_without_index")
c.Assert(err, IsNil)
res := tk.ResultSetToResult(r, Commentf("admin checksum table"))
// Mocktikv returns 1 for every table/index scan, then we will xor the checksums of a table.
// For "checksum_with_index", we have two checksums, so the result will be 1^1 = 0.
// For "checksum_without_index", we only have one checksum, so the result will be 1.
res.Sort().Check(testkit.Rows("test checksum_with_index 0 2 2", "test checksum_without_index 1 1 1"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a bigint unsigned primary key, b int, c int, index idx(a, b));")
tk.MustExec("insert into t values(1, 1, 1)")
tk.MustExec("admin check table t")
tk.MustExec("drop table if exists t1;")
tk.MustExec("CREATE TABLE t1 (c2 BOOL, PRIMARY KEY (c2));")
tk.MustExec("INSERT INTO t1 SET c2 = '0';")
tk.MustExec("ALTER TABLE t1 ADD COLUMN c3 DATETIME NULL DEFAULT '2668-02-03 17:19:31';")
tk.MustExec("ALTER TABLE t1 ADD INDEX idx2 (c3);")
tk.MustExec("ALTER TABLE t1 ADD COLUMN c4 bit(10) default 127;")
tk.MustExec("ALTER TABLE t1 ADD INDEX idx3 (c4);")
tk.MustExec("admin check table t1;")
// For add index on virtual column
tk.MustExec("drop table if exists t1;")
tk.MustExec(`create table t1 (
a int as (JSON_EXTRACT(k,'$.a')),
c double as (JSON_EXTRACT(k,'$.c')),
d decimal(20,10) as (JSON_EXTRACT(k,'$.d')),
e char(10) as (JSON_EXTRACT(k,'$.e')),
f date as (JSON_EXTRACT(k,'$.f')),
g time as (JSON_EXTRACT(k,'$.g')),
h datetime as (JSON_EXTRACT(k,'$.h')),
i timestamp as (JSON_EXTRACT(k,'$.i')),
j year as (JSON_EXTRACT(k,'$.j')),
k json);`)
tk.MustExec("insert into t1 set k='{\"a\": 100,\"c\":1.234,\"d\":1.2340000000,\"e\":\"abcdefg\",\"f\":\"2018-09-28\",\"g\":\"12:59:59\",\"h\":\"2018-09-28 12:59:59\",\"i\":\"2018-09-28 16:40:33\",\"j\":\"2018\"}';")
tk.MustExec("alter table t1 add index idx_a(a);")
tk.MustExec("alter table t1 add index idx_c(c);")
tk.MustExec("alter table t1 add index idx_d(d);")
tk.MustExec("alter table t1 add index idx_e(e);")
tk.MustExec("alter table t1 add index idx_f(f);")
tk.MustExec("alter table t1 add index idx_g(g);")
tk.MustExec("alter table t1 add index idx_h(h);")
tk.MustExec("alter table t1 add index idx_j(j);")
tk.MustExec("alter table t1 add index idx_i(i);")
tk.MustExec("alter table t1 add index idx_m(a,c,d,e,f,g,h,i,j);")
tk.MustExec("admin check table t1;")
tk.MustExec("drop table if exists t1;")
tk.MustExec("CREATE TABLE t1 (c1 int);")
tk.MustExec("INSERT INTO t1 SET c1 = 1;")
tk.MustExec("ALTER TABLE t1 ADD COLUMN cc1 CHAR(36) NULL DEFAULT '';")
tk.MustExec("ALTER TABLE t1 ADD COLUMN cc2 VARCHAR(36) NULL DEFAULT ''")
tk.MustExec("ALTER TABLE t1 ADD INDEX idx1 (cc1);")
tk.MustExec("ALTER TABLE t1 ADD INDEX idx2 (cc2);")
tk.MustExec("admin check table t1;")
}
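// fillData creates the given table and inserts the two rows shared by several SELECT tests.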
func (s *testSuite) fillData(tk *testkit.TestKit, table string) {
tk.MustExec("use test")
tk.MustExec(fmt.Sprintf("create table %s(id int not null default 1, name varchar(255), PRIMARY KEY(id));", table))
// insert data
tk.MustExec(fmt.Sprintf("insert INTO %s VALUES (1, \"hello\");", table))
tk.CheckExecResult(1, 0)
tk.MustExec(fmt.Sprintf("insert into %s values (2, \"hello\");", table))
tk.CheckExecResult(1, 0)
}
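// testCase describes one LOAD DATA input: two data chunks, the rows expected in the table and any data that should remain unconsumed.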
type testCase struct {
data1 []byte
data2 []byte
expected []string
restData []byte
}
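// checkCases feeds each case into LoadDataInfo.InsertData and verifies both the rows stored in the table and any leftover input data.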
func checkCases(tests []testCase, ld *executor.LoadDataInfo,
c *C, tk *testkit.TestKit, ctx sessionctx.Context, selectSQL, deleteSQL string) {
origin := ld.IgnoreLines
for _, tt := range tests {
ld.IgnoreLines = origin
c.Assert(ctx.NewTxn(), IsNil)
ctx.GetSessionVars().StmtCtx.DupKeyAsWarning = true
ctx.GetSessionVars().StmtCtx.BadNullAsWarning = true
data, reachLimit, err1 := ld.InsertData(tt.data1, tt.data2)
c.Assert(err1, IsNil)
c.Assert(reachLimit, IsFalse)
if tt.restData == nil {
c.Assert(data, HasLen, 0,
Commentf("data1:%v, data2:%v, data:%v", string(tt.data1), string(tt.data2), string(data)))
} else {
c.Assert(data, DeepEquals, tt.restData,
Commentf("data1:%v, data2:%v, data:%v", string(tt.data1), string(tt.data2), string(data)))
}
ctx.StmtCommit()
err1 = ctx.Txn(true).Commit(context.Background())
c.Assert(err1, IsNil)
r := tk.MustQuery(selectSQL)
r.Check(testutil.RowsWithSep("|", tt.expected...))
tk.MustExec(deleteSQL)
}
}
func (s *testSuite) TestSelectWithoutFrom(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("select 1 + 2*3;")
r.Check(testkit.Rows("7"))
r = tk.MustQuery(`select _utf8"string";`)
r.Check(testkit.Rows("string"))
r = tk.MustQuery("select 1 order by 1;")
r.Check(testkit.Rows("1"))
}
// TestSelectBackslashN Issue 3685.
func (s *testSuite) TestSelectBackslashN(c *C) {
tk := testkit.NewTestKit(c, s.store)
sql := `select \N;`
r := tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err := tk.Exec(sql)
c.Check(err, IsNil)
fields := rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "NULL")
sql = `select "\N";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("N"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `N`)
tk.MustExec("use test;")
tk.MustExec("create table test (`\\N` int);")
tk.MustExec("insert into test values (1);")
tk.CheckExecResult(1, 0)
sql = "select * from test;"
r = tk.MustQuery(sql)
r.Check(testkit.Rows("1"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `\N`)
sql = `select \N from test;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(err, IsNil)
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `NULL`)
sql = `select (\N) from test;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `NULL`)
sql = "select `\\N` from test;"
r = tk.MustQuery(sql)
r.Check(testkit.Rows("1"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `\N`)
sql = "select (`\\N`) from test;"
r = tk.MustQuery(sql)
r.Check(testkit.Rows("1"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `\N`)
sql = `select '\N' from test;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("N"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `N`)
sql = `select ('\N') from test;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("N"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `N`)
}
// TestSelectNull Issue #4053.
func (s *testSuite) TestSelectNull(c *C) {
tk := testkit.NewTestKit(c, s.store)
sql := `select nUll;`
r := tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err := tk.Exec(sql)
c.Check(err, IsNil)
fields := rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `NULL`)
sql = `select (null);`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `NULL`)
sql = `select null+NULL;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(err, IsNil)
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `null+NULL`)
}
// TestSelectStringLiteral Issue #3686.
func (s *testSuite) TestSelectStringLiteral(c *C) {
tk := testkit.NewTestKit(c, s.store)
sql := `select 'abc';`
r := tk.MustQuery(sql)
r.Check(testkit.Rows("abc"))
rs, err := tk.Exec(sql)
c.Check(err, IsNil)
fields := rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `abc`)
sql = `select (('abc'));`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("abc"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `abc`)
sql = `select 'abc'+'def';`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("0"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `'abc'+'def'`)
// Below checks whether leading invalid chars are trimmed.
sql = "select '\n';"
r = tk.MustQuery(sql)
r.Check(testkit.Rows("\n"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "")
sql = "select '\t col';" // Lowercased letter is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "col")
sql = "select '\t Col';" // Uppercased letter is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "Col")
sql = "select '\n\t 中文 col';" // Chinese char is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "中文 col")
sql = "select ' \r\n .col';" // Punctuation is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, ".col")
sql = "select ' 😆col';" // Emoji is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "😆col")
// Below checks whether trailing invalid chars are preserved.
sql = `select 'abc ';`
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "abc ")
sql = `select ' abc 123 ';`
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "abc 123 ")
// Issue #4239.
sql = `select 'a' ' ' 'string';`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("a string"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "a")
sql = `select 'a' " " "string";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("a string"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "a")
sql = `select 'string' 'string';`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("stringstring"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "string")
sql = `select "ss" "a";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("ssa"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "ss")
sql = `select "ss" "a" "b";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("ssab"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "ss")
sql = `select "ss" "a" ' ' "b";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("ssa b"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "ss")
sql = `select "ss" "a" ' ' "b" ' ' "d";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("ssa b d"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "ss")
}
func (s *testSuite) TestSelectLimit(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
s.fillData(tk, "select_limit")
tk.MustExec("insert INTO select_limit VALUES (3, \"hello\");")
tk.CheckExecResult(1, 0)
tk.MustExec("insert INTO select_limit VALUES (4, \"hello\");")
tk.CheckExecResult(1, 0)
r := tk.MustQuery("select * from select_limit limit 1;")
r.Check(testkit.Rows("1 hello"))
r = tk.MustQuery("select id from (select * from select_limit limit 1) k where id != 1;")
r.Check(testkit.Rows())
r = tk.MustQuery("select * from select_limit limit 18446744073709551615 offset 0;")
r.Check(testkit.Rows("1 hello", "2 hello", "3 hello", "4 hello"))
r = tk.MustQuery("select * from select_limit limit 18446744073709551615 offset 1;")
r.Check(testkit.Rows("2 hello", "3 hello", "4 hello"))
r = tk.MustQuery("select * from select_limit limit 18446744073709551615 offset 3;")
r.Check(testkit.Rows("4 hello"))
_, err := tk.Exec("select * from select_limit limit 18446744073709551616 offset 3;")
c.Assert(err, NotNil)
}
func (s *testSuite) TestSelectOrderBy(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
s.fillData(tk, "select_order_test")
// Test star field
r := tk.MustQuery("select * from select_order_test where id = 1 order by id limit 1 offset 0;")
r.Check(testkit.Rows("1 hello"))
r = tk.MustQuery("select id from select_order_test order by id desc limit 1 ")
r.Check(testkit.Rows("2"))
r = tk.MustQuery("select id from select_order_test order by id + 1 desc limit 1 ")
r.Check(testkit.Rows("2"))
// Test limit
r = tk.MustQuery("select * from select_order_test order by name, id limit 1 offset 0;")
r.Check(testkit.Rows("1 hello"))
// Test limit
r = tk.MustQuery("select id as c1, name from select_order_test order by 2, id limit 1 offset 0;")
r.Check(testkit.Rows("1 hello"))
// Test limit overflow
r = tk.MustQuery("select * from select_order_test order by name, id limit 100 offset 0;")
r.Check(testkit.Rows("1 hello", "2 hello"))
// Test offset overflow
r = tk.MustQuery("select * from select_order_test order by name, id limit 1 offset 100;")
r.Check(testkit.Rows())
// Test limit exceeds int range.
r = tk.MustQuery("select id from select_order_test order by name, id limit 18446744073709551615;")
r.Check(testkit.Rows("1", "2"))
// Test multiple field
r = tk.MustQuery("select id, name from select_order_test where id = 1 group by id, name limit 1 offset 0;")
r.Check(testkit.Rows("1 hello"))
// Test limit + order by
for i := 3; i <= 10; i += 1 {
tk.MustExec(fmt.Sprintf("insert INTO select_order_test VALUES (%d, \"zz\");", i))
}
tk.MustExec("insert INTO select_order_test VALUES (10086, \"hi\");")
for i := 11; i <= 20; i += 1 {
tk.MustExec(fmt.Sprintf("insert INTO select_order_test VALUES (%d, \"hh\");", i))
}
for i := 21; i <= 30; i += 1 {
tk.MustExec(fmt.Sprintf("insert INTO select_order_test VALUES (%d, \"zz\");", i))
}
tk.MustExec("insert INTO select_order_test VALUES (1501, \"aa\");")
r = tk.MustQuery("select * from select_order_test order by name, id limit 1 offset 3;")
r.Check(testkit.Rows("11 hh"))
tk.MustExec("drop table select_order_test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c int, d int)")
tk.MustExec("insert t values (1, 1)")
tk.MustExec("insert t values (1, 2)")
tk.MustExec("insert t values (1, 3)")
r = tk.MustQuery("select 1-d as d from t order by d;")
r.Check(testkit.Rows("-2", "-1", "0"))
r = tk.MustQuery("select 1-d as d from t order by d + 1;")
r.Check(testkit.Rows("0", "-1", "-2"))
r = tk.MustQuery("select t.d from t order by d;")
r.Check(testkit.Rows("1", "2", "3"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int, b int, c int)")
tk.MustExec("insert t values (1, 2, 3)")
r = tk.MustQuery("select b from (select a,b from t order by a,c) t")
r.Check(testkit.Rows("2"))
r = tk.MustQuery("select b from (select a,b from t order by a,c limit 1) t")
r.Check(testkit.Rows("2"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, index idx(a))")
tk.MustExec("insert into t values(1, 1), (2, 2)")
tk.MustQuery("select * from t where 1 order by b").Check(testkit.Rows("1 1", "2 2"))
tk.MustQuery("select * from t where a between 1 and 2 order by a desc").Check(testkit.Rows("2 2", "1 1"))
// Test double read and topN is pushed down to first read plannercore.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key, b int, c int, index idx(b))")
tk.MustExec("insert into t values(1, 3, 1)")
tk.MustExec("insert into t values(2, 2, 2)")
tk.MustExec("insert into t values(3, 1, 3)")
tk.MustQuery("select * from t use index(idx) order by a desc limit 1").Check(testkit.Rows("3 1 3"))
// Test double read which needs to keep order.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, key b (b))")
tk.Se.GetSessionVars().IndexLookupSize = 3
for i := 0; i < 10; i++ {
tk.MustExec(fmt.Sprintf("insert into t values(%d, %d)", i, 10-i))
}
tk.MustQuery("select a from t use index(b) order by b").Check(testkit.Rows("9", "8", "7", "6", "5", "4", "3", "2", "1", "0"))
}
func (s *testSuite) TestOrderBy(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 int, c2 int, c3 varchar(20))")
tk.MustExec("insert into t values (1, 2, 'abc'), (2, 1, 'bcd')")
// Fix issue https://github.com/pingcap/tidb/issues/337
tk.MustQuery("select c1 as a, c1 as b from t order by c1").Check(testkit.Rows("1 1", "2 2"))
tk.MustQuery("select c1 as a, t.c1 as a from t order by a desc").Check(testkit.Rows("2 2", "1 1"))
tk.MustQuery("select c1 as c2 from t order by c2").Check(testkit.Rows("1", "2"))
tk.MustQuery("select sum(c1) from t order by sum(c1)").Check(testkit.Rows("3"))
tk.MustQuery("select c1 as c2 from t order by c2 + 1").Check(testkit.Rows("2", "1"))
// Order by position.
tk.MustQuery("select * from t order by 1").Check(testkit.Rows("1 2 abc", "2 1 bcd"))
tk.MustQuery("select * from t order by 2").Check(testkit.Rows("2 1 bcd", "1 2 abc"))
// Order by binary.
tk.MustQuery("select c1, c3 from t order by binary c1 desc").Check(testkit.Rows("2 bcd", "1 abc"))
tk.MustQuery("select c1, c2 from t order by binary c3").Check(testkit.Rows("1 2", "2 1"))
}
func (s *testSuite) TestSelectErrorRow(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
_, err := tk.Exec("select row(1, 1) from test")
c.Assert(err, NotNil)
_, err = tk.Exec("select * from test group by row(1, 1);")
c.Assert(err, NotNil)
_, err = tk.Exec("select * from test order by row(1, 1);")
c.Assert(err, NotNil)
_, err = tk.Exec("select * from test having row(1, 1);")
c.Assert(err, NotNil)
_, err = tk.Exec("select (select 1, 1) from test;")
c.Assert(err, NotNil)
_, err = tk.Exec("select * from test group by (select 1, 1);")
c.Assert(err, NotNil)
_, err = tk.Exec("select * from test order by (select 1, 1);")
c.Assert(err, NotNil)
_, err = tk.Exec("select * from test having (select 1, 1);")
c.Assert(err, NotNil)
}
// TestIssue2612 is related to https://github.com/pingcap/tidb/issues/2612
func (s *testSuite) TestIssue2612(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t (
create_at datetime NOT NULL DEFAULT '1000-01-01 00:00:00',
finish_at datetime NOT NULL DEFAULT '1000-01-01 00:00:00');`)
tk.MustExec(`insert into t values ('2016-02-13 15:32:24', '2016-02-11 17:23:22');`)
rs, err := tk.Exec(`select timediff(finish_at, create_at) from t;`)
c.Assert(err, IsNil)
chk := rs.NewChunk()
err = rs.Next(context.Background(), chk)
c.Assert(err, IsNil)
c.Assert(chk.GetRow(0).GetDuration(0, 0).String(), Equals, "-46:09:02")
}
// TestIssue345 is related to https://github.com/pingcap/tidb/issues/345
func (s *testSuite) TestIssue345(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`drop table if exists t1, t2`)
tk.MustExec(`create table t1 (c1 int);`)
tk.MustExec(`create table t2 (c2 int);`)
tk.MustExec(`insert into t1 values (1);`)
tk.MustExec(`insert into t2 values (2);`)
tk.MustExec(`update t1, t2 set t1.c1 = 2, t2.c2 = 1;`)
tk.MustExec(`update t1, t2 set c1 = 2, c2 = 1;`)
tk.MustExec(`update t1 as a, t2 as b set a.c1 = 2, b.c2 = 1;`)
// Check t1 content
r := tk.MustQuery("SELECT * FROM t1;")
r.Check(testkit.Rows("2"))
// Check t2 content
r = tk.MustQuery("SELECT * FROM t2;")
r.Check(testkit.Rows("1"))
tk.MustExec(`update t1 as a, t2 as t1 set a.c1 = 1, t1.c2 = 2;`)
// Check t1 content
r = tk.MustQuery("SELECT * FROM t1;")
r.Check(testkit.Rows("1"))
// Check t2 content
r = tk.MustQuery("SELECT * FROM t2;")
r.Check(testkit.Rows("2"))
_, err := tk.Exec(`update t1 as a, t2 set t1.c1 = 10;`)
c.Assert(err, NotNil)
}
func (s *testSuite) TestIssue5055(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`drop table if exists t1, t2`)
tk.MustExec(`create table t1 (a int);`)
tk.MustExec(`create table t2 (a int);`)
tk.MustExec(`insert into t1 values(1);`)
tk.MustExec(`insert into t2 values(1);`)
result := tk.MustQuery("select tbl1.* from (select t1.a, 1 from t1) tbl1 left join t2 tbl2 on tbl1.a = tbl2.a order by tbl1.a desc limit 1;")
result.Check(testkit.Rows("1 1"))
}
func (s *testSuite) TestUnion(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
testSQL := `drop table if exists union_test; create table union_test(id int);`
tk.MustExec(testSQL)
testSQL = `drop table if exists union_test;`
tk.MustExec(testSQL)
testSQL = `create table union_test(id int);`
tk.MustExec(testSQL)
testSQL = `insert union_test values (1),(2)`
tk.MustExec(testSQL)
testSQL = `select * from (select id from union_test union select id from union_test) t order by id;`
r := tk.MustQuery(testSQL)
r.Check(testkit.Rows("1", "2"))
r = tk.MustQuery("select 1 union all select 1")
r.Check(testkit.Rows("1", "1"))
r = tk.MustQuery("select 1 union all select 1 union select 1")
r.Check(testkit.Rows("1"))
r = tk.MustQuery("select 1 as a union (select 2) order by a limit 1")
r.Check(testkit.Rows("1"))
r = tk.MustQuery("select 1 as a union (select 2) order by a limit 1, 1")
r.Check(testkit.Rows("2"))
r = tk.MustQuery("select id from union_test union all (select 1) order by id desc")
r.Check(testkit.Rows("2", "1", "1"))
r = tk.MustQuery("select id as a from union_test union (select 1) order by a desc")
r.Check(testkit.Rows("2", "1"))
r = tk.MustQuery(`select null as a union (select "abc") order by a`)
r.Check(testkit.Rows("<nil>", "abc"))
r = tk.MustQuery(`select "abc" as a union (select 1) order by a`)
r.Check(testkit.Rows("1", "abc"))
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (c int, d int)")
tk.MustExec("insert t1 values (NULL, 1)")
tk.MustExec("insert t1 values (1, 1)")
tk.MustExec("insert t1 values (1, 2)")
tk.MustExec("drop table if exists t2")
tk.MustExec("create table t2 (c int, d int)")
tk.MustExec("insert t2 values (1, 3)")
tk.MustExec("insert t2 values (1, 1)")
tk.MustExec("drop table if exists t3")
tk.MustExec("create table t3 (c int, d int)")
tk.MustExec("insert t3 values (3, 2)")
tk.MustExec("insert t3 values (4, 3)")
r = tk.MustQuery(`select sum(c1), c2 from (select c c1, d c2 from t1 union all select d c1, c c2 from t2 union all select c c1, d c2 from t3) x group by c2 order by c2`)
r.Check(testkit.Rows("5 1", "4 2", "4 3"))
tk.MustExec("drop table if exists t1, t2, t3")
tk.MustExec("create table t1 (a int primary key)")
tk.MustExec("create table t2 (a int primary key)")
tk.MustExec("create table t3 (a int primary key)")
tk.MustExec("insert t1 values (7), (8)")
tk.MustExec("insert t2 values (1), (9)")
tk.MustExec("insert t3 values (2), (3)")
r = tk.MustQuery("select * from t1 union all select * from t2 union all (select * from t3) order by a limit 2")
r.Check(testkit.Rows("1", "2"))
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1 (a int)")
tk.MustExec("create table t2 (a int)")
tk.MustExec("insert t1 values (2), (1)")
tk.MustExec("insert t2 values (3), (4)")
r = tk.MustQuery("select * from t1 union all (select * from t2) order by a limit 1")
r.Check(testkit.Rows("1"))
r = tk.MustQuery("select (select * from t1 where a != t.a union all (select * from t2 where a != t.a) order by a limit 1) from t1 t")
r.Check(testkit.Rows("1", "2"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (id int unsigned primary key auto_increment, c1 int, c2 int, index c1_c2 (c1, c2))")
tk.MustExec("insert into t (c1, c2) values (1, 1)")
tk.MustExec("insert into t (c1, c2) values (1, 2)")
tk.MustExec("insert into t (c1, c2) values (2, 3)")
r = tk.MustQuery("select * from (select * from t where t.c1 = 1 union select * from t where t.id = 1) s order by s.id")
r.Check(testkit.Rows("1 1 1", "2 1 2"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (f1 DATE)")
tk.MustExec("INSERT INTO t VALUES ('1978-11-26')")
r = tk.MustQuery("SELECT f1+0 FROM t UNION SELECT f1+0 FROM t")
r.Check(testkit.Rows("19781126"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (a int, b int)")
tk.MustExec("INSERT INTO t VALUES ('1', '1')")
r = tk.MustQuery("select b from (SELECT * FROM t UNION ALL SELECT a, b FROM t order by a) t")
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (a DECIMAL(4,2))")
tk.MustExec("INSERT INTO t VALUE(12.34)")
r = tk.MustQuery("SELECT 1 AS c UNION select a FROM t")
r.Sort().Check(testkit.Rows("1.00", "12.34"))
// Issue #3771.
r = tk.MustQuery("SELECT 'a' UNION SELECT CONCAT('a', -4)")
r.Sort().Check(testkit.Rows("a", "a-4"))
// Test for a data race when union branches assign the same user variable.
tk.MustQuery("SELECT @x:=0 UNION ALL SELECT @x:=0 UNION ALL SELECT @x")
// Test the result field type.
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("CREATE TABLE t1 (a date)")
tk.MustExec("CREATE TABLE t2 (a date)")
tk.MustExec("SELECT a from t1 UNION select a FROM t2")
tk.MustQuery("show create table t1").Check(testkit.Rows("t1 CREATE TABLE `t1` (\n" + " `a` date DEFAULT NULL\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
// Moved from the session test.
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1 (c double);")
tk.MustExec("create table t2 (c double);")
tk.MustExec("insert into t1 value (73);")
tk.MustExec("insert into t2 value (930);")
// If the unspecified column flen is set to 0, it will cause a bug in union.
// This test is used to prevent the bug from reappearing.
tk.MustQuery("select c from t1 union (select c from t2) order by c").Check(testkit.Rows("73", "930"))
// Issue #5703.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a date)")
tk.MustExec("insert into t value ('2017-01-01'), ('2017-01-02')")
r = tk.MustQuery("(select a from t where a < 0) union (select a from t where a > 0) order by a")
r.Check(testkit.Rows("2017-01-01", "2017-01-02"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t value(0),(0)")
tk.MustQuery("select 1 from (select a from t union all select a from t) tmp").Check(testkit.Rows("1", "1", "1", "1"))
tk.MustQuery("select 10 as a from dual union select a from t order by a desc limit 1 ").Check(testkit.Rows("10"))
tk.MustQuery("select -10 as a from dual union select a from t order by a limit 1 ").Check(testkit.Rows("-10"))
tk.MustQuery("select count(1) from (select a from t union all select a from t) tmp").Check(testkit.Rows("4"))
_, err := tk.Exec("select 1 from (select a from t limit 1 union all select a from t limit 1) tmp")
c.Assert(err, NotNil)
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrWrongUsage))
_, err = tk.Exec("select 1 from (select a from t order by a union all select a from t limit 1) tmp")
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrWrongUsage))
_, err = tk.Exec("(select a from t order by a) union all select a from t limit 1 union all select a from t limit 1")
c.Assert(terror.ErrorEqual(err, plannercore.ErrWrongUsage), IsTrue, Commentf("err %v", err))
_, err = tk.Exec("(select a from t limit 1) union all select a from t limit 1")
c.Assert(err, IsNil)
_, err = tk.Exec("(select a from t order by a) union all select a from t order by a")
c.Assert(err, IsNil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t value(1),(2),(3)")
tk.MustQuery("(select a from t order by a limit 2) union all (select a from t order by a desc limit 2) order by a desc limit 1,2").Check(testkit.Rows("2", "2"))
tk.MustQuery("select a from t union all select a from t order by a desc limit 5").Check(testkit.Rows("3", "3", "2", "2", "1"))
tk.MustQuery("(select a from t order by a desc limit 2) union all select a from t group by a order by a").Check(testkit.Rows("1", "2", "2", "3", "3"))
tk.MustQuery("(select a from t order by a desc limit 2) union all select 33 as a order by a desc limit 2").Check(testkit.Rows("33", "3"))
tk.MustQuery("select 1 union select 1 union all select 1").Check(testkit.Rows("1", "1"))
tk.MustQuery("select 1 union all select 1 union select 1").Check(testkit.Rows("1"))
tk.MustExec("drop table if exists t1, t2")
tk.MustExec(`create table t1(a bigint, b bigint);`)
tk.MustExec(`create table t2(a bigint, b bigint);`)
tk.MustExec(`insert into t1 values(1, 1);`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t2 values(1, 1);`)
tk.MustExec(`set @@tidb_max_chunk_size=2;`)
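// A tiny tidb_max_chunk_size makes the executors below produce many small chunks, exercising chunk-boundary handling in the union and join.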
tk.MustQuery(`select count(*) from (select t1.a, t1.b from t1 left join t2 on t1.a=t2.a union all select t1.a, t1.a from t1 left join t2 on t1.a=t2.a) tmp;`).Check(testkit.Rows("128"))
tk.MustQuery(`select tmp.a, count(*) from (select t1.a, t1.b from t1 left join t2 on t1.a=t2.a union all select t1.a, t1.a from t1 left join t2 on t1.a=t2.a) tmp;`).Check(testkit.Rows("1 128"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int)")
tk.MustExec("insert into t value(1 ,2)")
tk.MustQuery("select a, b from (select a, 0 as d, b from t union all select a, 0 as d, b from t) test;").Check(testkit.Rows("1 2", "1 2"))
// Issue #8141.
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1(a int, b int)")
tk.MustExec("insert into t1 value(1,2),(1,1),(2,2),(2,2),(3,2),(3,2)")
tk.MustExec("set @@tidb_max_chunk_size=2;")
tk.MustQuery("select count(*) from (select a as c, a as d from t1 union all select a, b from t1) t;").Check(testkit.Rows("12"))
// Issue #8189 and issue #8199.
tk.MustExec("drop table if exists t1")
tk.MustExec("drop table if exists t2")
tk.MustExec("CREATE TABLE t1 (a int not null, b char (10) not null)")
tk.MustExec("insert into t1 values(1,'a'),(2,'b'),(3,'c'),(3,'c')")
tk.MustExec("CREATE TABLE t2 (a int not null, b char (10) not null)")
tk.MustExec("insert into t2 values(1,'a'),(2,'b'),(3,'c'),(3,'c')")
tk.MustQuery("select a from t1 union select a from t1 order by (select a+1);").Check(testkit.Rows("1", "2", "3"))
// Issue #8201.
for i := 0; i < 4; i++ {
tk.MustQuery("SELECT(SELECT 0 AS a FROM dual UNION SELECT 1 AS a FROM dual ORDER BY a ASC LIMIT 1) AS dev").Check(testkit.Rows("0"))
}
// Issue #8231.
tk.MustExec("drop table if exists t1")
tk.MustExec("CREATE TABLE t1 (uid int(1))")
tk.MustExec("INSERT INTO t1 SELECT 150")
tk.MustQuery("SELECT 'a' UNION SELECT uid FROM t1 order by 1 desc;").Check(testkit.Rows("a", "150"))
}
func (s *testSuite) TestNeighbouringProj(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1(a int, b int)")
tk.MustExec("create table t2(a int, b int)")
tk.MustExec("insert into t1 value(1, 1), (2, 2)")
tk.MustExec("insert into t2 value(1, 1), (2, 2)")
tk.MustQuery("select sum(c) from (select t1.a as a, t1.a as c, length(t1.b) from t1 union select a, b, b from t2) t;").Check(testkit.Rows("5"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a bigint, b bigint, c bigint);")
tk.MustExec("insert into t values(1, 1, 1), (2, 2, 2), (3, 3, 3);")
rs := tk.MustQuery("select cast(count(a) as signed), a as another, a from t group by a order by cast(count(a) as signed), a limit 10;")
rs.Check(testkit.Rows("1 1 1", "1 2 2", "1 3 3"))
}
func (s *testSuite) TestIn(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t (c1 int primary key, c2 int, key c (c2));`)
for i := 0; i <= 200; i++ {
tk.MustExec(fmt.Sprintf("insert t values(%d, %d)", i, i))
}
queryStr := `select c2 from t where c1 in ('7', '10', '112', '111', '98', '106', '100', '9', '18', '17') order by c2`
r := tk.MustQuery(queryStr)
r.Check(testkit.Rows("7", "9", "10", "17", "18", "98", "100", "106", "111", "112"))
queryStr = `select c2 from t where c1 in ('7a')`
tk.MustQuery(queryStr).Check(testkit.Rows("7"))
}
func (s *testSuite) TestTablePKisHandleScan(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int PRIMARY KEY AUTO_INCREMENT)")
tk.MustExec("insert t values (),()")
tk.MustExec("insert t values (-100),(0)")
tests := []struct {
sql string
result [][]interface{}
}{
{
"select * from t",
testkit.Rows("-100", "1", "2", "3"),
},
{
"select * from t where a = 1",
testkit.Rows("1"),
},
{
"select * from t where a != 1",
testkit.Rows("-100", "2", "3"),
},
{
"select * from t where a >= '1.1'",
testkit.Rows("2", "3"),
},
{
"select * from t where a < '1.1'",
testkit.Rows("-100", "1"),
},
{
"select * from t where a > '-100.1' and a < 2",
testkit.Rows("-100", "1"),
},
{
"select * from t where a is null",
testkit.Rows(),
}, {
"select * from t where a is true",
testkit.Rows("-100", "1", "2", "3"),
}, {
"select * from t where a is false",
testkit.Rows(),
},
{
"select * from t where a in (1, 2)",
testkit.Rows("1", "2"),
},
{
"select * from t where a between 1 and 2",
testkit.Rows("1", "2"),
},
}
for _, tt := range tests {
result := tk.MustQuery(tt.sql)
result.Check(tt.result)
}
}
func (s *testSuite) TestIndexScan(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unique)")
tk.MustExec("insert t values (-1), (2), (3), (5), (6), (7), (8), (9)")
result := tk.MustQuery("select a from t where a < 0 or (a >= 2.1 and a < 5.1) or ( a > 5.9 and a <= 7.9) or a > '8.1'")
result.Check(testkit.Rows("-1", "3", "5", "6", "7", "9"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unique)")
tk.MustExec("insert t values (0)")
result = tk.MustQuery("select NULL from t ")
result.Check(testkit.Rows("<nil>"))
// Test for double read.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unique, b int)")
tk.MustExec("insert t values (5, 0)")
tk.MustExec("insert t values (4, 0)")
tk.MustExec("insert t values (3, 0)")
tk.MustExec("insert t values (2, 0)")
tk.MustExec("insert t values (1, 0)")
tk.MustExec("insert t values (0, 0)")
result = tk.MustQuery("select * from t order by a limit 3")
result.Check(testkit.Rows("0 0", "1 0", "2 0"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unique, b int)")
tk.MustExec("insert t values (0, 1)")
tk.MustExec("insert t values (1, 2)")
tk.MustExec("insert t values (2, 1)")
tk.MustExec("insert t values (3, 2)")
tk.MustExec("insert t values (4, 1)")
tk.MustExec("insert t values (5, 2)")
result = tk.MustQuery("select * from t where a < 5 and b = 1 limit 2")
result.Check(testkit.Rows("0 1", "2 1"))
tk.MustExec("drop table if exists tab1")
tk.MustExec("CREATE TABLE tab1(pk INTEGER PRIMARY KEY, col0 INTEGER, col1 FLOAT, col3 INTEGER, col4 FLOAT)")
tk.MustExec("CREATE INDEX idx_tab1_0 on tab1 (col0)")
tk.MustExec("CREATE INDEX idx_tab1_1 on tab1 (col1)")
tk.MustExec("CREATE INDEX idx_tab1_3 on tab1 (col3)")
tk.MustExec("CREATE INDEX idx_tab1_4 on tab1 (col4)")
tk.MustExec("INSERT INTO tab1 VALUES(1,37,20.85,30,10.69)")
result = tk.MustQuery("SELECT pk FROM tab1 WHERE ((col3 <= 6 OR col3 < 29 AND (col0 < 41)) OR col3 > 42) AND col1 >= 96.1 AND col3 = 30 AND col3 > 17 AND (col0 BETWEEN 36 AND 42)")
result.Check(testkit.Rows())
tk.MustExec("drop table if exists tab1")
tk.MustExec("CREATE TABLE tab1(pk INTEGER PRIMARY KEY, a INTEGER, b INTEGER)")
tk.MustExec("CREATE INDEX idx_tab1_0 on tab1 (a)")
tk.MustExec("INSERT INTO tab1 VALUES(1,1,1)")
tk.MustExec("INSERT INTO tab1 VALUES(2,2,1)")
tk.MustExec("INSERT INTO tab1 VALUES(3,1,2)")
tk.MustExec("INSERT INTO tab1 VALUES(4,2,2)")
result = tk.MustQuery("SELECT * FROM tab1 WHERE pk <= 3 AND a = 1")
result.Check(testkit.Rows("1 1 1", "3 1 2"))
result = tk.MustQuery("SELECT * FROM tab1 WHERE pk <= 4 AND a = 1 AND b = 2")
result.Check(testkit.Rows("3 1 2"))
tk.MustExec("CREATE INDEX idx_tab1_1 on tab1 (b, a)")
result = tk.MustQuery("SELECT pk FROM tab1 WHERE b > 1")
result.Check(testkit.Rows("3", "4"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (a varchar(3), index(a))")
tk.MustExec("insert t values('aaa'), ('aab')")
result = tk.MustQuery("select * from t where a >= 'aaaa' and a < 'aabb'")
result.Check(testkit.Rows("aab"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (a int primary key, b int, c int, index(c))")
tk.MustExec("insert t values(1, 1, 1), (2, 2, 2), (4, 4, 4), (3, 3, 3), (5, 5, 5)")
// Test for double read and top-N.
result = tk.MustQuery("select a from t where c >= 2 order by b desc limit 1")
result.Check(testkit.Rows("5"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a varchar(50) primary key, b int, c int, index idx(b))")
tk.MustExec("insert into t values('aa', 1, 1)")
tk.MustQuery("select * from t use index(idx) where a > 'a'").Check(testkit.Rows("aa 1 1"))
}
func (s *testSuite) TestIndexReverseOrder(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key auto_increment, b int, index idx (b))")
tk.MustExec("insert t (b) values (0), (1), (2), (3), (4), (5), (6), (7), (8), (9)")
result := tk.MustQuery("select b from t order by b desc")
result.Check(testkit.Rows("9", "8", "7", "6", "5", "4", "3", "2", "1", "0"))
result = tk.MustQuery("select b from t where b <3 or (b >=6 and b < 8) order by b desc")
result.Check(testkit.Rows("7", "6", "2", "1", "0"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int, b int, index idx (b, a))")
tk.MustExec("insert t values (0, 2), (1, 2), (2, 2), (0, 1), (1, 1), (2, 1), (0, 0), (1, 0), (2, 0)")
result = tk.MustQuery("select b, a from t order by b, a desc")
result.Check(testkit.Rows("0 2", "0 1", "0 0", "1 2", "1 1", "1 0", "2 2", "2 1", "2 0"))
}
func (s *testSuite) TestTableReverseOrder(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key auto_increment, b int)")
tk.MustExec("insert t (b) values (1), (2), (3), (4), (5), (6), (7), (8), (9)")
result := tk.MustQuery("select b from t order by a desc")
result.Check(testkit.Rows("9", "8", "7", "6", "5", "4", "3", "2", "1"))
result = tk.MustQuery("select a from t where a <3 or (a >=6 and a < 8) order by a desc")
result.Check(testkit.Rows("7", "6", "2", "1"))
}
func (s *testSuite) TestDefaultNull(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key auto_increment, b int default 1, c int)")
tk.MustExec("insert t values ()")
tk.MustQuery("select * from t").Check(testkit.Rows("1 1 <nil>"))
tk.MustExec("update t set b = NULL where a = 1")
tk.MustQuery("select * from t").Check(testkit.Rows("1 <nil> <nil>"))
tk.MustExec("update t set c = 1")
tk.MustQuery("select * from t ").Check(testkit.Rows("1 <nil> 1"))
tk.MustExec("delete from t where a = 1")
tk.MustExec("insert t (a) values (1)")
tk.MustQuery("select * from t").Check(testkit.Rows("1 1 <nil>"))
}
func (s *testSuite) TestUnsignedPKColumn(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unsigned primary key, b int, c int, key idx_ba (b, c, a));")
tk.MustExec("insert t values (1, 1, 1)")
result := tk.MustQuery("select * from t;")
result.Check(testkit.Rows("1 1 1"))
tk.MustExec("update t set c=2 where a=1;")
result = tk.MustQuery("select * from t where b=1;")
result.Check(testkit.Rows("1 1 2"))
}
func (s *testSuite) TestJSON(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists test_json")
tk.MustExec("create table test_json (id int, a json)")
tk.MustExec(`insert into test_json (id, a) values (1, '{"a":[1,"2",{"aa":"bb"},4],"b":true}')`)
tk.MustExec(`insert into test_json (id, a) values (2, "null")`)
tk.MustExec(`insert into test_json (id, a) values (3, null)`)
tk.MustExec(`insert into test_json (id, a) values (4, 'true')`)
tk.MustExec(`insert into test_json (id, a) values (5, '3')`)
tk.MustExec(`insert into test_json (id, a) values (5, '4.0')`)
tk.MustExec(`insert into test_json (id, a) values (6, '"string"')`)
var result *testkit.Result
result = tk.MustQuery(`select tj.a from test_json tj order by tj.id`)
result.Check(testkit.Rows(`{"a": [1, "2", {"aa": "bb"}, 4], "b": true}`, "null", "<nil>", "true", "3", "4", `"string"`))
// Check json_type function
result = tk.MustQuery(`select json_type(a) from test_json tj order by tj.id`)
result.Check(testkit.Rows("OBJECT", "NULL", "<nil>", "BOOLEAN", "INTEGER", "DOUBLE", "STRING"))
// Check JSON comparison with primitives.
result = tk.MustQuery(`select a from test_json tj where a = 3`)
result.Check(testkit.Rows("3"))
result = tk.MustQuery(`select a from test_json tj where a = 4.0`)
result.Check(testkit.Rows("4"))
result = tk.MustQuery(`select a from test_json tj where a = true`)
result.Check(testkit.Rows("true"))
result = tk.MustQuery(`select a from test_json tj where a = "string"`)
result.Check(testkit.Rows(`"string"`))
// Check cast(true/false as JSON).
result = tk.MustQuery(`select cast(true as JSON)`)
result.Check(testkit.Rows(`true`))
result = tk.MustQuery(`select cast(false as JSON)`)
result.Check(testkit.Rows(`false`))
// Check the two JSON grammar sugars: -> and ->>.
result = tk.MustQuery(`select a->>'$.a[2].aa' as x, a->'$.b' as y from test_json having x is not null order by id`)
result.Check(testkit.Rows(`bb true`))
result = tk.MustQuery(`select a->'$.a[2].aa' as x, a->>'$.b' as y from test_json having x is not null order by id`)
result.Check(testkit.Rows(`"bb" true`))
// Check some DDL limits for TEXT/BLOB/JSON columns.
var err error
var terr *terror.Error
_, err = tk.Exec(`create table test_bad_json(a json default '{}')`)
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrBlobCantHaveDefault))
_, err = tk.Exec(`create table test_bad_json(a blob default 'hello')`)
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrBlobCantHaveDefault))
_, err = tk.Exec(`create table test_bad_json(a text default 'world')`)
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrBlobCantHaveDefault))
// Check that JSON fields cannot be used as an index key.
_, err = tk.Exec(`create table test_bad_json(id int, a json, key (a))`)
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrJSONUsedAsKey))
// Check CAST ... AS JSON.
result = tk.MustQuery(`select CAST('3' AS JSON), CAST('{}' AS JSON), CAST(null AS JSON)`)
result.Check(testkit.Rows(`3 {} <nil>`))
// Check casting JSON to DECIMAL; results are rounded to the column's scale of 2.
tk.MustExec("drop table if exists test_json")
tk.MustExec("create table test_json ( a decimal(60,2) as (JSON_EXTRACT(b,'$.c')), b json );")
tk.MustExec(`insert into test_json (b) values
('{"c": "1267.1"}'),
('{"c": "1267.01"}'),
('{"c": "1267.1234"}'),
('{"c": "1267.3456"}'),
('{"c": "1234567890123456789012345678901234567890123456789012345"}'),
('{"c": "1234567890123456789012345678901234567890123456789012345.12345"}');`)
tk.MustQuery("select a from test_json;").Check(testkit.Rows("1267.10", "1267.01", "1267.12",
"1267.35", "1234567890123456789012345678901234567890123456789012345.00",
"1234567890123456789012345678901234567890123456789012345.12"))
}
func (s *testSuite) TestMultiUpdate(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`CREATE TABLE test_mu (a int primary key, b int, c int)`)
tk.MustExec(`INSERT INTO test_mu VALUES (1, 2, 3), (4, 5, 6), (7, 8, 9)`)
// Test INSERT ... ON DUPLICATE KEY UPDATE set lists.
tk.MustExec(`INSERT INTO test_mu VALUES (1, 2, 3) ON DUPLICATE KEY UPDATE b = 3, c = b`)
result := tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 3 3`, `4 5 6`, `7 8 9`))
tk.MustExec(`INSERT INTO test_mu VALUES (1, 2, 3) ON DUPLICATE KEY UPDATE c = 2, b = c+5`)
result = tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 7 2`, `4 5 6`, `7 8 9`))
// Test UPDATE ... SET lists.
tk.MustExec(`UPDATE test_mu SET b = 0, c = b WHERE a = 4`)
result = tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 7 2`, `4 0 0`, `7 8 9`))
tk.MustExec(`UPDATE test_mu SET c = 8, b = c WHERE a = 4`)
result = tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 7 2`, `4 8 8`, `7 8 9`))
tk.MustExec(`UPDATE test_mu SET c = b, b = c WHERE a = 7`)
result = tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 7 2`, `4 8 8`, `7 8 8`))
}
func (s *testSuite) TestGeneratedColumnWrite(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`CREATE TABLE test_gc_write (a int primary key auto_increment, b int, c int as (a+8) virtual)`)
tk.MustExec(`CREATE TABLE test_gc_write_1 (a int primary key, b int, c int)`)
tests := []struct {
stmt string
err int
}{
// Can't modify generated column by values.
{`insert into test_gc_write (a, b, c) values (1, 1, 1)`, mysql.ErrBadGeneratedColumn},
{`insert into test_gc_write values (1, 1, 1)`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by select clause.
{`insert into test_gc_write select 1, 1, 1`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by on duplicate clause.
{`insert into test_gc_write (a, b) values (1, 1) on duplicate key update c = 1`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by set.
{`insert into test_gc_write set a = 1, b = 1, c = 1`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by update clause.
{`update test_gc_write set c = 1`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by multi-table update clause.
{`update test_gc_write, test_gc_write_1 set test_gc_write.c = 1`, mysql.ErrBadGeneratedColumn},
// Can insert without generated columns.
{`insert into test_gc_write (a, b) values (1, 1)`, 0},
{`insert into test_gc_write set a = 2, b = 2`, 0},
{`insert into test_gc_write (b) select c from test_gc_write`, 0},
// Can update without generated columns.
{`update test_gc_write set b = 2 where a = 2`, 0},
{`update test_gc_write t1, test_gc_write_1 t2 set t1.b = 3, t2.b = 4`, 0},
// But we can't do this, the same as in MySQL 5.7:
{`insert into test_gc_write values (1, 1)`, mysql.ErrWrongValueCountOnRow},
{`insert into test_gc_write select 1, 1`, mysql.ErrWrongValueCountOnRow},
{`insert into test_gc_write (c) select a, b from test_gc_write`, mysql.ErrWrongValueCountOnRow},
{`insert into test_gc_write (b, c) select a, b from test_gc_write`, mysql.ErrBadGeneratedColumn},
}
for _, tt := range tests {
_, err := tk.Exec(tt.stmt)
if tt.err != 0 {
c.Assert(err, NotNil, Commentf("sql is `%v`", tt.stmt))
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(tt.err), Commentf("sql is %v", tt.stmt))
} else {
c.Assert(err, IsNil)
}
}
}
// TestGeneratedColumnRead tests selecting generated columns from a table.
// They should be calculated from their generation expressions.
func (s *testSuite) TestGeneratedColumnRead(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`CREATE TABLE test_gc_read(a int primary key, b int, c int as (a+b), d int as (a*b) stored)`)
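// c is a virtual generated column (computed when read); d is stored (computed and persisted when the row is written).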
result := tk.MustQuery(`SELECT generation_expression FROM information_schema.columns WHERE table_name = 'test_gc_read' AND column_name = 'd'`)
result.Check(testkit.Rows("`a` * `b`"))
// Insert only columns a and b, leaving c and d to be calculated from them.
tk.MustExec(`INSERT INTO test_gc_read (a, b) VALUES (0,null),(1,2),(3,4)`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil>`, `1 2 3 2`, `3 4 7 12`))
tk.MustExec(`INSERT INTO test_gc_read SET a = 5, b = 10`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil>`, `1 2 3 2`, `3 4 7 12`, `5 10 15 50`))
tk.MustExec(`REPLACE INTO test_gc_read (a, b) VALUES (5, 6)`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil>`, `1 2 3 2`, `3 4 7 12`, `5 6 11 30`))
tk.MustExec(`INSERT INTO test_gc_read (a, b) VALUES (5, 8) ON DUPLICATE KEY UPDATE b = 9`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil>`, `1 2 3 2`, `3 4 7 12`, `5 9 14 45`))
// Test selecting only the generated columns, without their dependent base columns.
result = tk.MustQuery(`SELECT c, d FROM test_gc_read`)
result.Check(testkit.Rows(`<nil> <nil>`, `3 2`, `7 12`, `14 45`))
// Test the order of the ON DUPLICATE KEY UPDATE list.
tk.MustExec(`INSERT INTO test_gc_read (a, b) VALUES (5, 8) ON DUPLICATE KEY UPDATE a = 6, b = a`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil>`, `1 2 3 2`, `3 4 7 12`, `6 6 12 36`))
tk.MustExec(`INSERT INTO test_gc_read (a, b) VALUES (6, 8) ON DUPLICATE KEY UPDATE b = 8, a = b`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil>`, `1 2 3 2`, `3 4 7 12`, `8 8 16 64`))
// Test where-conditions on virtual/stored generated columns.
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE c = 7`)
result.Check(testkit.Rows(`3 4 7 12`))
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE d = 64`)
result.Check(testkit.Rows(`8 8 16 64`))
// Test update where-conditions on virtual/generated columns.
tk.MustExec(`UPDATE test_gc_read SET a = a + 100 WHERE c = 7`)
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE c = 107`)
result.Check(testkit.Rows(`103 4 107 412`))
// Test update where-conditions on virtual/generated columns.
tk.MustExec(`UPDATE test_gc_read m SET m.a = m.a + 100 WHERE c = 107`)
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE c = 207`)
result.Check(testkit.Rows(`203 4 207 812`))
tk.MustExec(`UPDATE test_gc_read SET a = a - 200 WHERE d = 812`)
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE d = 12`)
result.Check(testkit.Rows(`3 4 7 12`))
tk.MustExec(`INSERT INTO test_gc_read set a = 4, b = d + 1`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil>`, `1 2 3 2`, `3 4 7 12`,
`4 <nil> <nil> <nil>`, `8 8 16 64`))
tk.MustExec(`DELETE FROM test_gc_read where a = 4`)
// Test on-conditions on virtual/stored generated columns.
tk.MustExec(`CREATE TABLE test_gc_help(a int primary key, b int, c int, d int)`)
tk.MustExec(`INSERT INTO test_gc_help(a, b, c, d) SELECT * FROM test_gc_read`)
result = tk.MustQuery(`SELECT t1.* FROM test_gc_read t1 JOIN test_gc_help t2 ON t1.c = t2.c ORDER BY t1.a`)
result.Check(testkit.Rows(`1 2 3 2`, `3 4 7 12`, `8 8 16 64`))
result = tk.MustQuery(`SELECT t1.* FROM test_gc_read t1 JOIN test_gc_help t2 ON t1.d = t2.d ORDER BY t1.a`)
result.Check(testkit.Rows(`1 2 3 2`, `3 4 7 12`, `8 8 16 64`))
// Test generated column in subqueries.
result = tk.MustQuery(`SELECT * FROM test_gc_read t WHERE t.a not in (SELECT t.a FROM test_gc_read t where t.c > 5)`)
result.Sort().Check(testkit.Rows(`0 <nil> <nil> <nil>`, `1 2 3 2`))
result = tk.MustQuery(`SELECT * FROM test_gc_read t WHERE t.c in (SELECT t.c FROM test_gc_read t where t.c > 5)`)
result.Sort().Check(testkit.Rows(`3 4 7 12`, `8 8 16 64`))
result = tk.MustQuery(`SELECT tt.b FROM test_gc_read tt WHERE tt.a = (SELECT max(t.a) FROM test_gc_read t WHERE t.c = tt.c) ORDER BY b`)
result.Check(testkit.Rows(`2`, `4`, `8`))
// Test aggregation on virtual/stored generated columns.
result = tk.MustQuery(`SELECT c, sum(a) aa, max(d) dd FROM test_gc_read GROUP BY c ORDER BY aa`)
result.Check(testkit.Rows(`<nil> 0 <nil>`, `3 1 2`, `7 3 12`, `16 8 64`))
result = tk.MustQuery(`SELECT a, sum(c), sum(d) FROM test_gc_read GROUP BY a ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil>`, `1 3 2`, `3 7 12`, `8 16 64`))
// Test multi-update on generated columns.
tk.MustExec(`UPDATE test_gc_read m, test_gc_read n SET m.a = m.a + 10, n.a = n.a + 10`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`10 <nil> <nil> <nil>`, `11 2 13 22`, `13 4 17 52`, `18 8 26 144`))
// Test different types between generation expression and generated column.
tk.MustExec(`CREATE TABLE test_gc_read_cast(a VARCHAR(255), b VARCHAR(255), c INT AS (JSON_EXTRACT(a, b)), d INT AS (JSON_EXTRACT(a, b)) STORED)`)
tk.MustExec(`INSERT INTO test_gc_read_cast (a, b) VALUES ('{"a": "3"}', '$.a')`)
result = tk.MustQuery(`SELECT c, d FROM test_gc_read_cast`)
result.Check(testkit.Rows(`3 3`))
tk.MustExec(`CREATE TABLE test_gc_read_cast_1(a VARCHAR(255), b VARCHAR(255), c ENUM("red", "yellow") AS (JSON_UNQUOTE(JSON_EXTRACT(a, b))))`)
tk.MustExec(`INSERT INTO test_gc_read_cast_1 (a, b) VALUES ('{"a": "yellow"}', '$.a')`)
result = tk.MustQuery(`SELECT c FROM test_gc_read_cast_1`)
result.Check(testkit.Rows(`yellow`))
tk.MustExec(`CREATE TABLE test_gc_read_cast_2( a JSON, b JSON AS (a->>'$.a'))`)
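// \u6d4b is the JSON escape sequence for the character 测.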
tk.MustExec(`INSERT INTO test_gc_read_cast_2(a) VALUES ('{"a": "{ \\\"key\\\": \\\"\\u6d4b\\\" }"}')`)
result = tk.MustQuery(`SELECT b FROM test_gc_read_cast_2`)
result.Check(testkit.Rows(`{"key": "测"}`))
_, err := tk.Exec(`INSERT INTO test_gc_read_cast_1 (a, b) VALUES ('{"a": "invalid"}', '$.a')`)
c.Assert(err, NotNil)
// Test not null generated columns.
tk.MustExec(`CREATE TABLE test_gc_read_1(a int primary key, b int, c int as (a+b) not null, d int as (a*b) stored)`)
tk.MustExec(`CREATE TABLE test_gc_read_2(a int primary key, b int, c int as (a+b), d int as (a*b) stored not null)`)
tests := []struct {
stmt string
err int
}{
// Can't insert these records, because the generated columns are NOT NULL.
{`insert into test_gc_read_1(a, b) values (1, null)`, mysql.ErrBadNull},
{`insert into test_gc_read_2(a, b) values (1, null)`, mysql.ErrBadNull},
}
for _, tt := range tests {
_, err := tk.Exec(tt.stmt)
if tt.err != 0 {
c.Assert(err, NotNil)
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(tt.err))
} else {
c.Assert(err, IsNil)
}
}
}
func (s *testSuite) TestToPBExpr(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a decimal(10,6), b decimal, index idx_b (b))")
tk.MustExec("set sql_mode = ''")
tk.MustExec("insert t values (1.1, 1.1)")
tk.MustExec("insert t values (2.4, 2.4)")
tk.MustExec("insert t values (3.3, 2.7)")
result := tk.MustQuery("select * from t where a < 2.399999")
result.Check(testkit.Rows("1.100000 1"))
result = tk.MustQuery("select * from t where a > 1.5")
result.Check(testkit.Rows("2.400000 2", "3.300000 3"))
result = tk.MustQuery("select * from t where a <= 1.1")
result.Check(testkit.Rows("1.100000 1"))
result = tk.MustQuery("select * from t where b >= 3")
result.Check(testkit.Rows("3.300000 3"))
result = tk.MustQuery("select * from t where not (b = 1)")
result.Check(testkit.Rows("2.400000 2", "3.300000 3"))
result = tk.MustQuery("select * from t where b&1 = a|1")
result.Check(testkit.Rows("1.100000 1"))
result = tk.MustQuery("select * from t where b != 2 and b <=> 3")
result.Check(testkit.Rows("3.300000 3"))
result = tk.MustQuery("select * from t where b in (3)")
result.Check(testkit.Rows("3.300000 3"))
result = tk.MustQuery("select * from t where b not in (1, 2)")
result.Check(testkit.Rows("3.300000 3"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a varchar(255), b int)")
tk.MustExec("insert t values ('abc123', 1)")
tk.MustExec("insert t values ('ab123', 2)")
result = tk.MustQuery("select * from t where a like 'ab%'")
result.Check(testkit.Rows("abc123 1", "ab123 2"))
result = tk.MustQuery("select * from t where a like 'ab_12'")
result.Check(nil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key)")
tk.MustExec("insert t values (1)")
tk.MustExec("insert t values (2)")
result = tk.MustQuery("select * from t where not (a = 1)")
result.Check(testkit.Rows("2"))
result = tk.MustQuery("select * from t where not(not (a = 1))")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select * from t where not(a != 1 and a != 2)")
result.Check(testkit.Rows("1", "2"))
}
func (s *testSuite) TestDatumXAPI(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a decimal(10,6), b decimal, index idx_b (b))")
tk.MustExec("set sql_mode = ''")
tk.MustExec("insert t values (1.1, 1.1)")
tk.MustExec("insert t values (2.2, 2.2)")
tk.MustExec("insert t values (3.3, 2.7)")
result := tk.MustQuery("select * from t where a > 1.5")
result.Check(testkit.Rows("2.200000 2", "3.300000 3"))
result = tk.MustQuery("select * from t where b > 1.5")
result.Check(testkit.Rows("2.200000 2", "3.300000 3"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a time(3), b time, index idx_a (a))")
tk.MustExec("insert t values ('11:11:11', '11:11:11')")
tk.MustExec("insert t values ('11:11:12', '11:11:12')")
tk.MustExec("insert t values ('11:11:13', '11:11:13')")
result = tk.MustQuery("select * from t where a > '11:11:11.5'")
result.Check(testkit.Rows("11:11:12.000 11:11:12", "11:11:13.000 11:11:13"))
result = tk.MustQuery("select * from t where b > '11:11:11.5'")
result.Check(testkit.Rows("11:11:12.000 11:11:12", "11:11:13.000 11:11:13"))
}
func (s *testSuite) TestSQLMode(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a tinyint not null)")
tk.MustExec("set sql_mode = 'STRICT_TRANS_TABLES'")
_, err := tk.Exec("insert t values ()")
c.Check(err, NotNil)
_, err = tk.Exec("insert t values ('1000')")
c.Check(err, NotNil)
tk.MustExec("create table if not exists tdouble (a double(3,2))")
_, err = tk.Exec("insert tdouble values (10.23)")
c.Check(err, NotNil)
tk.MustExec("set sql_mode = ''")
tk.MustExec("insert t values ()")
tk.MustExec("insert t values (1000)")
tk.MustQuery("select * from t").Check(testkit.Rows("0", "127"))
tk.MustExec("insert tdouble values (10.23)")
tk.MustQuery("select * from tdouble").Check(testkit.Rows("9.99"))
tk.MustExec("set sql_mode = 'STRICT_TRANS_TABLES'")
tk.MustExec("set @@global.sql_mode = ''")
// Because of the global variable cache, we have to sleep a while here for the new value to take effect.
time.Sleep(3 * time.Second)
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use test")
tk2.MustExec("create table t2 (a varchar(3))")
tk2.MustExec("insert t2 values ('abcd')")
tk2.MustQuery("select * from t2").Check(testkit.Rows("abc"))
// The first session (tk) is still in strict mode.
_, err = tk.Exec("insert t2 values ('abcd')")
c.Check(err, NotNil)
// Restore original global strict mode.
tk.MustExec("set @@global.sql_mode = 'STRICT_TRANS_TABLES'")
}
func (s *testSuite) TestTableDual(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
result := tk.MustQuery("Select 1")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("Select 1 from dual")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("Select count(*) from dual")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("Select 1 from dual where 1")
result.Check(testkit.Rows("1"))
}
func (s *testSuite) TestTableScan(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use information_schema")
result := tk.MustQuery("select * from schemata")
// There must be these tables: information_schema, mysql, performance_schema and test.
c.Assert(len(result.Rows()), GreaterEqual, 4)
tk.MustExec("use test")
tk.MustExec("create database mytest")
rowStr1 := fmt.Sprintf("%s %s %s %s %v", "def", "mysql", "utf8mb4", "utf8mb4_bin", nil)
rowStr2 := fmt.Sprintf("%s %s %s %s %v", "def", "mytest", "utf8mb4", "utf8mb4_bin", nil)
tk.MustExec("use information_schema")
result = tk.MustQuery("select * from schemata where schema_name = 'mysql'")
result.Check(testkit.Rows(rowStr1))
result = tk.MustQuery("select * from schemata where schema_name like 'my%'")
result.Check(testkit.Rows(rowStr1, rowStr2))
result = tk.MustQuery("select 1 from tables limit 1")
result.Check(testkit.Rows("1"))
}
func (s *testSuite) TestAdapterStatement(c *C) {
se, err := session.CreateSession4Test(s.store)
c.Check(err, IsNil)
se.GetSessionVars().TxnCtx.InfoSchema = domain.GetDomain(se).InfoSchema()
compiler := &executor.Compiler{Ctx: se}
stmtNode, err := s.ParseOneStmt("select 1", "", "")
c.Check(err, IsNil)
stmt, err := compiler.Compile(context.TODO(), stmtNode)
c.Check(err, IsNil)
c.Check(stmt.OriginText(), Equals, "select 1")
stmtNode, err = s.ParseOneStmt("create table test.t (a int)", "", "")
c.Check(err, IsNil)
stmt, err = compiler.Compile(context.TODO(), stmtNode)
c.Check(err, IsNil)
c.Check(stmt.OriginText(), Equals, "create table test.t (a int)")
}
func (s *testSuite) TestIsPointGet(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use mysql")
ctx := tk.Se.(sessionctx.Context)
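// A statement counts as a point get only when it has an equality condition on the primary key or on a unique index; help_category_id is neither, so the last case is false.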
tests := map[string]bool{
"select * from help_topic where name='aaa'": true,
"select * from help_topic where help_topic_id=1": true,
"select * from help_topic where help_category_id=1": false,
}
infoSchema := executor.GetInfoSchema(ctx)
for sqlStr, result := range tests {
stmtNode, err := s.ParseOneStmt(sqlStr, "", "")
c.Check(err, IsNil)
err = plannercore.Preprocess(ctx, stmtNode, infoSchema, false)
c.Check(err, IsNil)
p, err := planner.Optimize(ctx, stmtNode, infoSchema)
c.Check(err, IsNil)
ret := executor.IsPointGetWithPKOrUniqueKeyByAutoCommit(ctx, p)
c.Assert(ret, Equals, result)
}
}
func (s *testSuite) TestRow(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c int, d int)")
tk.MustExec("insert t values (1, 1)")
tk.MustExec("insert t values (1, 3)")
tk.MustExec("insert t values (2, 1)")
tk.MustExec("insert t values (2, 3)")
result := tk.MustQuery("select * from t where (c, d) < (2,2)")
result.Check(testkit.Rows("1 1", "1 3", "2 1"))
result = tk.MustQuery("select * from t where (1,2,3) > (3,2,1)")
result.Check(testkit.Rows())
result = tk.MustQuery("select * from t where row(1,2,3) > (3,2,1)")
result.Check(testkit.Rows())
result = tk.MustQuery("select * from t where (c, d) = (select * from t where (c,d) = (1,1))")
result.Check(testkit.Rows("1 1"))
result = tk.MustQuery("select * from t where (c, d) = (select * from t k where (t.c,t.d) = (c,d))")
result.Check(testkit.Rows("1 1", "1 3", "2 1", "2 3"))
result = tk.MustQuery("select (1, 2, 3) < (2, 3, 4)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select (2, 3, 4) <= (2, 3, 3)")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select (2, 3, 4) <= (2, 3, 4)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select (2, 3, 4) <= (2, 1, 4)")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select (2, 3, 4) >= (2, 3, 4)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select (2, 3, 4) = (2, 3, 4)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select (2, 3, 4) != (2, 3, 4)")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select row(1, 1) in (row(1, 1))")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select row(1, 0) in (row(1, 1))")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select row(1, 1) in (select 1, 1)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select row(1, 1) > row(1, 0)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select row(1, 1) > (select 1, 0)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select 1 > (select 1)")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select (select 1)")
result.Check(testkit.Rows("1"))
}
func (s *testSuite) TestColumnName(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c int, d int)")
// Disable ONLY_FULL_GROUP_BY.
tk.MustExec("set sql_mode='STRICT_TRANS_TABLES'")
rs, err := tk.Exec("select 1 + c, count(*) from t")
c.Check(err, IsNil)
fields := rs.Fields()
c.Check(len(fields), Equals, 2)
c.Check(fields[0].Column.Name.L, Equals, "1 + c")
c.Check(fields[0].ColumnAsName.L, Equals, "1 + c")
c.Check(fields[1].Column.Name.L, Equals, "count(*)")
c.Check(fields[1].ColumnAsName.L, Equals, "count(*)")
rs, err = tk.Exec("select (c) > all (select c from t) from t")
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.L, Equals, "(c) > all (select c from t)")
c.Check(fields[0].ColumnAsName.L, Equals, "(c) > all (select c from t)")
tk.MustExec("begin")
tk.MustExec("insert t values(1,1)")
rs, err = tk.Exec("select c d, d c from t")
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 2)
c.Check(fields[0].Column.Name.L, Equals, "c")
c.Check(fields[0].ColumnAsName.L, Equals, "d")
c.Check(fields[1].Column.Name.L, Equals, "d")
c.Check(fields[1].ColumnAsName.L, Equals, "c")
// Test case for querying a column of a table.
// In this case, all attributes have values.
rs, err = tk.Exec("select c as a from t as t2")
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(fields[0].Column.Name.L, Equals, "c")
c.Check(fields[0].ColumnAsName.L, Equals, "a")
c.Check(fields[0].Table.Name.L, Equals, "t")
c.Check(fields[0].TableAsName.L, Equals, "t2")
c.Check(fields[0].DBName.L, Equals, "test")
// Test case for querying an expression that only uses constant inputs.
// In this case, the table, org_table and database attributes will all be empty.
rs, err = tk.Exec("select hour(1) as a from t as t2")
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(fields[0].Column.Name.L, Equals, "a")
c.Check(fields[0].ColumnAsName.L, Equals, "a")
c.Check(fields[0].Table.Name.L, Equals, "")
c.Check(fields[0].TableAsName.L, Equals, "")
c.Check(fields[0].DBName.L, Equals, "")
}
func (s *testSuite) TestSelectVar(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (d int)")
tk.MustExec("insert into t values(1), (2), (1)")
// This behavior is different from MySQL.
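// In MySQL the value of @a read in the statement that also assigns it is undefined; here TiDB evaluates the projection row by row, so @a reports the value assigned while processing the previous row.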
result := tk.MustQuery("select @a, @a := d+1 from t")
result.Check(testkit.Rows("<nil> 2", "2 3", "3 2"))
}
func (s *testSuite) TestHistoryRead(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists history_read")
tk.MustExec("create table history_read (a int)")
tk.MustExec("insert history_read values (1)")
// For mocktikv, the safe point is not initialized, so we manually insert it for snapshot reads to use.
safePointName := "tikv_gc_safe_point"
safePointValue := "20060102-15:04:05 -0700 MST"
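// The value matches Go's reference time and lies far in the past, so any later snapshot passes the safe point check.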
safePointComment := "All versions after safe point can be accessed. (DO NOT EDIT)"
updateSafePoint := fmt.Sprintf(`INSERT INTO mysql.tidb VALUES ('%[1]s', '%[2]s', '%[3]s')
ON DUPLICATE KEY
UPDATE variable_value = '%[2]s', comment = '%[3]s'`, safePointName, safePointValue, safePointComment)
tk.MustExec(updateSafePoint)
// Setting the snapshot to a time before the safe point will fail.
_, err := tk.Exec("set @@tidb_snapshot = '2006-01-01 15:04:05.999999'")
c.Assert(terror.ErrorEqual(err, variable.ErrSnapshotTooOld), IsTrue, Commentf("err %v", err))
// SnapshotTS is not updated if the check fails.
c.Assert(tk.Se.GetSessionVars().SnapshotTS, Equals, uint64(0))
curVer1, _ := s.store.CurrentVersion()
time.Sleep(time.Millisecond)
snapshotTime := time.Now()
time.Sleep(time.Millisecond)
curVer2, _ := s.store.CurrentVersion()
tk.MustExec("insert history_read values (2)")
tk.MustQuery("select * from history_read").Check(testkit.Rows("1", "2"))
tk.MustExec("set @@tidb_snapshot = '" + snapshotTime.Format("2006-01-02 15:04:05.999999") + "'")
ctx := tk.Se.(sessionctx.Context)
snapshotTS := ctx.GetSessionVars().SnapshotTS
c.Assert(snapshotTS, Greater, curVer1.Ver)
c.Assert(snapshotTS, Less, curVer2.Ver)
tk.MustQuery("select * from history_read").Check(testkit.Rows("1"))
_, err = tk.Exec("insert history_read values (2)")
c.Assert(err, NotNil)
_, err = tk.Exec("update history_read set a = 3 where a = 1")
c.Assert(err, NotNil)
_, err = tk.Exec("delete from history_read where a = 1")
c.Assert(err, NotNil)
tk.MustExec("set @@tidb_snapshot = ''")
tk.MustQuery("select * from history_read").Check(testkit.Rows("1", "2"))
tk.MustExec("insert history_read values (3)")
tk.MustExec("update history_read set a = 4 where a = 3")
tk.MustExec("delete from history_read where a = 1")
time.Sleep(time.Millisecond)
snapshotTime = time.Now()
time.Sleep(time.Millisecond)
tk.MustExec("alter table history_read add column b int")
tk.MustExec("insert history_read values (8, 8), (9, 9)")
tk.MustQuery("select * from history_read order by a").Check(testkit.Rows("2 <nil>", "4 <nil>", "8 8", "9 9"))
tk.MustExec("set @@tidb_snapshot = '" + snapshotTime.Format("2006-01-02 15:04:05.999999") + "'")
tk.MustQuery("select * from history_read order by a").Check(testkit.Rows("2", "4"))
tsoStr := strconv.FormatUint(oracle.EncodeTSO(snapshotTime.UnixNano()/int64(time.Millisecond)), 10)
tk.MustExec("set @@tidb_snapshot = '" + tsoStr + "'")
tk.MustQuery("select * from history_read order by a").Check(testkit.Rows("2", "4"))
tk.MustExec("set @@tidb_snapshot = ''")
tk.MustQuery("select * from history_read order by a").Check(testkit.Rows("2 <nil>", "4 <nil>", "8 8", "9 9"))
}
func (s *testSuite) TestScanControlSelection(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key, b int, c int, index idx_b(b))")
tk.MustExec("insert into t values (1, 1, 1), (2, 1, 1), (3, 1, 2), (4, 2, 3)")
tk.MustQuery("select (select count(1) k from t s where s.b = t1.c) from t t1").Sort().Check(testkit.Rows("0", "1", "3", "3"))
}
func (s *testSuite) TestSimpleDAG(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key, b int, c int)")
tk.MustExec("insert into t values (1, 1, 1), (2, 1, 1), (3, 1, 2), (4, 2, 3)")
tk.MustQuery("select a from t").Check(testkit.Rows("1", "2", "3", "4"))
tk.MustQuery("select * from t where a = 4").Check(testkit.Rows("4 2 3"))
tk.MustQuery("select a from t limit 1").Check(testkit.Rows("1"))
tk.MustQuery("select a from t order by a desc").Check(testkit.Rows("4", "3", "2", "1"))
tk.MustQuery("select a from t order by a desc limit 1").Check(testkit.Rows("4"))
tk.MustQuery("select a from t order by b desc limit 1").Check(testkit.Rows("4"))
tk.MustQuery("select a from t where a < 3").Check(testkit.Rows("1", "2"))
tk.MustQuery("select a from t where b > 1").Check(testkit.Rows("4"))
tk.MustQuery("select a from t where b > 1 and a < 3").Check(testkit.Rows())
tk.MustQuery("select count(*) from t where b > 1 and a < 3").Check(testkit.Rows("0"))
tk.MustQuery("select count(*) from t").Check(testkit.Rows("4"))
tk.MustQuery("select count(*), c from t group by c order by c").Check(testkit.Rows("2 1", "1 2", "1 3"))
tk.MustQuery("select sum(c) as s from t group by b order by s").Check(testkit.Rows("3", "4"))
tk.MustQuery("select avg(a) as s from t group by b order by s").Check(testkit.Rows("2.0000", "4.0000"))
tk.MustQuery("select sum(distinct c) from t group by b").Check(testkit.Rows("3", "3"))
tk.MustExec("create index i on t(c,b)")
tk.MustQuery("select a from t where c = 1").Check(testkit.Rows("1", "2"))
tk.MustQuery("select a from t where c = 1 and a < 2").Check(testkit.Rows("1"))
tk.MustQuery("select a from t where c = 1 order by a limit 1").Check(testkit.Rows("1"))
tk.MustQuery("select count(*) from t where c = 1 ").Check(testkit.Rows("2"))
tk.MustExec("create index i1 on t(b)")
tk.MustQuery("select c from t where b = 2").Check(testkit.Rows("3"))
tk.MustQuery("select * from t where b = 2").Check(testkit.Rows("4 2 3"))
tk.MustQuery("select count(*) from t where b = 1").Check(testkit.Rows("3"))
tk.MustQuery("select * from t where b = 1 and a > 1 limit 1").Check(testkit.Rows("2 1 1"))
// Test time push down.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (id int, c1 datetime);")
tk.MustExec("insert into t values (1, '2015-06-07 12:12:12')")
tk.MustQuery("select id from t where c1 = '2015-06-07 12:12:12'").Check(testkit.Rows("1"))
}
func (s *testSuite) TestTimestampTimeZone(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (ts timestamp)")
tk.MustExec("set time_zone = '+00:00'")
tk.MustExec("insert into t values ('2017-04-27 22:40:42')")
// The timestamp gets a different value when the time_zone session variable changes, because TIMESTAMP values are stored in UTC and converted to the session time zone on read.
tests := []struct {
timezone string
expect string
}{
{"+10:00", "2017-04-28 08:40:42"},
{"-6:00", "2017-04-27 16:40:42"},
}
for _, tt := range tests {
tk.MustExec(fmt.Sprintf("set time_zone = '%s'", tt.timezone))
tk.MustQuery("select * from t").Check(testkit.Rows(tt.expect))
}
// For issue https://github.com/pingcap/tidb/issues/3467
tk.MustExec("drop table if exists t1")
tk.MustExec(`CREATE TABLE t1 (
id bigint(20) NOT NULL AUTO_INCREMENT,
uid int(11) DEFAULT NULL,
datetime timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
ip varchar(128) DEFAULT NULL,
PRIMARY KEY (id),
KEY i_datetime (datetime),
KEY i_userid (uid)
);`)
tk.MustExec(`INSERT INTO t1 VALUES (123381351,1734,"2014-03-31 08:57:10","127.0.0.1");`)
r := tk.MustQuery("select datetime from t1;") // Cover TableReaderExec
r.Check(testkit.Rows("2014-03-31 08:57:10"))
r = tk.MustQuery("select datetime from t1 where datetime='2014-03-31 08:57:10';")
r.Check(testkit.Rows("2014-03-31 08:57:10")) // Cover IndexReaderExec
r = tk.MustQuery("select * from t1 where datetime='2014-03-31 08:57:10';")
r.Check(testkit.Rows("123381351 1734 2014-03-31 08:57:10 127.0.0.1")) // Cover IndexLookupExec
// For issue https://github.com/pingcap/tidb/issues/3485
tk.MustExec("set time_zone = 'Asia/Shanghai'")
tk.MustExec("drop table if exists t1")
tk.MustExec(`CREATE TABLE t1 (
id bigint(20) NOT NULL AUTO_INCREMENT,
datetime timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (id)
);`)
tk.MustExec(`INSERT INTO t1 VALUES (123381351,"2014-03-31 08:57:10");`)
r = tk.MustQuery(`select * from t1 where datetime="2014-03-31 08:57:10";`)
r.Check(testkit.Rows("123381351 2014-03-31 08:57:10"))
tk.MustExec(`alter table t1 add key i_datetime (datetime);`)
r = tk.MustQuery(`select * from t1 where datetime="2014-03-31 08:57:10";`)
r.Check(testkit.Rows("123381351 2014-03-31 08:57:10"))
r = tk.MustQuery(`select * from t1;`)
r.Check(testkit.Rows("123381351 2014-03-31 08:57:10"))
r = tk.MustQuery("select datetime from t1 where datetime='2014-03-31 08:57:10';")
r.Check(testkit.Rows("2014-03-31 08:57:10"))
}
func (s *testSuite) TestTiDBCurrentTS(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustQuery("select @@tidb_current_ts").Check(testkit.Rows("0"))
tk.MustExec("begin")
rows := tk.MustQuery("select @@tidb_current_ts").Rows()
tsStr := rows[0][0].(string)
c.Assert(tsStr, Equals, fmt.Sprintf("%d", tk.Se.Txn(true).StartTS()))
tk.MustExec("begin")
rows = tk.MustQuery("select @@tidb_current_ts").Rows()
newTsStr := rows[0][0].(string)
c.Assert(newTsStr, Equals, fmt.Sprintf("%d", tk.Se.Txn(true).StartTS()))
c.Assert(newTsStr, Not(Equals), tsStr)
tk.MustExec("commit")
tk.MustQuery("select @@tidb_current_ts").Check(testkit.Rows("0"))
_, err := tk.Exec("set @@tidb_current_ts = '1'")
c.Assert(terror.ErrorEqual(err, variable.ErrReadOnly), IsTrue, Commentf("err %v", err))
}
func (s *testSuite) TestSelectForUpdate(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("use test")
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use test")
tk.MustExec("drop table if exists t, t1")
c.Assert(tk.Se.Txn(true).Valid(), IsFalse)
tk.MustExec("create table t (c1 int, c2 int, c3 int)")
tk.MustExec("insert t values (11, 2, 3)")
tk.MustExec("insert t values (12, 2, 3)")
tk.MustExec("insert t values (13, 2, 3)")
tk.MustExec("create table t1 (c1 int)")
tk.MustExec("insert t1 values (11)")
// conflict
tk1.MustExec("begin")
tk1.MustQuery("select * from t where c1=11 for update")
tk2.MustExec("begin")
tk2.MustExec("update t set c2=211 where c1=11")
tk2.MustExec("commit")
_, err := tk1.Exec("commit")
c.Assert(err, NotNil)
// no conflict for subquery.
tk1.MustExec("begin")
tk1.MustQuery("select * from t where exists(select null from t1 where t1.c1=t.c1) for update")
tk2.MustExec("begin")
tk2.MustExec("update t set c2=211 where c1=12")
tk2.MustExec("commit")
tk1.MustExec("commit")
// not conflict
tk1.MustExec("begin")
tk1.MustQuery("select * from t where c1=11 for update")
tk2.MustExec("begin")
tk2.MustExec("update t set c2=22 where c1=12")
tk2.MustExec("commit")
tk1.MustExec("commit")
// not conflict, auto commit
tk1.MustExec("set @@autocommit=1;")
tk1.MustQuery("select * from t where c1=11 for update")
tk2.MustExec("begin")
tk2.MustExec("update t set c2=211 where c1=11")
tk2.MustExec("commit")
tk1.MustExec("commit")
// conflict
tk1.MustExec("begin")
tk1.MustQuery("select * from (select * from t for update) t join t1 for update")
tk2.MustExec("begin")
tk2.MustExec("update t1 set c1 = 13")
tk2.MustExec("commit")
_, err = tk1.Exec("commit")
c.Assert(err, NotNil)
}
func (s *testSuite) TestEmptyEnum(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (e enum('Y', 'N'))")
tk.MustExec("set sql_mode='STRICT_TRANS_TABLES'")
_, err := tk.Exec("insert into t values (0)")
c.Assert(terror.ErrorEqual(err, table.ErrTruncatedWrongValueForField), IsTrue, Commentf("err %v", err))
_, err = tk.Exec("insert into t values ('abc')")
c.Assert(terror.ErrorEqual(err, table.ErrTruncatedWrongValueForField), IsTrue, Commentf("err %v", err))
tk.MustExec("set sql_mode=''")
tk.MustExec("insert into t values (0)")
tk.MustQuery("select * from t").Check(testkit.Rows(""))
tk.MustExec("insert into t values ('abc')")
tk.MustQuery("select * from t").Check(testkit.Rows("", ""))
tk.MustExec("insert into t values (null)")
tk.MustQuery("select * from t").Check(testkit.Rows("", "", "<nil>"))
}
// TestIssue4024 tests https://github.com/pingcap/tidb/issues/4024
func (s *testSuite) TestIssue4024(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database test2")
tk.MustExec("use test2")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t values(1)")
tk.MustExec("use test")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t values(1)")
tk.MustExec("update t, test2.t set test2.t.a=2")
tk.MustQuery("select * from t").Check(testkit.Rows("1"))
tk.MustQuery("select * from test2.t").Check(testkit.Rows("2"))
tk.MustExec("update test.t, test2.t set test.t.a=3")
tk.MustQuery("select * from t").Check(testkit.Rows("3"))
tk.MustQuery("select * from test2.t").Check(testkit.Rows("2"))
}
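// The flags below select what checkRequestClient verifies on each outgoing TiKV
// request: nothing, coprocessor request priority, the SyncLog flag on
// prewrite/commit, or the priority of DDL add-index scans and prewrites.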
const (
checkRequestOff = 0
checkRequestPriority = 1
checkRequestSyncLog = 3
checkDDLAddIndexPriority = 4
)
type checkRequestClient struct {
tikv.Client
priority pb.CommandPri
lowPriorityCnt uint32
mu struct {
sync.RWMutex
checkFlags uint32
syncLog bool
}
}
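// The priority field is accessed atomically because the test goroutine updates
// the expected priority while SendRequest may read it concurrently from the
// store's request-sending goroutines.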
func (c *checkRequestClient) setCheckPriority(priority pb.CommandPri) {
atomic.StoreInt32((*int32)(&c.priority), int32(priority))
}
func (c *checkRequestClient) getCheckPriority() pb.CommandPri {
return (pb.CommandPri)(atomic.LoadInt32((*int32)(&c.priority)))
}
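// SendRequest forwards the request to the wrapped client and, depending on the
// active check flag, verifies the coprocessor request priority, the SyncLog
// flag on prewrite/commit requests, or the priority of DDL add-index scans,
// counting low-priority prewrites along the way.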
func (c *checkRequestClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
resp, err := c.Client.SendRequest(ctx, addr, req, timeout)
c.mu.RLock()
checkFlags := c.mu.checkFlags
c.mu.RUnlock()
if checkFlags == checkRequestPriority {
switch req.Type {
case tikvrpc.CmdCop:
if c.getCheckPriority() != req.Priority {
return nil, errors.New("fail to set priority")
}
}
} else if checkFlags == checkRequestSyncLog {
switch req.Type {
case tikvrpc.CmdPrewrite, tikvrpc.CmdCommit:
c.mu.RLock()
syncLog := c.mu.syncLog
c.mu.RUnlock()
if syncLog != req.SyncLog {
return nil, errors.New("fail to set sync log")
}
}
} else if checkFlags == checkDDLAddIndexPriority {
if req.Type == tikvrpc.CmdScan {
if c.getCheckPriority() != req.Priority {
return nil, errors.New("fail to set priority")
}
} else if req.Type == tikvrpc.CmdPrewrite {
if c.getCheckPriority() == pb.CommandPri_Low {
atomic.AddUint32(&c.lowPriorityCnt, 1)
}
}
}
return resp, err
}
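// testContextOptionSuite runs against a mock TiKV store whose client is wrapped
// by checkRequestClient, so individual tests can flip the check flags and
// assert on the requests TiDB actually sends.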
type testContextOptionSuite struct {
store kv.Storage
dom *domain.Domain
cli *checkRequestClient
}
func (s *testContextOptionSuite) SetUpSuite(c *C) {
cli := &checkRequestClient{}
hijackClient := func(c tikv.Client) tikv.Client {
cli.Client = c
return cli
}
s.cli = cli
var err error
s.store, err = mockstore.NewMockTikvStore(
mockstore.WithHijackClient(hijackClient),
)
c.Assert(err, IsNil)
s.dom, err = session.BootstrapSession(s.store)
c.Assert(err, IsNil)
}
func (s *testContextOptionSuite) TearDownSuite(c *C) {
s.dom.Close()
s.store.Close()
}
func (s *testContextOptionSuite) TestAddIndexPriority(c *C) {
cli := &checkRequestClient{}
hijackClient := func(c tikv.Client) tikv.Client {
cli.Client = c
return cli
}
store, err := mockstore.NewMockTikvStore(
mockstore.WithHijackClient(hijackClient),
)
c.Assert(err, IsNil)
dom, err := session.BootstrapSession(store)
c.Assert(err, IsNil)
defer func() {
dom.Close()
store.Close()
}()
tk := testkit.NewTestKit(c, store)
tk.MustExec("use test")
tk.MustExec("create table t1 (id int, v int)")
// Insert some data to make sure the planner builds an IndexLookup plan for t1.
for i := 0; i < 10; i++ {
tk.MustExec(fmt.Sprintf("insert into t1 values (%d, %d)", i, i))
}
cli.mu.Lock()
cli.mu.checkFlags = checkDDLAddIndexPriority
cli.mu.Unlock()
cli.setCheckPriority(pb.CommandPri_Low)
tk.MustExec("alter table t1 add index t1_index (id);")
c.Assert(atomic.LoadUint32(&cli.lowPriorityCnt) > 0, IsTrue)
cli.mu.Lock()
cli.mu.checkFlags = checkRequestOff
cli.mu.Unlock()
tk.MustExec("alter table t1 drop index t1_index;")
tk.MustExec("SET SESSION tidb_ddl_reorg_priority = 'PRIORITY_NORMAL'")
cli.mu.Lock()
cli.mu.checkFlags = checkDDLAddIndexPriority
cli.mu.Unlock()
cli.setCheckPriority(pb.CommandPri_Normal)
tk.MustExec("alter table t1 add index t1_index (id);")
cli.mu.Lock()
cli.mu.checkFlags = checkRequestOff
cli.mu.Unlock()
tk.MustExec("alter table t1 drop index t1_index;")
tk.MustExec("SET SESSION tidb_ddl_reorg_priority = 'PRIORITY_HIGH'")
cli.mu.Lock()
cli.mu.checkFlags = checkDDLAddIndexPriority
cli.mu.Unlock()
cli.setCheckPriority(pb.CommandPri_High)
tk.MustExec("alter table t1 add index t1_index (id);")
cli.mu.Lock()
cli.mu.checkFlags = checkRequestOff
cli.mu.Unlock()
}
func (s *testContextOptionSuite) TestAlterTableComment(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t_1")
tk.MustExec("create table t_1 (c1 int, c2 int, c3 int default 1, index (c1)) comment = 'test table';")
tk.MustExec("alter table `t_1` comment 'this is table comment';")
result := tk.MustQuery("select table_comment from information_schema.tables where table_name = 't_1';")
result.Check(testkit.Rows("this is table comment"))
tk.MustExec("alter table `t_1` comment 'table t comment';")
result = tk.MustQuery("select table_comment from information_schema.tables where table_name = 't_1';")
result.Check(testkit.Rows("table t comment"))
}
func (s *testContextOptionSuite) TestCoprocessorPriority(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (id int primary key)")
tk.MustExec("create table t1 (id int, v int, unique index i_id (id))")
defer tk.MustExec("drop table t")
defer tk.MustExec("drop table t1")
tk.MustExec("insert into t values (1)")
// Insert some data to make sure the planner builds an IndexLookup plan for t1.
for i := 0; i < 10; i++ {
tk.MustExec(fmt.Sprintf("insert into t1 values (%d, %d)", i, i))
}
cli := s.cli
cli.mu.Lock()
cli.mu.checkFlags = checkRequestPriority
cli.mu.Unlock()
cli.setCheckPriority(pb.CommandPri_High)
tk.MustQuery("select id from t where id = 1")
tk.MustQuery("select * from t1 where id = 1")
cli.setCheckPriority(pb.CommandPri_Normal)
tk.MustQuery("select count(*) from t")
tk.MustExec("update t set id = 3")
tk.MustExec("delete from t")
tk.MustExec("insert into t select * from t limit 2")
tk.MustExec("delete from t")
// Insert some data to make sure the planner builds an IndexLookup plan for t.
tk.MustExec("insert into t values (1), (2)")
oldThreshold := config.GetGlobalConfig().Log.ExpensiveThreshold
config.GetGlobalConfig().Log.ExpensiveThreshold = 0
defer func() { config.GetGlobalConfig().Log.ExpensiveThreshold = oldThreshold }()
cli.setCheckPriority(pb.CommandPri_High)
tk.MustQuery("select id from t where id = 1")
tk.MustQuery("select * from t1 where id = 1")
cli.setCheckPriority(pb.CommandPri_Low)
tk.MustQuery("select count(*) from t")
tk.MustExec("delete from t")
tk.MustExec("insert into t values (3)")
// TODO: Those are not point get, but they should be high priority.
// cli.priority = pb.CommandPri_High
// tk.MustExec("delete from t where id = 2")
// tk.MustExec("update t set id = 2 where id = 1")
// Test priority specified by SQL statement.
cli.setCheckPriority(pb.CommandPri_High)
tk.MustQuery("select HIGH_PRIORITY * from t")
cli.setCheckPriority(pb.CommandPri_Low)
tk.MustQuery("select LOW_PRIORITY id from t where id = 1")
cli.mu.Lock()
cli.mu.checkFlags = checkRequestOff
cli.mu.Unlock()
}
func (s *testSuite) TestTimezonePushDown(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (ts timestamp)")
defer tk.MustExec("drop table t")
tk.MustExec(`insert into t values ("2018-09-13 10:02:06")`)
systemTZ := timeutil.SystemLocation()
c.Assert(systemTZ.String(), Not(Equals), "System")
c.Assert(systemTZ.String(), Not(Equals), "Local")
ctx := context.Background()
count := 0
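// ctx1 carries a test-only hook under the "CheckSelectRequestHook" key; it is
// invoked with the kv.Request about to be sent, letting the test decode the
// coprocessor DAG request and verify the pushed-down time zone name.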
ctx1 := context.WithValue(ctx, "CheckSelectRequestHook", func(req *kv.Request) {
count += 1
dagReq := new(tipb.DAGRequest)
err := proto.Unmarshal(req.Data, dagReq)
c.Assert(err, IsNil)
c.Assert(dagReq.GetTimeZoneName(), Equals, systemTZ.String())
})
tk.Se.Execute(ctx1, `select * from t where ts = "2018-09-13 10:02:06"`)
tk.MustExec(`set time_zone="System"`)
tk.Se.Execute(ctx1, `select * from t where ts = "2018-09-13 10:02:06"`)
c.Assert(count, Equals, 2) // Make sure the hook function is called.
}
func (s *testSuite) TestNotFillCacheFlag(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (id int primary key)")
defer tk.MustExec("drop table t")
tk.MustExec("insert into t values (1)")
tests := []struct {
sql string
expect bool
}{
{"select SQL_NO_CACHE * from t", true},
{"select SQL_CACHE * from t", false},
{"select * from t", false},
}
count := 0
ctx := context.Background()
for _, test := range tests {
ctx1 := context.WithValue(ctx, "CheckSelectRequestHook", func(req *kv.Request) {
count++
if req.NotFillCache != test.expect {
c.Errorf("sql=%s, expect=%v, get=%v", test.sql, test.expect, req.NotFillCache)
}
})
rs, err := tk.Se.Execute(ctx1, test.sql)
c.Assert(err, IsNil)
tk.ResultSetToResult(rs[0], Commentf("sql: %v", test.sql))
}
c.Assert(count, Equals, len(tests)) // Make sure the hook function is called.
}
func (s *testContextOptionSuite) TestSyncLog(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
cli := s.cli
cli.mu.Lock()
cli.mu.checkFlags = checkRequestSyncLog
cli.mu.syncLog = true
cli.mu.Unlock()
tk.MustExec("create table t (id int primary key)")
cli.mu.Lock()
cli.mu.syncLog = false
cli.mu.Unlock()
tk.MustExec("insert into t values (1)")
cli.mu.Lock()
cli.mu.checkFlags = checkRequestOff
cli.mu.Unlock()
}
func (s *testSuite) TestHandleTransfer(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t(a int, index idx(a))")
tk.MustExec("insert into t values(1), (2), (4)")
tk.MustExec("begin")
tk.MustExec("update t set a = 3 where a = 4")
// Test a table scan read whose result needs handles.
tk.MustQuery("select * from t ignore index(idx)").Check(testkit.Rows("1", "2", "3"))
tk.MustExec("insert into t values(4)")
// Test a single index read whose result needs handles.
tk.MustQuery("select * from t use index(idx)").Check(testkit.Rows("1", "2", "3", "4"))
tk.MustExec("update t set a = 5 where a = 3")
tk.MustQuery("select * from t use index(idx)").Check(testkit.Rows("1", "2", "4", "5"))
tk.MustExec("commit")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, index idx(a))")
tk.MustExec("insert into t values(3, 3), (1, 1), (2, 2)")
// Second test double read.
tk.MustQuery("select * from t use index(idx) order by a").Check(testkit.Rows("1 1", "2 2", "3 3"))
}
func (s *testSuite) TestBit(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(2))")
tk.MustExec("insert into t values (0), (1), (2), (3)")
_, err := tk.Exec("insert into t values (4)")
c.Assert(err, NotNil)
_, err = tk.Exec("insert into t values ('a')")
c.Assert(err, NotNil)
r, err := tk.Exec("select * from t where c1 = 2")
c.Assert(err, IsNil)
chk := r.NewChunk()
err = r.Next(context.Background(), chk)
c.Assert(err, IsNil)
c.Assert(types.BinaryLiteral(chk.GetRow(0).GetBytes(0)), DeepEquals, types.NewBinaryLiteralFromUint(2, -1))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(31))")
tk.MustExec("insert into t values (0x7fffffff)")
_, err = tk.Exec("insert into t values (0x80000000)")
c.Assert(err, NotNil)
_, err = tk.Exec("insert into t values (0xffffffff)")
c.Assert(err, NotNil)
tk.MustExec("insert into t values ('123')")
tk.MustExec("insert into t values ('1234')")
_, err = tk.Exec("insert into t values ('12345)")
c.Assert(err, NotNil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(62))")
tk.MustExec("insert into t values ('12345678')")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(61))")
_, err = tk.Exec("insert into t values ('12345678')")
c.Assert(err, NotNil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(32))")
tk.MustExec("insert into t values (0x7fffffff)")
tk.MustExec("insert into t values (0xffffffff)")
_, err = tk.Exec("insert into t values (0x1ffffffff)")
c.Assert(err, NotNil)
tk.MustExec("insert into t values ('1234')")
_, err = tk.Exec("insert into t values ('12345')")
c.Assert(err, NotNil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(64))")
tk.MustExec("insert into t values (0xffffffffffffffff)")
tk.MustExec("insert into t values ('12345678')")
_, err = tk.Exec("insert into t values ('123456789')")
c.Assert(err, NotNil)
}
func (s *testSuite) TestEnum(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c enum('a', 'b', 'c'))")
tk.MustExec("insert into t values ('a'), (2), ('c')")
tk.MustQuery("select * from t where c = 'a'").Check(testkit.Rows("a"))
tk.MustQuery("select c + 1 from t where c = 2").Check(testkit.Rows("3"))
tk.MustExec("delete from t")
tk.MustExec("insert into t values ()")
tk.MustExec("insert into t values (null), ('1')")
tk.MustQuery("select c + 1 from t where c = 1").Check(testkit.Rows("2"))
}
func (s *testSuite) TestSet(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c set('a', 'b', 'c'))")
tk.MustExec("insert into t values ('a'), (2), ('c'), ('a,b'), ('b,a')")
tk.MustQuery("select * from t where c = 'a'").Check(testkit.Rows("a"))
tk.MustQuery("select * from t where c = 'a,b'").Check(testkit.Rows("a,b", "a,b"))
tk.MustQuery("select c + 1 from t where c = 2").Check(testkit.Rows("3"))
tk.MustExec("delete from t")
tk.MustExec("insert into t values ()")
tk.MustExec("insert into t values (null), ('1')")
tk.MustQuery("select c + 1 from t where c = 1").Check(testkit.Rows("2"))
}
func (s *testSuite) TestSubqueryInValues(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (id int, name varchar(20))")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (gid int)")
tk.MustExec("insert into t1 (gid) value (1)")
tk.MustExec("insert into t (id, name) value ((select gid from t1) ,'asd')")
tk.MustQuery("select * from t").Check(testkit.Rows("1 asd"))
}
func (s *testSuite) TestEnhancedRangeAccess(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key, b int)")
tk.MustExec("insert into t values(1, 2), (2, 1)")
tk.MustQuery("select * from t where (a = 1 and b = 2) or (a = 2 and b = 1)").Check(testkit.Rows("1 2", "2 1"))
tk.MustQuery("select * from t where (a = 1 and b = 1) or (a = 2 and b = 2)").Check(nil)
}
// TestMaxInt64Handle Issue #4810
func (s *testSuite) TestMaxInt64Handle(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id bigint, PRIMARY KEY (id))")
tk.MustExec("insert into t values(9223372036854775807)")
tk.MustExec("select * from t where id = 9223372036854775807")
tk.MustQuery("select * from t where id = 9223372036854775807;").Check(testkit.Rows("9223372036854775807"))
tk.MustQuery("select * from t").Check(testkit.Rows("9223372036854775807"))
_, err := tk.Exec("insert into t values(9223372036854775807)")
c.Assert(err, NotNil)
tk.MustExec("delete from t where id = 9223372036854775807")
tk.MustQuery("select * from t").Check(nil)
}
func (s *testSuite) TestTableScanWithPointRanges(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id int, PRIMARY KEY (id))")
tk.MustExec("insert into t values(1), (5), (10)")
tk.MustQuery("select * from t where id in(1, 2, 10)").Check(testkit.Rows("1", "10"))
}
func (s *testSuite) TestUnsignedPk(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id bigint unsigned primary key)")
var num1, num2 uint64 = math.MaxInt64 + 1, math.MaxInt64 + 2
tk.MustExec(fmt.Sprintf("insert into t values(%v), (%v), (1), (2)", num1, num2))
num1Str := strconv.FormatUint(num1, 10)
num2Str := strconv.FormatUint(num2, 10)
tk.MustQuery("select * from t order by id").Check(testkit.Rows("1", "2", num1Str, num2Str))
tk.MustQuery("select * from t where id not in (2)").Check(testkit.Rows(num1Str, num2Str, "1"))
}
func (s *testSuite) TestEarlyClose(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table earlyclose (id int primary key)")
// Insert 1000 rows.
var values []string
for i := 0; i < 1000; i++ {
values = append(values, fmt.Sprintf("(%d)", i))
}
tk.MustExec("insert earlyclose values " + strings.Join(values, ","))
// Get table ID for split.
dom := domain.GetDomain(tk.Se)
is := dom.InfoSchema()
tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("earlyclose"))
c.Assert(err, IsNil)
tblID := tbl.Meta().ID
// Split the table.
s.cluster.SplitTable(s.mvccStore, tblID, 500)
ctx := context.Background()
for i := 0; i < 500; i++ {
rss, err1 := tk.Se.Execute(ctx, "select * from earlyclose order by id")
c.Assert(err1, IsNil)
rs := rss[0]
chk := rs.NewChunk()
err = rs.Next(ctx, chk)
c.Assert(err, IsNil)
rs.Close()
}
// Goroutines should not leak when an error happens.
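// The failpoint below makes coprocessor task handling return an error, so Next
// fails; closing the result set should still release all worker goroutines.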
gofail.Enable("github.com/pingcap/tidb/store/tikv/handleTaskOnceError", `return(true)`)
defer gofail.Disable("github.com/pingcap/tidb/store/tikv/handleTaskOnceError")
rss, err := tk.Se.Execute(ctx, "select * from earlyclose")
c.Assert(err, IsNil)
rs := rss[0]
chk := rs.NewChunk()
err = rs.Next(ctx, chk)
c.Assert(err, NotNil)
rs.Close()
}
func (s *testSuite) TestIssue5666(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("set @@profiling=1")
tk.MustQuery("SELECT QUERY_ID, SUM(DURATION) AS SUM_DURATION FROM INFORMATION_SCHEMA.PROFILING GROUP BY QUERY_ID;").Check(testkit.Rows("0 0"))
}
func (s *testSuite) TestIssue5341(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("drop table if exists test.t")
tk.MustExec("create table test.t(a char)")
tk.MustExec("insert into test.t value('a')")
tk.MustQuery("select * from test.t where a < 1 order by a limit 0;").Check(testkit.Rows())
}
func (s *testSuite) TestContainDotColumn(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists test.t1")
tk.MustExec("create table test.t1(t1.a char)")
tk.MustExec("drop table if exists t2")
tk.MustExec("create table t2(a char, t2.b int)")
tk.MustExec("drop table if exists t3")
_, err := tk.Exec("create table t3(s.a char);")
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrWrongTableName))
}
func (s *testSuite) TestCheckIndex(c *C) {
s.ctx = mock.NewContext()
s.ctx.Store = s.store
se, err := session.CreateSession4Test(s.store)
c.Assert(err, IsNil)
defer se.Close()
_, err = se.Execute(context.Background(), "create database test_admin")
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "use test_admin")
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "create table t (pk int primary key, c int default 1, c1 int default 1, unique key c(c))")
c.Assert(err, IsNil)
is := s.domain.InfoSchema()
db := model.NewCIStr("test_admin")
dbInfo, ok := is.SchemaByName(db)
c.Assert(ok, IsTrue)
tblName := model.NewCIStr("t")
tbl, err := is.TableByName(db, tblName)
c.Assert(err, IsNil)
tbInfo := tbl.Meta()
alloc := autoid.NewAllocator(s.store, dbInfo.ID)
tb, err := tables.TableFromMeta(alloc, tbInfo)
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t c")
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t C")
c.Assert(err, IsNil)
// set data to:
// index data (handle, data): (1, 10), (2, 20)
// table data (handle, data): (1, 10), (2, 20)
recordVal1 := types.MakeDatums(int64(1), int64(10), int64(11))
recordVal2 := types.MakeDatums(int64(2), int64(20), int64(21))
c.Assert(s.ctx.NewTxn(), IsNil)
_, err = tb.AddRecord(s.ctx, recordVal1, false)
c.Assert(err, IsNil)
_, err = tb.AddRecord(s.ctx, recordVal2, false)
c.Assert(err, IsNil)
c.Assert(s.ctx.Txn(true).Commit(context.Background()), IsNil)
mockCtx := mock.NewContext()
idx := tb.Indices()[0]
sc := &stmtctx.StatementContext{TimeZone: time.Local}
_, err = se.Execute(context.Background(), "admin check index t idx_inexistent")
c.Assert(strings.Contains(err.Error(), "not exist"), IsTrue)
// set data to:
// index data (handle, data): (1, 10), (2, 20), (3, 30)
// table data (handle, data): (1, 10), (2, 20), (4, 40)
txn, err := s.store.Begin()
c.Assert(err, IsNil)
_, err = idx.Create(mockCtx, txn, types.MakeDatums(int64(30)), 3)
c.Assert(err, IsNil)
key := tablecodec.EncodeRowKey(tb.Meta().ID, codec.EncodeInt(nil, 4))
setColValue(c, txn, key, types.NewDatum(int64(40)))
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t c")
c.Assert(err, NotNil)
c.Assert(strings.Contains(err.Error(), "isn't equal to value count"), IsTrue)
// set data to:
// index data (handle, data): (1, 10), (2, 20), (3, 30), (4, 40)
// table data (handle, data): (1, 10), (2, 20), (4, 40)
txn, err = s.store.Begin()
c.Assert(err, IsNil)
_, err = idx.Create(mockCtx, txn, types.MakeDatums(int64(40)), 4)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t c")
c.Assert(strings.Contains(err.Error(), "table count 3 != index(c) count 4"), IsTrue)
// set data to:
// index data (handle, data): (1, 10), (4, 40)
// table data (handle, data): (1, 10), (2, 20), (4, 40)
txn, err = s.store.Begin()
c.Assert(err, IsNil)
err = idx.Delete(sc, txn, types.MakeDatums(int64(30)), 3)
c.Assert(err, IsNil)
err = idx.Delete(sc, txn, types.MakeDatums(int64(20)), 2)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t c")
c.Assert(strings.Contains(err.Error(), "table count 3 != index(c) count 2"), IsTrue)
// TODO: pass the case below:
// set data to:
// index data (handle, data): (1, 10), (4, 40), (2, 30)
// table data (handle, data): (1, 10), (2, 20), (4, 40)
}
func setColValue(c *C, txn kv.Transaction, key kv.Key, v types.Datum) {
row := []types.Datum{v, {}}
colIDs := []int64{2, 3}
sc := &stmtctx.StatementContext{TimeZone: time.Local}
value, err := tablecodec.EncodeRow(sc, row, colIDs, nil, nil)
c.Assert(err, IsNil)
err = txn.Set(key, value)
c.Assert(err, IsNil)
}
func (s *testSuite) TestCheckTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
// Test 'admin check table' when the table has a unique index with null values.
tk.MustExec("use test")
tk.MustExec("drop table if exists admin_test;")
tk.MustExec("create table admin_test (c1 int, c2 int, c3 int default 1, index (c1), unique key(c2));")
tk.MustExec("insert admin_test (c1, c2) values (1, 1), (2, 2), (NULL, NULL);")
tk.MustExec("admin check table admin_test;")
}
func (s *testSuite) TestCoprocessorStreamingFlag(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (id int, value int, index idx(id))")
// Add some data to make statistics work.
for i := 0; i < 100; i++ {
tk.MustExec(fmt.Sprintf("insert into t values (%d, %d)", i, i))
}
tests := []struct {
sql string
expect bool
}{
{"select * from t", true}, // TableReader
{"select * from t where id = 5", true}, // IndexLookup
{"select * from t where id > 5", true}, // Filter
{"select * from t limit 3", false}, // Limit
{"select avg(id) from t", false}, // Aggregate
{"select * from t order by value limit 3", false}, // TopN
}
ctx := context.Background()
for _, test := range tests {
ctx1 := context.WithValue(ctx, "CheckSelectRequestHook", func(req *kv.Request) {
if req.Streaming != test.expect {
c.Errorf("sql=%s, expect=%v, get=%v", test.sql, test.expect, req.Streaming)
}
})
rs, err := tk.Se.Execute(ctx1, test.sql)
c.Assert(err, IsNil)
tk.ResultSetToResult(rs[0], Commentf("sql: %v", test.sql))
}
}
func (s *testSuite) TestIncorrectLimitArg(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test;`)
tk.MustExec(`drop table if exists t;`)
tk.MustExec(`create table t(a bigint);`)
tk.MustExec(`prepare stmt1 from 'select * from t limit ?';`)
tk.MustExec(`prepare stmt2 from 'select * from t limit ?, ?';`)
tk.MustExec(`set @a = -1;`)
tk.MustExec(`set @b = 1;`)
var err error
_, err = tk.Se.Execute(context.TODO(), `execute stmt1 using @a;`)
c.Assert(err.Error(), Equals, `[planner:1210]Incorrect arguments to LIMIT`)
_, err = tk.Se.Execute(context.TODO(), `execute stmt2 using @b, @a;`)
c.Assert(err.Error(), Equals, `[planner:1210]Incorrect arguments to LIMIT`)
}
func (s *testSuite) TestLimit(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test;`)
tk.MustExec(`drop table if exists t;`)
tk.MustExec(`create table t(a bigint, b bigint);`)
tk.MustExec(`insert into t values(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6);`)
tk.MustQuery(`select * from t order by a limit 1, 1;`).Check(testkit.Rows(
"2 2",
))
tk.MustQuery(`select * from t order by a limit 1, 2;`).Check(testkit.Rows(
"2 2",
"3 3",
))
tk.MustQuery(`select * from t order by a limit 1, 3;`).Check(testkit.Rows(
"2 2",
"3 3",
"4 4",
))
tk.MustQuery(`select * from t order by a limit 1, 4;`).Check(testkit.Rows(
"2 2",
"3 3",
"4 4",
"5 5",
))
tk.MustExec(`set @@tidb_max_chunk_size=2;`)
tk.MustQuery(`select * from t order by a limit 2, 1;`).Check(testkit.Rows(
"3 3",
))
tk.MustQuery(`select * from t order by a limit 2, 2;`).Check(testkit.Rows(
"3 3",
"4 4",
))
tk.MustQuery(`select * from t order by a limit 2, 3;`).Check(testkit.Rows(
"3 3",
"4 4",
"5 5",
))
tk.MustQuery(`select * from t order by a limit 2, 4;`).Check(testkit.Rows(
"3 3",
"4 4",
"5 5",
"6 6",
))
}
func (s *testSuite) TestCoprocessorStreamingWarning(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a double)")
tk.MustExec("insert into t value(1.2)")
tk.MustExec("set @@session.tidb_enable_streaming = 1")
result := tk.MustQuery("select * from t where a/0 > 1")
result.Check(testkit.Rows())
tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Warning|1105|Division by 0"))
}
func (s *testSuite) TestYearTypeDeleteIndex(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a YEAR, PRIMARY KEY(a));")
tk.MustExec("insert into t set a = '2151';")
tk.MustExec("delete from t;")
tk.MustExec("admin check table t")
}
func (s *testSuite) TestForSelectScopeInUnion(c *C) {
// In "A union B for update", the "for update" option belongs to the union statement, so
// it should work on both A and B.
tk1 := testkit.NewTestKit(c, s.store)
tk2 := testkit.NewTestKit(c, s.store)
tk1.MustExec("use test")
tk1.MustExec("drop table if exists t")
tk1.MustExec("create table t(a int)")
tk1.MustExec("insert into t values (1)")
tk1.MustExec("begin")
// 'For update' would act on the second select.
tk1.MustQuery("select 1 as a union select a from t for update")
tk2.MustExec("use test")
tk2.MustExec("update t set a = a + 1")
// As tk1 uses select 'for update', it should detect the conflict and fail.
_, err := tk1.Exec("commit")
c.Assert(err, NotNil)
tk1.MustExec("begin")
// 'For update' would be ignored if 'order by' or 'limit' exists.
tk1.MustQuery("select 1 as a union select a from t limit 5 for update")
tk1.MustQuery("select 1 as a union select a from t order by a for update")
tk2.MustExec("update t set a = a + 1")
_, err = tk1.Exec("commit")
c.Assert(err, IsNil)
}
func (s *testSuite) TestUnsignedDecimalOverflow(c *C) {
tests := []struct {
input interface{}
hasErr bool
err string
}{{
-1,
true,
"Out of range value for column",
}, {
"-1.1e-1",
true,
"Out of range value for column",
}, {
-1.1,
true,
"Out of range value for column",
}, {
-0,
false,
"",
},
}
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a decimal(10,2) unsigned)")
for _, t := range tests {
res, err := tk.Exec("insert into t values (?)", t.input)
if res != nil {
defer res.Close()
}
if t.hasErr {
c.Assert(err, NotNil)
c.Assert(strings.Contains(err.Error(), t.err), IsTrue)
} else {
c.Assert(err, IsNil)
}
if res != nil {
res.Close()
}
}
tk.MustExec("set sql_mode=''")
tk.MustExec("delete from t")
tk.MustExec("insert into t values (?)", -1)
r := tk.MustQuery("select a from t limit 1")
r.Check(testkit.Rows("0.00"))
}
func (s *testSuite) TestIndexJoinTableDualPanic(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists a")
tk.MustExec("create table a (f1 int, f2 varchar(32), primary key (f1))")
tk.MustExec("insert into a (f1,f2) values (1,'a'), (2,'b'), (3,'c')")
tk.MustQuery("select a.* from a inner join (select 1 as k1,'k2-1' as k2) as k on a.f1=k.k1;").
Check(testkit.Rows("1 a"))
}
func (s *testSuite) TestUnionAutoSignedCast(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1,t2")
tk.MustExec("create table t1 (id int, i int, b bigint, d double, dd decimal)")
tk.MustExec("create table t2 (id int, i int unsigned, b bigint unsigned, d double unsigned, dd decimal unsigned)")
tk.MustExec("insert into t1 values(1, -1, -1, -1.1, -1)")
tk.MustExec("insert into t2 values(2, 1, 1, 1.1, 1)")
tk.MustQuery("select * from t1 union select * from t2 order by id").
Check(testkit.Rows("1 -1 -1 -1.1 -1", "2 1 1 1.1 1"))
tk.MustQuery("select id, i, b, d, dd from t2 union select id, i, b, d, dd from t1 order by id").
Check(testkit.Rows("1 0 0 0 -1", "2 1 1 1.1 1"))
tk.MustQuery("select id, i from t2 union select id, cast(i as unsigned int) from t1 order by id").
Check(testkit.Rows("1 18446744073709551615", "2 1"))
tk.MustQuery("select dd from t2 union all select dd from t2").
Check(testkit.Rows("1", "1"))
tk.MustExec("drop table if exists t3,t4")
tk.MustExec("create table t3 (id int, v int)")
tk.MustExec("create table t4 (id int, v double unsigned)")
tk.MustExec("insert into t3 values (1, -1)")
tk.MustExec("insert into t4 values (2, 1)")
tk.MustQuery("select id, v from t3 union select id, v from t4 order by id").
Check(testkit.Rows("1 -1", "2 1"))
tk.MustQuery("select id, v from t4 union select id, v from t3 order by id").
Check(testkit.Rows("1 0", "2 1"))
tk.MustExec("drop table if exists t5,t6,t7")
tk.MustExec("create table t5 (id int, v bigint unsigned)")
tk.MustExec("create table t6 (id int, v decimal)")
tk.MustExec("create table t7 (id int, v bigint)")
tk.MustExec("insert into t5 values (1, 1)")
tk.MustExec("insert into t6 values (2, -1)")
tk.MustExec("insert into t7 values (3, -1)")
tk.MustQuery("select id, v from t5 union select id, v from t6 order by id").
Check(testkit.Rows("1 1", "2 -1"))
tk.MustQuery("select id, v from t5 union select id, v from t7 union select id, v from t6 order by id").
Check(testkit.Rows("1 1", "2 -1", "3 -1"))
}
func (s *testSuite) TestUpdateJoin(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2, t3, t4, t5, t6, t7")
tk.MustExec("create table t1(k int, v int)")
tk.MustExec("create table t2(k int, v int)")
tk.MustExec("create table t3(id int auto_increment, k int, v int, primary key(id))")
tk.MustExec("create table t4(k int, v int)")
tk.MustExec("create table t5(v int, k int, primary key(k))")
tk.MustExec("insert into t1 values (1, 1)")
tk.MustExec("insert into t4 values (3, 3)")
tk.MustExec("create table t6 (id int, v longtext)")
tk.MustExec("create table t7 (x int, id int, v longtext, primary key(id))")
// test the normal case that update one row for a single table.
tk.MustExec("update t1 set v = 0 where k = 1")
tk.MustQuery("select k, v from t1 where k = 1").Check(testkit.Rows("1 0"))
// Test the case where a table with auto_increment or non-null columns is the right table of a left join.
tk.MustExec("update t1 left join t3 on t1.k = t3.k set t1.v = 1")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 1"))
tk.MustQuery("select id, k, v from t3").Check(testkit.Rows())
// Test a left join where the right table has no matching record but its columns are still assigned in the SET clause.
tk.MustExec("update t1 left join t2 on t1.k = t2.k set t1.v = t2.v, t2.v = 3")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 <nil>"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows())
// test the case that the update operation in the left table references data in the right table while data of the right table columns is modified.
tk.MustExec("update t1 left join t2 on t1.k = t2.k set t2.v = 3, t1.v = t2.v")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 <nil>"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows())
// Test a right join where the left table has no matching record but its columns are still assigned in the SET clause.
tk.MustExec("update t2 right join t1 on t2.k = t1.k set t2.v = 4, t1.v = 0")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 0"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows())
// test the case of right join and left join at the same time.
tk.MustExec("update t1 left join t2 on t1.k = t2.k right join t4 on t4.k = t2.k set t1.v = 4, t2.v = 4, t4.v = 4")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 0"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows())
tk.MustQuery("select k, v from t4").Check(testkit.Rows("3 4"))
// test normal left join and the case that the right table has matching rows.
tk.MustExec("insert t2 values (1, 10)")
tk.MustExec("update t1 left join t2 on t1.k = t2.k set t2.v = 11")
tk.MustQuery("select k, v from t2").Check(testkit.Rows("1 11"))
// Test the case of joining the same table repeatedly and updating the non-matching records.
tk.MustExec("update t1 t11 left join t2 on t11.k = t2.k left join t1 t12 on t2.v = t12.k set t12.v = 233, t11.v = 111")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 111"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows("1 11"))
// test the left join case that the left table has records but all records are null.
tk.MustExec("delete from t1")
tk.MustExec("delete from t2")
tk.MustExec("insert into t1 values (null, null)")
tk.MustExec("update t1 left join t2 on t1.k = t2.k set t1.v = 1")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("<nil> 1"))
// Test the case where the right table of the left join has a primary key.
tk.MustExec("insert t5 values(0, 0)")
tk.MustExec("update t1 left join t5 on t1.k = t5.k set t1.v = 2")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("<nil> 2"))
tk.MustQuery("select k, v from t5").Check(testkit.Rows("0 0"))
tk.MustExec("insert into t6 values (1, NULL)")
tk.MustExec("insert into t7 values (5, 1, 'a')")
tk.MustExec("update t6, t7 set t6.v = t7.v where t6.id = t7.id and t7.x = 5")
tk.MustQuery("select v from t6").Check(testkit.Rows("a"))
}
func (s *testSuite) TestMaxOneRow(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t1`)
tk.MustExec(`drop table if exists t2`)
tk.MustExec(`create table t1(a double, b double);`)
tk.MustExec(`create table t2(a double, b double);`)
tk.MustExec(`insert into t1 values(1, 1), (2, 2), (3, 3);`)
tk.MustExec(`insert into t2 values(0, 0);`)
tk.MustExec(`set @@tidb_max_chunk_size=1;`)
rs, err := tk.Exec(`select (select t1.a from t1 where t1.a > t2.a) as a from t2;`)
c.Assert(err, IsNil)
err = rs.Next(context.TODO(), rs.NewChunk())
c.Assert(err.Error(), Equals, "subquery returns more than 1 row")
err = rs.Close()
c.Assert(err, IsNil)
}
func (s *testSuite) TestCurrentTimestampValueSelection(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t,t1")
tk.MustExec("create table t (id int, t0 timestamp null default current_timestamp, t1 timestamp(1) null default current_timestamp(1), t2 timestamp(2) null default current_timestamp(2) on update current_timestamp(2))")
tk.MustExec("insert into t (id) values (1)")
rs := tk.MustQuery("select t0, t1, t2 from t where id = 1")
t0 := rs.Rows()[0][0].(string)
t1 := rs.Rows()[0][1].(string)
t2 := rs.Rows()[0][2].(string)
c.Assert(len(strings.Split(t0, ".")), Equals, 1)
c.Assert(len(strings.Split(t1, ".")[1]), Equals, 1)
c.Assert(len(strings.Split(t2, ".")[1]), Equals, 2)
tk.MustQuery("select id from t where t0 = ?", t0).Check(testkit.Rows("1"))
tk.MustQuery("select id from t where t1 = ?", t1).Check(testkit.Rows("1"))
tk.MustQuery("select id from t where t2 = ?", t2).Check(testkit.Rows("1"))
time.Sleep(time.Second / 2)
tk.MustExec("update t set t0 = now() where id = 1")
rs = tk.MustQuery("select t2 from t where id = 1")
newT2 := rs.Rows()[0][0].(string)
c.Assert(newT2 != t2, IsTrue)
tk.MustExec("create table t1 (id int, a timestamp, b timestamp(2), c timestamp(3))")
tk.MustExec("insert into t1 (id, a, b, c) values (1, current_timestamp(2), current_timestamp, current_timestamp(3))")
rs = tk.MustQuery("select a, b, c from t1 where id = 1")
a := rs.Rows()[0][0].(string)
b := rs.Rows()[0][1].(string)
d := rs.Rows()[0][2].(string)
c.Assert(len(strings.Split(a, ".")), Equals, 1)
c.Assert(strings.Split(b, ".")[1], Equals, "00")
c.Assert(len(strings.Split(d, ".")[1]), Equals, 3)
}
func (s *testSuite) TestRowID(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t(a varchar(10), b varchar(10), c varchar(1), index idx(a, b, c));`)
tk.MustExec(`insert into t values('a', 'b', 'c');`)
tk.MustExec(`insert into t values('a', 'b', 'c');`)
tk.MustQuery(`select b, _tidb_rowid from t use index(idx) where a = 'a';`).Check(testkit.Rows(
`b 1`,
`b 2`,
))
tk.MustExec(`begin;`)
tk.MustExec(`select * from t for update`)
tk.MustQuery(`select distinct b from t use index(idx) where a = 'a';`).Check(testkit.Rows(`b`))
tk.MustExec(`commit;`)
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t(a varchar(5) primary key)`)
tk.MustExec(`insert into t values('a')`)
tk.MustQuery("select *, _tidb_rowid from t use index(`primary`) where _tidb_rowid=1").Check(testkit.Rows("a 1"))
}
func (s *testSuite) TestDoSubquery(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t(a int)`)
_, err := tk.Exec(`do 1 in (select * from t)`)
c.Assert(err, IsNil, Commentf("err %v", err))
tk.MustExec(`insert into t values(1)`)
r, err := tk.Exec(`do 1 in (select * from t)`)
c.Assert(err, IsNil, Commentf("err %v", err))
c.Assert(r, IsNil, Commentf("result of DO should be empty"))
}
| [
"\"log_level\""
] | [] | [
"log_level"
] | [] | ["log_level"] | go | 1 | 0 | |
tests/mockAws/test_handler_renderHtml_writes.py | import unittest
import libraries.morflessLibs as libs
import boto3
import os, json
"""
Set environment variables so that the renderHtml handler
library module can be imported below.
"""
from moto import mock_s3
os.environ['SOURCE_BUCKET'] = "sourcebucket"
os.environ['TARGET_BUCKET'] = "targetbucket"
os.environ["LIST_BUCKET"] = "listbucket"
os.environ["SEARCH_BUCKET"] = "searchbucket"
import lambda_source.RenderHtml.renderHtml_handlerLibrary as rdl
from fixtures.create_files_for_bucket import get_file_content
from fixtures.decorators import testCall
from collections import OrderedDict
# constants for test
SETTINGS_FILE = 'settings.txt'
POSTLIST_FILE = 'postlist.json'
ARCHIVE_FILE = 'archive.json'
DEFAULT_SOURCE_ROOT = "tests/mockAws/default_source/renderHtml_writes/"
FILENAME = "test_file.txt"
OTHERFILE = "test_file_other.txt"
REGION = "us-east-1"
FILE_SOURCE = "tests/mockAws/default_source/settings.txt"
JSON_FILE = 'test_json_file.json'
JSON_FILE_2 = 'test_json_file_2.json'
OTHER_JSON = 'test_other.json'
HTML_FILE = 'test_html_file.html'
STANDARD_FILE = 'test_standard_file.txt'
JSON_TEXT = """
{
"variable1": "this",
"variable2": "that"
}
"""
JSON_TEXT_2 = """
{
"variable3": "this",
"variable4": {
"variable5" : [
"that",
"that again"
]
}
}
"""
JSON_ORDERED = OrderedDict([('variable1', 'this'), ('variable2', 'that')])
HTML_TEXT = """
<html>
<head>Something here</head>
<body>Something else here</body>
</html>
"""
STANDARD_TEXT = """
Something here
"""
CONTENT_DISPOSITION = 'ContentDisposition'
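# Default metadata fixtures used as the expected JSON content of the list
# bucket: empty category/author metadata and the per-page dependency map.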
CATEGORIES_DEFAULT = {
"no_of_category_pages": 1,
"categories": [
]
}
AUTHORS_DEFAULT = {
"no_of_author_pages": 1,
"authors": [
]
}
LIST_META_DEFAULT = { \
'categories': CATEGORIES_DEFAULT,
'authors': AUTHORS_DEFAULT
}
DEPENDENCIES_DEFAULT = [\
{
"filename": "index.page",
"dependencies": [
"settings.txt"
]
},
{
"filename": "404.page",
"dependencies": [
"settings.txt"
]
}
]
@mock_s3
class RenderHtmlReadWrites(unittest.TestCase):
# set up bucket
def setUp(self):
self.log = {}
self.read_content = ''
self.maxDiff = None
class_dir = os.getcwd()
file_source = os.path.join(class_dir,DEFAULT_SOURCE_ROOT,SETTINGS_FILE)
self.settings_content = get_file_content(file_source)
file_source = os.path.join(class_dir,DEFAULT_SOURCE_ROOT,POSTLIST_FILE)
self.postlist_default = json.loads(get_file_content(file_source))
file_source = os.path.join(class_dir,DEFAULT_SOURCE_ROOT,ARCHIVE_FILE)
self.archive_default = json.loads(get_file_content(file_source))
# create bucket and write json content to it
self.s3resource = boto3.resource('s3', region_name=REGION)
self.s3resource.create_bucket(Bucket=rdl.sourcebucket)
self.s3resource.create_bucket(Bucket=rdl.listbucket)
self.s3resource.create_bucket(Bucket=rdl.searchbucket)
self.s3resource.create_bucket(Bucket=rdl.targetbucket)
self.s3client = boto3.client('s3', region_name=REGION)
self.s3client.put_object(Bucket=rdl.listbucket, Key=JSON_FILE, Body=JSON_TEXT)
self.s3client.put_object(Bucket=rdl.sourcebucket, Key=SETTINGS_FILE, Body=self.settings_content)
def tearDown(self):
self.log = {}
self.source_content = ''
self.read_content = ''
self.settings_content = {}
self.postlist_default = {}
# delete content from s3 and delete buckets
# source
bucket = self.s3resource.Bucket(rdl.sourcebucket)
for key in bucket.objects.all():
key.delete()
bucket.delete()
# list
bucket = self.s3resource.Bucket(rdl.listbucket)
for key in bucket.objects.all():
key.delete()
bucket.delete()
# search
bucket = self.s3resource.Bucket(rdl.searchbucket)
for key in bucket.objects.all():
key.delete()
bucket.delete()
# target
bucket = self.s3resource.Bucket(rdl.targetbucket)
for key in bucket.objects.all():
key.delete()
bucket.delete()
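# The single test method below exercises the handler's S3 read/write helpers end
# to end against the moto-mocked buckets: JSON reads and writes, settings lookup,
# dependency and list-meta updates, search content output, HTML output to the
# target bucket, and post list / archive updates.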
@testCall
def test_renderHtml_read_writes(self):
# process_json_files
print('\nTest 1 - process json file\n')
self.read_content,self.log = rdl.process_json_files(JSON_FILE, rdl.listbucket,self.log)
print('read content: {}'.format(self.read_content))
# check asserts
self.assertEqual(JSON_ORDERED, self.read_content)
self.assertEqual(self.log,{})
print('\nTest 2 - process json file not present in bucket\n')
self.read_content,self.log = rdl.process_json_files(OTHER_JSON, rdl.listbucket,self.log)
print('read content: {}'.format(self.read_content))
# check asserts
self.assertEqual({}, self.read_content)
log_message = 'File: {} not processed - does not exist'.format(OTHER_JSON)
self.assertEqual(self.log[OTHER_JSON],log_message)
# write_source_json
self.log = {}
print('\nTest 3 - write_source_json\n')
self.log = rdl.write_source_json(rdl.listbucket,JSON_FILE_2,JSON_TEXT_2,self.log)
# save local copy of log
write_log = self.log
print(write_log)
# read file for confirmation - reset log
self.log = {}
processed,self.read_content,self.log = rdl.get_content_from_s3(JSON_FILE_2, rdl.listbucket,self.log)
# check asserts
self.assertEqual(processed,True)
self.assertEqual(JSON_TEXT_2, self.read_content)
self.assertEqual(self.log,{})
log_message = 'JSON File: {} processed and updated'.format(JSON_FILE_2)
self.assertEqual(write_log[JSON_FILE_2],log_message)
# update_list_json_info
# - any json can be used
print('\nTest 4 - update list json info\n')
self.log = {}
info = JSON_TEXT
filename = JSON_FILE
self.log = rdl.update_list_json_info(info,filename,self.log)
print(self.log)
log_message = 'JSON File: {} processed and updated'.format(JSON_FILE)
self.assertEqual(self.log[JSON_FILE],log_message)
# get_site_settings
print('\nTest 5 - get settings - file does not contain usable data so default settings returned with empty section defaults\n')
self.log = {}
self.read_content,self.log = rdl.get_site_settings(self.log)
settings = libs.globals.DEFAULT_SETTINGS
# set section html to empty
settings['default_header'] = ''
settings['default_before'] = ''
settings['default_main'] = ''
settings['default_after'] = ''
settings['default_sidebar'] = ''
settings['default_footer'] = ''
print('Log: {}'.format(self.log))
print('Settings content {}:'.format(self.read_content))
self.assertEqual(settings, self.read_content)
self.assertEqual(self.log,{})
print('\nTest 6 - get settings - no settings file in bucket\n')
# delete settings file from bucket
self.s3client.delete_object(
Bucket=rdl.sourcebucket,
Key='settings.txt')
self.log = {}
self.read_content,self.log = rdl.get_site_settings(self.log)
print('Log: {}'.format(self.log))
print('Settings content {}:'.format(self.read_content))
log_message = 'File: settings.txt not processed - does not exist'
self.assertEqual(libs.constants.PCOM_NO_ENTRY, self.read_content)
self.assertEqual(self.log['settings.txt'],log_message)
# update_dependencies
print('\nTest 7 - update dependencies - check content\n')
# create htmlOut class component
postlist = {}
file = 'test1.post'
self.log = {}
htmlOut = libs.classes.HtmlOut('', self.log, settings, LIST_META_DEFAULT, file, DEPENDENCIES_DEFAULT, postlist)
htmlOut = rdl.update_dependencies(htmlOut)
print(htmlOut.log)
# check data
# read file for confirmation - reset log
processed,self.read_content,self.log = rdl.get_content_from_s3(rdl.dep_file, rdl.listbucket,self.log)
# the Python constant is compared against the JSON-decoded file content
self.assertEqual(DEPENDENCIES_DEFAULT, json.loads(self.read_content))
# write to search - use htmlOut
print('\nTest 8 - write to search - check content\n')
self.log = {}
htmlOut.raw_content = STANDARD_TEXT
htmlOut.log = {'search_content': []}
log_message = 'File test1.post raw content output as test1.post.content'
htmlOut = rdl.write_to_search(htmlOut)
print(htmlOut.log)
# read file for confirmation - reset log
processed,self.read_content,self.log = rdl.get_content_from_s3('test1.post.content', rdl.searchbucket,self.log)
# raw text
self.assertEqual(STANDARD_TEXT, self.read_content)
self.assertEqual(htmlOut.log['search_content'],[log_message])
print('\nTest 9 - write to search - no data\n')
self.log = {}
htmlOut.raw_content = ''
htmlOut.filename = 'test2.post'
htmlOut.log = {'search_content': []}
htmlOut = rdl.write_to_search(htmlOut)
print('Log message: {}'.format(htmlOut.log))
# read file for confirmation - reset log
processed,self.read_content,self.log = rdl.get_content_from_s3('test2.post.content', rdl.searchbucket,self.log)
# raw text
self.assertFalse(processed)
self.assertEqual(htmlOut.log['search_content'],[])
# update_list_meta_files - use htmlOut
print('\nTest 10 - update list meta - check content\n')
htmlOut.log = {}
htmlOut = rdl.update_list_meta_files(htmlOut,LIST_META_DEFAULT)
print(htmlOut.log)
# check data
# read category and author file for confirmation - reset log
processed,self.read_content,self.log = rdl.get_content_from_s3(rdl.cat_file, rdl.listbucket,self.log)
self.assertEqual(CATEGORIES_DEFAULT, json.loads(self.read_content))
processed,self.read_content,self.log = rdl.get_content_from_s3(rdl.authors_file, rdl.listbucket,self.log)
self.assertEqual(AUTHORS_DEFAULT, json.loads(self.read_content))
# write to buckets - use htmlOut
print('\nTest 11 - write_to_buckets - standard text\n')
self.log = {}
htmlOut.filename = 'test3.page'
htmlOut.html = STANDARD_TEXT
htmlOut.log = {}
htmlOut.meta['url'] = '/test3/'
htmlOut = rdl.write_to_buckets(htmlOut)
print(htmlOut.log)
log_message = 'File: test3.page processed and output as test3/index.html'
self.assertEqual(htmlOut.log['test3.page'],log_message)
# content in target bucket
processed,self.read_content,self.log = rdl.get_content_from_s3('test3/index.html', rdl.targetbucket,self.log)
self.assertEqual(STANDARD_TEXT, self.read_content)
print('\n\ntest3/index.html : {}'.format(self.read_content))
# update postlist - use htmlOut
print('\nTest 12 - update postlist\n')
self.log = {}
htmlOut.log = {}
htmlOut.postlist = self.postlist_default
htmlOut = rdl.update_postlist(htmlOut)
print(htmlOut.log)
processed,self.read_content,self.log = rdl.get_content_from_s3(POSTLIST_FILE, rdl.listbucket,self.log)
self.assertEqual(self.postlist_default, json.loads(self.read_content))
# update archive - use htmlOut
print('\nTest 13 - update archive\n')
self.log = {}
self.settings_content = libs.globals.DEFAULT_SETTINGS
self.log = rdl.update_archive_info(self.archive_default,self.postlist_default,self.settings_content,ARCHIVE_FILE,self.log)
print(self.log)
processed,self.read_content,self.log = rdl.get_content_from_s3(ARCHIVE_FILE, rdl.listbucket,self.log)
self.assertEqual(self.archive_default, json.loads(self.read_content))
log_message = 'JSON File: archive.json processed and updated'
self.assertEqual(self.log['archive.json'], log_message)
if __name__ == '__main__':
unittest.main()
| [] | [] | [
"TARGET_BUCKET",
"SEARCH_BUCKET",
"SOURCE_BUCKET",
"LIST_BUCKET"
] | [] | ["TARGET_BUCKET", "SEARCH_BUCKET", "SOURCE_BUCKET", "LIST_BUCKET"] | python | 4 | 0 | |
search.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import os
import re
from datetime import timedelta
from functools import update_wrapper
import networkx as nx
import nltk
from flask import Flask, make_response, request, current_app
from future import standard_library
from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_watson.natural_language_understanding_v1 import Features, KeywordsOptions, SentimentOptions
from stackapi import StackAPI
app = Flask(__name__, static_url_path='', static_folder='frontend')
standard_library.install_aliases()
stack = StackAPI("stackoverflow")
nlu = NaturalLanguageUnderstandingV1(
iam_apikey="1A3R_SmipU_wkdOjJRwSxZPmn6dIgriROn4M6zngTR3v", version="2018-11-16",
url="https://gateway-lon.watsonplatform.net/natural-language-understanding/api")
def get_keywords(sentence):
"""Fetches the keywords of the given sentence using IBM Watson Natural Language Understanding API"""
keywords = []
response = nlu.analyze(
text=sentence,
language="en",
features=Features(keywords=KeywordsOptions())).get_result()
for keyword_obj in response['keywords']:
keywords.append(keyword_obj["text"].lower())
return separate_elements(keywords)
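# Example (hypothetical): get_keywords("how to parse json in python") might
# return something like ["json", "python"], depending on what Watson extracts.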
def space_separated_elements(array):
"""Converts the question_tags array to a space delimited string."""
string = ""
for element in array:
string = string + element + " "
return string
def separate_elements(array):
"""splits the strings, delimited by whitespace in the provided list and adds each newly formed string
to the returned list"""
list_a = []
for element in array:
list_a.extend(element.split(" "))
return list_a
def get_questions_stackoverflow(query):
"""Fetches the questions from StackAPI using the query provided"""
stack.page_size = 50
stack.max_pages = 1
res = stack.fetch("search/advanced", q=query, sort="relevance", accepted=True,
filter="withbody")
return res
def get_answers_stackoverflow(question_ids):
"""Fetches the answers from StackAPI corresponding the question_ids provided"""
stack.page_size = 100
stack.max_pages = 1
res = stack.fetch("questions/{ids}/answers", ids=question_ids, sort="votes", filter="withbody")
return res
def get_comments_stackoverflow(answer_ids):
"""Fetches the comments from StackAPI corresponding to the answer_ids provided"""
stack.page_size = 100
stack.max_pages = 1
res = stack.fetch("answers/{ids}/comments", ids=answer_ids, sort="creation", filter="withbody")
return res
def analyse_sentiment(sentence):
"""Calculates the compound index of the sentence using IBM Watson Natural Language Understanding API"""
response = nlu.analyze(
text=sentence,
language="en",
features=Features(sentiment=SentimentOptions())).get_result()
return float(response["sentiment"]["document"]["score"])
def crossdomain(origin=None, methods=None, headers=None, max_age=21600, attach_to_all=True, automatic_options=True):
"""Allows cross domain access of the Flask route decorated with this decorator"""
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, str):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, str):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
@app.route("/")
@crossdomain(origin='*')
def root():
"""Method invoked for the root route of the Web Application"""
return app.send_static_file('index.html')
@app.route("/api/<query>/<answer_limit>")
@crossdomain(origin='*')
def searchw(query=None, answer_limit=50):
"""Method invoked for the api route of the Web application"""
return search(query, answer_limit)
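# Illustrative request against the route above (host and port are assumptions; the app binds
# to 0.0.0.0 on $PORT, default 5000, at the bottom of this file):
#   curl "http://localhost:5000/api/flask%20cors%20error/10"
# The response is a JSON array of {"index": ..., "question": ..., "answer": ...} objects,
# as assembled by search() below.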
def search(query=None, answer_limit=50):
"""Searches StackOverflow for solutions corresponding to query, limited by answer_limit. Returns a list
of elements containing index, question and answer."""
answer_limit = int(answer_limit)
if query is None:
return json.dumps({"error": "Enter a query to search."})
question_tags = get_keywords(query)
print("Extracted tags: ", end="")
print(question_tags)
result_json_q = get_questions_stackoverflow(space_separated_elements(question_tags))
if len(result_json_q["items"]) < answer_limit:
for i in range(0, len(question_tags) - 2):
result_json_q["items"].extend(get_questions_stackoverflow(
str(question_tags[i]) + " " + str(question_tags[i + 1]) + " " + str(question_tags[i + 2]))["items"])
print("Got " + str(len(result_json_q["items"])) + " questions... processing.")
questions_tags = {}
edges = []
nodes = []
questions = {}
if len(result_json_q["items"]) > 0:
for question_b in result_json_q["items"]:
if isinstance(question_b, dict):
tags = list(set(get_keywords(question_b["title"])) | set(question_b["tags"]))
questions_tags[int(question_b["question_id"])] = tags
questions[int(question_b["question_id"])] = question_b
questions[int(question_b["question_id"])]["answer_scores"] = {}
for tag1 in tags:
if not (tag1 in nodes):
nodes.append(tag1)
for tag2 in tags:
                        if tag1 != tag2:
edges.append((tag1, tag2))
print("Ranking questions... ")
graph = nx.Graph()
graph.add_nodes_from(nodes)
graph.add_edges_from(edges)
probable_paths = []
for source in re.findall(r'\w+', query):
for destination in question_tags:
            if source != destination and (source in nodes) and (destination in nodes):
probable_paths.extend(nx.all_shortest_paths(graph, source, destination))
question_scores = {}
for question_b in questions.values():
score = 0.0
tag_count = 0.0
for path in probable_paths:
tags = questions_tags[int(question_b["question_id"])]
for tag in tags:
if tag in path:
score = score + 1
tag_count = tag_count + 1
distance = nltk.edit_distance(query, question_b['title'])
question_scores[int(question_b["question_id"])] = (((1.0 / distance) if distance != 0 else 1) + (
0 if tag_count == 0 else score / tag_count)) / 2
answers = {}
questions_sorted = sorted(question_scores, key=lambda ind: int(question_scores.get(ind) * 10000), reverse=True)[
:answer_limit]
print("Done.")
print("Fetching and ranking answers based on comments...")
result_json_a = get_answers_stackoverflow(questions_sorted)
max_score = 1
for answer in result_json_a["items"]:
answers[int(answer["answer_id"])] = answer
ascore = int(answer["score"])
if ascore > max_score:
max_score = ascore
result_json_c = get_comments_stackoverflow(answers.keys())
comments = {}
for comment in result_json_c["items"]:
if comment["post_id"] in comments:
comments[int(comment["post_id"])].append(comment)
else:
comments[int(comment["post_id"])] = [comment]
for answer in answers.values():
score = 0.0
count = 1
a_score = int(answer["score"])
accepted = bool(answer["is_accepted"])
if int(answer["answer_id"]) in comments.keys():
t_comments = comments[int(answer["answer_id"])]
for comment in t_comments:
score = score + analyse_sentiment(comment["body"])
count = count + 1
questions[int(answer["question_id"])]["answer_scores"][
int(answer["answer_id"])] = (a_score / max_score + score / count + (
0.5 if accepted else 0)) / 3
print("Done.")
print("Picking top answers for questions...")
results = []
index = 0
for question_id in questions_sorted:
question = questions[question_id]
answer_id = max(question["answer_scores"], key=question["answer_scores"].get)
results.append({"index": index, "question": question, "answer": answers[answer_id]})
index = index + 1
print("Done.")
return json.dumps(results)
port = int(os.getenv('PORT', 5000))
if __name__ == '__main__':
app.run(host='0.0.0.0', port=port, debug=True)
| [] | [] | [
"PORT"
] | [] | ["PORT"] | python | 1 | 0 | |
share/qt/extract_strings_qt.py | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from __future__ import division,print_function,unicode_literals
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/cubisstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
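# Minimal illustration of the input parse_po() consumes (a made-up sample, not real xgettext
# output for this project):
#
#   msgid "Hello"
#   msgstr ""
#   msgid ""
#   "multi-line "
#   "entry"
#   msgstr ""
#
# yields [(['"Hello"'], ['""']), (['""', '"multi-line "', '"entry"'], ['""'])] -- each
# msgid/msgstr is kept as the list of its quoted source lines.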
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
exit(1)
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *cubis_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("cubis-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| [] | [] | [
"XGETTEXT"
] | [] | ["XGETTEXT"] | python | 1 | 0 | |
individual2.py | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import xml.etree.ElementTree as ET
from dataclasses import dataclass, field
from typing import List
# Use a dictionary with the following keys: last name, first name; phone number;
# date of birth (a list of three numbers). Write a program that performs the following
# actions: reads data from the keyboard into a list of dictionaries of the given structure;
# the records must be kept in alphabetical order; prints information about the people whose
# birthdays fall in the month entered from the keyboard; if there are no such people,
# display an appropriate message.
@dataclass(frozen=True)
class Worker:
las_name: str
name: str
tel: int
date: list
@dataclass
class Staff:
workers: List[Worker] = field(default_factory=lambda: [])
def add(self, las_name, name, tel, date):
self.workers.append(
Worker(
las_name=las_name,
name=name,
tel=tel,
date=date
)
)
self.workers.sort(key=lambda worker: worker.name)
def __str__(self):
        # Table header.
table = []
line = "+-{}-+-{}-+-{}-+-{}-+-{}-+".format(
'-' * 4,
'-' * 15,
'-' * 15,
'-' * 20,
'-' * 20
)
table.append(line)
table.append((
"| {:^4} | {:^15} | {:^15} | {:^20} | {:^20} |".format(
"№",
"Фамилия",
"Имя",
"Телефон",
"Дата рождения"
)
)
)
table.append(line)
        # Print the data for all workers.
for idx, worker in enumerate(self.workers, 1):
table.append(
'| {:>4} | {:<15} | {:<15} | {:>20} | {:^20} |'.format(
idx,
worker.las_name,
worker.name,
worker.tel,
".".join(map(str, worker.date))
)
)
table.append(line)
return '\n'.join(table)
def load(self, filename):
with open(filename, 'r', encoding='utf8') as fin:
xml = fin.read()
parser = ET.XMLParser(encoding="utf8")
tree = ET.fromstring(xml, parser=parser)
self.workers = []
for worker_element in tree:
las_name, name, tel, date = None, None, None, None
for element in worker_element:
if element.tag == 'las_name':
las_name = element.text
elif element.tag == 'name':
name = element.text
elif element.tag == 'tel':
tel = int(element.text)
elif element.tag == 'date':
date = list(map(int, element.text.split(" ")))
if las_name is not None and name is not None \
and tel is not None and date is not None:
self.workers.append(
Worker(
las_name=las_name,
name=name,
tel=tel,
date=date
)
)
def save(self, filename):
root = ET.Element('workers')
for worker in self.workers:
worker_element = ET.Element('worker')
las_name_element = ET.SubElement(worker_element, 'las_name')
las_name_element.text = worker.las_name
name_element = ET.SubElement(worker_element, 'name')
name_element.text = worker.name
tel_element = ET.SubElement(worker_element, 'tel')
            tel_element.text = str(worker.tel)  # ElementTree element text must be a string
date_element = ET.SubElement(worker_element, 'date')
date_element.text = ' '.join(map(str, worker.date))
root.append(worker_element)
tree = ET.ElementTree(root)
with open(filename, 'wb') as fout:
tree.write(fout, encoding='utf8', xml_declaration=True)
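    # For illustration, save() emits XML of roughly this shape (the values are made up),
    # and load() expects the same layout, converting <tel> back to int and <date> back
    # to a list of ints:
    #
    #   <?xml version='1.0' encoding='utf8'?>
    #   <workers>
    #       <worker>
    #           <las_name>Ivanov</las_name>
    #           <name>Ivan</name>
    #           <tel>79001234567</tel>
    #           <date>1 1 2000</date>
    #       </worker>
    #   </workers>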
if __name__ == '__main__':
staff = Staff()
while True:
command = input("Enter command> ").lower()
if command == "exit":
break
elif command == "add":
las_name = str(input("Enter last name> "))
name = str(input("Enter first name> "))
tel = str(input("Enter phone> +"))
date = list(map(int, input("Enter birthdate separated by space> ").split(" ")))
staff.add(las_name, name, tel, date)
elif command == "list":
print(staff)
elif command.startswith('load '):
            # Split the command to extract the file name.
parts = command.split(' ', maxsplit=1)
staff.load(parts[1])
elif command.startswith('save '):
            # Split the command to extract the file name.
parts = command.split(' ', maxsplit=1)
staff.save(parts[1])
elif command == 'help':
            # Print help on using the program.
print("Список команд:\n")
print("add - добавить работника;")
print("list - вывести список работников;")
print("task - вывести сотрудников определенной даты рождения")
print("load <имя файла> - загрузить данные из файла;")
print("save <имя файла> - сохранить данные в файл;")
print("exit - выход из программы;")
else:
print(f"Неизвестная команда {command}", file=sys.stderr) | [] | [] | [] | [] | [] | python | null | null | null |
pydash/views.py | # The MIT License (MIT)
#
# Copyright (c) 2014 Florian Neagu - [email protected] - https://github.com/k3oni/
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import platform
import os
import multiprocessing
from datetime import timedelta
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.template import RequestContext
import time
lat=0.1
def home(request):
global lat
try:
#set default to 0.1
time.sleep(lat)
# int(os.environ['latency']))
except:
pass
return render_to_response('index.html', context_instance=RequestContext(request))
def latup(request):
global lat
try:
lat=int(request.GET.get('value', lat))
except:
return HttpResponse(401)
return HttpResponse(200)
def set_lat(l):
global lat
try:
lat=int(l)
except:
pass
return lat
def get_lat():
global lat
return lat | [] | [] | [
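# Rough usage sketch (URL routing lives elsewhere in the project, so the paths are assumptions):
#   GET /               -> home(): sleeps `lat` seconds, then renders index.html
#   GET /latup?value=2  -> latup(): sets the artificial latency to 2 seconds, returns HttpResponse(200)
#   set_lat()/get_lat() -> programmatic access to the same module-level `lat` value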
"latency"
] | [] | ["latency"] | python | 1 | 0 | |
cmd/root.go | /*
MIT License
Copyright (c) 2021 Rally Health, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package cmd
import (
"context"
"fmt"
"log"
"net/http"
"os"
"time"
"github.com/rallyhealth/goose/pkg"
"github.com/kireledan/gojenkins"
"github.com/spf13/cobra"
homedir "github.com/mitchellh/go-homedir"
"github.com/spf13/viper"
)
var cfgFile string
var Jenky *gojenkins.Jenkins
// rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{
Use: "goose",
Short: "A Jenkins CLI",
Long: `The unofficial way to interact with Jenkins
___
,-"" '.
,' _ e )'-._
/ ,' '-._<.===-' HONK HONK
/ /
/ ;
_ / ;
('._ _.-"" ""--..__,' |
<_ '-"" \
<'- :
(__ <__. ;
'-. '-.__. _.' /
\ '-.__,-' _,'
'._ , /__,-'
""._\__,'< <____
| | '----.'.
| | \ '.
; |___ \-''
\ --<
'.'.<
'-'`,
PersistentPreRun: func(cmd *cobra.Command, args []string) {
var err error
user := os.Getenv("JENKINS_EMAIL")
key := os.Getenv("JENKINS_API_KEY")
rootJenkins := os.Getenv("JENKINS_ROOT_URL")
loginJenkins := os.Getenv("JENKINS_LOGIN_URL")
if user == "" || key == "" || rootJenkins == "" || loginJenkins == "" {
fmt.Println("Please define $JENKINS_EMAIL and $JENKINS_API_KEY and $JENKINS_ROOT_URL and $JENKINS_LOGIN_URL")
os.Exit(1)
}
clientWithTimeout := http.Client{
Timeout: 3 * time.Second,
}
Jenky, err = gojenkins.CreateJenkins(&clientWithTimeout, fmt.Sprintf("%s", loginJenkins), user, key).Init(context.TODO())
if err != nil {
fmt.Println("Error connecting to jenkins. Make sure you are able to reach your jenkins instance. Is it on a VPN?")
log.Fatal(err)
}
status, err := Jenky.Poll(context.TODO())
if status == 401 {
log.Fatal("Invalid credentials. Double check your jenkins envs JENKINS_EMAIL and JENKINS_API_KEY")
}
pkg.RefreshJobIndex(Jenky)
},
}
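// A rough sketch of the environment this CLI expects before any command can run; the values
// below are placeholders, not real endpoints or credentials:
//
//	export JENKINS_EMAIL="[email protected]"
//	export JENKINS_API_KEY="xxxxxxxx"
//	export JENKINS_ROOT_URL="https://jenkins.example.com"
//	export JENKINS_LOGIN_URL="https://jenkins.example.com"
//	goose <command>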
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
if err := rootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
func init() {
cobra.OnInitialize(initConfig)
viper.SetDefault("author", "kireledan [email protected]")
// Here you will define your flags and configuration settings.
// Cobra supports persistent flags, which, if defined here,
// will be global for your application.
rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.goose.yaml)")
// Cobra also supports local flags, which will only run
// when this action is called directly.
rootCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
// initConfig reads in config file and ENV variables if set.
func initConfig() {
if cfgFile != "" {
// Use config file from the flag.
viper.SetConfigFile(cfgFile)
} else {
// Find home directory.
home, err := homedir.Dir()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
// Search config in home directory with name ".goose" (without extension).
viper.AddConfigPath(home)
viper.SetConfigName(".goose")
}
viper.AutomaticEnv() // read in environment variables that match
// If a config file is found, read it in.
if err := viper.ReadInConfig(); err == nil {
fmt.Println("Using config file:", viper.ConfigFileUsed())
}
}
| [
"\"JENKINS_EMAIL\"",
"\"JENKINS_API_KEY\"",
"\"JENKINS_ROOT_URL\"",
"\"JENKINS_LOGIN_URL\""
] | [] | [
"JENKINS_EMAIL",
"JENKINS_API_KEY",
"JENKINS_LOGIN_URL",
"JENKINS_ROOT_URL"
] | [] | ["JENKINS_EMAIL", "JENKINS_API_KEY", "JENKINS_LOGIN_URL", "JENKINS_ROOT_URL"] | go | 4 | 0 | |
nni/retiarii/integration.py | import logging
import os
from typing import Any, Callable
import json_tricks
from nni.runtime.msg_dispatcher_base import MsgDispatcherBase
from nni.runtime.protocol import CommandType, send
from nni.utils import MetricType
from .graph import MetricData
from .execution.base import BaseExecutionEngine
from .execution.cgo_engine import CGOExecutionEngine
from .execution.api import set_execution_engine
from .integration_api import register_advisor
_logger = logging.getLogger(__name__)
class RetiariiAdvisor(MsgDispatcherBase):
"""
The class is to connect Retiarii components to NNI backend.
It will function as the main thread when running a Retiarii experiment through NNI.
Strategy will be launched as its thread, who will call APIs in execution engine. Execution
engine will then find the advisor singleton and send payloads to advisor.
When metrics are sent back, advisor will first receive the payloads, who will call the callback
function (that is a member function in graph listener).
The conversion advisor provides are minimum. It is only a send/receive module, and execution engine
needs to handle all the rest.
FIXME
    How does the advisor exit when the strategy exits?
Attributes
----------
send_trial_callback
request_trial_jobs_callback
trial_end_callback
intermediate_metric_callback
final_metric_callback
"""
def __init__(self):
super(RetiariiAdvisor, self).__init__()
register_advisor(self) # register the current advisor as the "global only" advisor
self.search_space = None
self.send_trial_callback: Callable[[dict], None] = None
self.request_trial_jobs_callback: Callable[[int], None] = None
self.trial_end_callback: Callable[[int, bool], None] = None
self.intermediate_metric_callback: Callable[[int, MetricData], None] = None
self.final_metric_callback: Callable[[int, MetricData], None] = None
self.parameters_count = 0
engine = self._create_execution_engine()
set_execution_engine(engine)
def _create_execution_engine(self):
if os.environ.get('CGO') == 'true':
return CGOExecutionEngine()
else:
return BaseExecutionEngine()
def handle_initialize(self, data):
"""callback for initializing the advisor
Parameters
----------
data: dict
search space
"""
self.handle_update_search_space(data)
send(CommandType.Initialized, '')
def send_trial(self, parameters):
"""
Send parameters to NNI.
Parameters
----------
parameters : Any
Any payload.
Returns
-------
int
Parameter ID that is assigned to this parameter,
which will be used for identification in future.
"""
self.parameters_count += 1
new_trial = {
'parameter_id': self.parameters_count,
'parameters': parameters,
'parameter_source': 'algorithm'
}
_logger.info('New trial sent: %s', new_trial)
send(CommandType.NewTrialJob, json_tricks.dumps(new_trial))
if self.send_trial_callback is not None:
self.send_trial_callback(parameters) # pylint: disable=not-callable
return self.parameters_count
def handle_request_trial_jobs(self, num_trials):
_logger.info('Request trial jobs: %s', num_trials)
if self.request_trial_jobs_callback is not None:
self.request_trial_jobs_callback(num_trials) # pylint: disable=not-callable
def handle_update_search_space(self, data):
_logger.info('Received search space: %s', data)
self.search_space = data
def handle_trial_end(self, data):
_logger.info('Trial end: %s', data)
self.trial_end_callback(json_tricks.loads(data['hyper_params'])['parameter_id'], # pylint: disable=not-callable
data['event'] == 'SUCCEEDED')
def handle_report_metric_data(self, data):
_logger.info('Metric reported: %s', data)
if data['type'] == MetricType.REQUEST_PARAMETER:
raise ValueError('Request parameter not supported')
elif data['type'] == MetricType.PERIODICAL:
self.intermediate_metric_callback(data['parameter_id'], # pylint: disable=not-callable
self._process_value(data['value']))
elif data['type'] == MetricType.FINAL:
self.final_metric_callback(data['parameter_id'], # pylint: disable=not-callable
self._process_value(data['value']))
@staticmethod
def _process_value(value) -> Any: # hopefully a float
value = json_tricks.loads(value)
if isinstance(value, dict):
if 'default' in value:
return value['default']
else:
return value
return value
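# A minimal sketch of how the advisor above is driven (illustrative only; in a real run the
# execution engine and graph listener wire these callbacks up rather than user code):
#
#   advisor = RetiariiAdvisor()                # registers itself as the global advisor
#   advisor.final_metric_callback = on_final   # later invoked with (parameter_id, metric)
#   param_id = advisor.send_trial(payload)     # payload may be any serializable object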
| [] | [] | [
"CGO"
] | [] | ["CGO"] | python | 1 | 0 | |
models/gift_message_data_message_extension_interface.go | // Code generated by go-swagger; DO NOT EDIT.
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// GiftMessageDataMessageExtensionInterface ExtensionInterface class for @see \Magento\GiftMessage\Api\Data\MessageInterface
//
// swagger:model gift-message-data-message-extension-interface
type GiftMessageDataMessageExtensionInterface struct {
// entity id
EntityID string `json:"entity_id,omitempty"`
// entity type
EntityType string `json:"entity_type,omitempty"`
}
// Validate validates this gift message data message extension interface
func (m *GiftMessageDataMessageExtensionInterface) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this gift message data message extension interface based on context it is used
func (m *GiftMessageDataMessageExtensionInterface) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *GiftMessageDataMessageExtensionInterface) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *GiftMessageDataMessageExtensionInterface) UnmarshalBinary(b []byte) error {
var res GiftMessageDataMessageExtensionInterface
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
| [] | [] | [] | [] | [] | go | null | null | null |
starport/pkg/xchisel/xchisel.go | package xchisel
import (
"context"
"fmt"
"os"
"time"
chclient "github.com/jpillora/chisel/client"
chserver "github.com/jpillora/chisel/server"
)
var DefaultServerPort = "7575"
func ServerAddr() string {
return os.Getenv("CHISEL_ADDR")
}
func IsEnabled() bool {
return ServerAddr() != ""
}
func StartServer(ctx context.Context, port string) error {
s, err := chserver.NewServer(&chserver.Config{})
if err != nil {
return err
}
if err := s.StartContext(ctx, "127.0.0.1", port); err != nil {
return err
}
if err = s.Wait(); err == context.Canceled {
return nil
}
return err
}
func StartClient(ctx context.Context, serverAddr, localPort, remotePort string) error {
c, err := chclient.NewClient(&chclient.Config{
MaxRetryInterval: time.Second,
MaxRetryCount: -1,
Server: serverAddr,
Remotes: []string{fmt.Sprintf("127.0.0.1:%s:127.0.0.1:%s", localPort, remotePort)},
})
if err != nil {
return err
}
c.Logger.Info = false
c.Logger.Debug = false
if err := c.Start(ctx); err != nil {
return err
}
if err = c.Wait(); err == context.Canceled {
return nil
}
return err
}
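// Illustrative wiring of the two halves above; the ports and error handling are assumptions,
// and ServerAddr() only returns a non-empty address when CHISEL_ADDR is set:
//
//	// on the machine exposing the service
//	go StartServer(ctx, DefaultServerPort)
//
//	// on the machine that wants local port 8080 forwarded to remote port 26657
//	if err := StartClient(ctx, ServerAddr(), "8080", "26657"); err != nil {
//		// handle tunnel failure
//	}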
| [
"\"CHISEL_ADDR\""
] | [] | [
"CHISEL_ADDR"
] | [] | ["CHISEL_ADDR"] | go | 1 | 0 | |
form_test.go | package restconf
import (
"context"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"os"
"strings"
"testing"
"github.com/freeconf/yang/node"
"github.com/freeconf/yang/nodeutil"
"github.com/freeconf/yang/parser"
)
type handlerImpl http.HandlerFunc
func (impl handlerImpl) ServeHTTP(w http.ResponseWriter, r *http.Request) {
impl(w, r)
}
func TestForm(t *testing.T) {
m, err := parser.LoadModuleFromString(nil, `
module test {
rpc x {
input {
leaf a {
type string;
}
anydata b;
}
}
}
`)
if err != nil {
t.Fatal(err)
}
done := make(chan bool, 2)
handler := func(w http.ResponseWriter, r *http.Request) {
b := node.NewBrowser(m, formDummyNode(t))
input, err := requestNode(r)
chkErr(t, err)
resp := b.Root().Find("x").Action(input)
chkErr(t, resp.LastErr)
w.Write([]byte("ok"))
done <- true
}
srv := &http.Server{Addr: "127.0.0.1:9999", Handler: handlerImpl(handler)}
go func() {
srv.ListenAndServe()
}()
post(t)
<-done
srv.Shutdown(context.TODO())
}
func chkErr(t *testing.T, err error) {
t.Helper()
if err != nil {
t.Fatal(err)
}
}
func post(t *testing.T) {
if "true" == os.Getenv("TRAVIS") {
t.Skip()
return
}
rdr, wtr := io.Pipe()
wait := make(chan bool, 2)
form := multipart.NewWriter(wtr)
go func() {
req, err := http.NewRequest("POST", "http://127.0.0.1:9999", rdr)
chkErr(t, err)
req.Header.Set("Content-Type", form.FormDataContentType())
_, err = http.DefaultClient.Do(req)
chkErr(t, err)
wait <- true
}()
dataPart, err := form.CreateFormField("a")
chkErr(t, err)
_, err = io.Copy(dataPart, strings.NewReader("hello"))
chkErr(t, err)
filePart, err := form.CreateFormFile("b", "b")
chkErr(t, err)
_, err = io.Copy(filePart, strings.NewReader("hello world"))
chkErr(t, err)
chkErr(t, form.Close())
chkErr(t, wtr.Close())
<-wait
}
func formDummyNode(t *testing.T) node.Node {
return &nodeutil.Basic{
OnAction: func(r node.ActionRequest) (node.Node, error) {
v, err := r.Input.GetValue("a")
chkErr(t, err)
if v.String() != "hello" {
t.Error(v.String())
}
v, err = r.Input.GetValue("b")
chkErr(t, err)
rdr, valid := v.Value().(io.Reader)
if !valid {
panic("invalid")
}
actual, err := ioutil.ReadAll(rdr)
chkErr(t, err)
if string(actual) != "hello world" {
t.Error(actual)
}
//defer rdr.Close()
			fmt.Print(string(actual))
return nil, nil
},
}
}
| [
"\"TRAVIS\""
] | [] | [
"TRAVIS"
] | [] | ["TRAVIS"] | go | 1 | 0 | |
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentHashMap;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;
import java.text.NumberFormat;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.crypto.SecretKey;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.SerializationFactory;
import org.apache.hadoop.mapred.IFile.Writer;
import org.apache.hadoop.mapreduce.FileSystemCounter;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.TaskCounter;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer;
import org.apache.hadoop.mapreduce.task.ReduceContextImpl;
import org.apache.hadoop.mapreduce.util.MRJobConfUtil;
import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.Progress;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.util.StringInterner;
import org.apache.hadoop.util.StringUtils;
/**
* Base class for tasks.
*/
@InterfaceAudience.LimitedPrivate({"MapReduce"})
@InterfaceStability.Unstable
abstract public class Task implements Writable, Configurable {
private static final Log LOG =
LogFactory.getLog(Task.class);
public static String MERGED_OUTPUT_PREFIX = ".merged";
public static final long DEFAULT_COMBINE_RECORDS_BEFORE_PROGRESS = 10000;
/**
* @deprecated Provided for compatibility. Use {@link TaskCounter} instead.
*/
@Deprecated
public enum Counter {
MAP_INPUT_RECORDS,
MAP_OUTPUT_RECORDS,
MAP_SKIPPED_RECORDS,
MAP_INPUT_BYTES,
MAP_OUTPUT_BYTES,
MAP_OUTPUT_MATERIALIZED_BYTES,
COMBINE_INPUT_RECORDS,
COMBINE_OUTPUT_RECORDS,
REDUCE_INPUT_GROUPS,
REDUCE_SHUFFLE_BYTES,
REDUCE_INPUT_RECORDS,
REDUCE_OUTPUT_RECORDS,
REDUCE_SKIPPED_GROUPS,
REDUCE_SKIPPED_RECORDS,
SPILLED_RECORDS,
SPLIT_RAW_BYTES,
CPU_MILLISECONDS,
PHYSICAL_MEMORY_BYTES,
VIRTUAL_MEMORY_BYTES,
COMMITTED_HEAP_BYTES,
MAP_PHYSICAL_MEMORY_BYTES_MAX,
MAP_VIRTUAL_MEMORY_BYTES_MAX,
REDUCE_PHYSICAL_MEMORY_BYTES_MAX,
REDUCE_VIRTUAL_MEMORY_BYTES_MAX
}
/**
* Counters to measure the usage of the different file systems.
* Always return the String array with two elements. First one is the name of
* BYTES_READ counter and second one is of the BYTES_WRITTEN counter.
*/
protected static String[] getFileSystemCounterNames(String uriScheme) {
String scheme = StringUtils.toUpperCase(uriScheme);
return new String[]{scheme+"_BYTES_READ", scheme+"_BYTES_WRITTEN"};
}
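  // For illustration: getFileSystemCounterNames("hdfs") yields
  // {"HDFS_BYTES_READ", "HDFS_BYTES_WRITTEN"}.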
/**
* Name of the FileSystem counters' group
*/
protected static final String FILESYSTEM_COUNTER_GROUP = "FileSystemCounters";
///////////////////////////////////////////////////////////
// Helper methods to construct task-output paths
///////////////////////////////////////////////////////////
/** Construct output file names so that, when an output directory listing is
* sorted lexicographically, positions correspond to output partitions.*/
private static final NumberFormat NUMBER_FORMAT = NumberFormat.getInstance();
static {
NUMBER_FORMAT.setMinimumIntegerDigits(5);
NUMBER_FORMAT.setGroupingUsed(false);
}
static synchronized String getOutputName(int partition) {
return "part-" + NUMBER_FORMAT.format(partition);
}
////////////////////////////////////////////
// Fields
////////////////////////////////////////////
private String jobFile; // job configuration file
private String user; // user running the job
private TaskAttemptID taskId; // unique, includes job id
private int partition; // id within job
private byte[] encryptedSpillKey = new byte[] {0}; // Key Used to encrypt
// intermediate spills
TaskStatus taskStatus; // current status of the task
protected JobStatus.State jobRunStateForCleanup;
protected boolean jobCleanup = false;
protected boolean jobSetup = false;
protected boolean taskCleanup = false;
// An opaque data field used to attach extra data to each task. This is used
// by the Hadoop scheduler for Mesos to associate a Mesos task ID with each
// task and recover these IDs on the TaskTracker.
protected BytesWritable extraData = new BytesWritable();
//skip ranges based on failed ranges from previous attempts
private SortedRanges skipRanges = new SortedRanges();
private boolean skipping = false;
private boolean writeSkipRecs = true;
//currently processing record start index
private volatile long currentRecStartIndex;
private Iterator<Long> currentRecIndexIterator =
skipRanges.skipRangeIterator();
private ResourceCalculatorProcessTree pTree;
private long initCpuCumulativeTime = ResourceCalculatorProcessTree.UNAVAILABLE;
protected JobConf conf;
protected MapOutputFile mapOutputFile;
protected LocalDirAllocator lDirAlloc;
private final static int MAX_RETRIES = 10;
protected JobContext jobContext;
protected TaskAttemptContext taskContext;
protected org.apache.hadoop.mapreduce.OutputFormat<?,?> outputFormat;
protected org.apache.hadoop.mapreduce.OutputCommitter committer;
protected final Counters.Counter spilledRecordsCounter;
protected final Counters.Counter failedShuffleCounter;
protected final Counters.Counter mergedMapOutputsCounter;
private int numSlotsRequired;
protected TaskUmbilicalProtocol umbilical;
protected SecretKey tokenSecret;
protected SecretKey shuffleSecret;
protected GcTimeUpdater gcUpdater;
final AtomicBoolean mustPreempt = new AtomicBoolean(false);
////////////////////////////////////////////
// Constructors
////////////////////////////////////////////
public Task() {
taskStatus = TaskStatus.createTaskStatus(isMapTask());
taskId = new TaskAttemptID();
spilledRecordsCounter =
counters.findCounter(TaskCounter.SPILLED_RECORDS);
failedShuffleCounter =
counters.findCounter(TaskCounter.FAILED_SHUFFLE);
mergedMapOutputsCounter =
counters.findCounter(TaskCounter.MERGED_MAP_OUTPUTS);
gcUpdater = new GcTimeUpdater();
}
public Task(String jobFile, TaskAttemptID taskId, int partition,
int numSlotsRequired) {
this.jobFile = jobFile;
this.taskId = taskId;
this.partition = partition;
this.numSlotsRequired = numSlotsRequired;
this.taskStatus = TaskStatus.createTaskStatus(isMapTask(), this.taskId,
0.0f, numSlotsRequired,
TaskStatus.State.UNASSIGNED,
"", "", "",
isMapTask() ?
TaskStatus.Phase.MAP :
TaskStatus.Phase.SHUFFLE,
counters);
spilledRecordsCounter = counters.findCounter(TaskCounter.SPILLED_RECORDS);
failedShuffleCounter = counters.findCounter(TaskCounter.FAILED_SHUFFLE);
mergedMapOutputsCounter =
counters.findCounter(TaskCounter.MERGED_MAP_OUTPUTS);
gcUpdater = new GcTimeUpdater();
}
@VisibleForTesting
void setTaskDone() {
taskDone.set(true);
}
////////////////////////////////////////////
// Accessors
////////////////////////////////////////////
public void setJobFile(String jobFile) { this.jobFile = jobFile; }
public String getJobFile() { return jobFile; }
public TaskAttemptID getTaskID() { return taskId; }
public int getNumSlotsRequired() {
return numSlotsRequired;
}
Counters getCounters() { return counters; }
/**
* Get the job name for this task.
* @return the job name
*/
public JobID getJobID() {
return taskId.getJobID();
}
/**
* Set the job token secret
* @param tokenSecret the secret
*/
public void setJobTokenSecret(SecretKey tokenSecret) {
this.tokenSecret = tokenSecret;
}
/**
* Get Encrypted spill key
* @return encrypted spill key
*/
public byte[] getEncryptedSpillKey() {
return encryptedSpillKey;
}
/**
* Set Encrypted spill key
* @param encryptedSpillKey key
*/
public void setEncryptedSpillKey(byte[] encryptedSpillKey) {
if (encryptedSpillKey != null) {
this.encryptedSpillKey = encryptedSpillKey;
}
}
/**
* Get the job token secret
* @return the token secret
*/
public SecretKey getJobTokenSecret() {
return this.tokenSecret;
}
/**
* Set the secret key used to authenticate the shuffle
* @param shuffleSecret the secret
*/
public void setShuffleSecret(SecretKey shuffleSecret) {
this.shuffleSecret = shuffleSecret;
}
/**
* Get the secret key used to authenticate the shuffle
* @return the shuffle secret
*/
public SecretKey getShuffleSecret() {
return this.shuffleSecret;
}
/**
* Get the index of this task within the job.
* @return the integer part of the task id
*/
public int getPartition() {
return partition;
}
/**
* Return current phase of the task.
* needs to be synchronized as communication thread sends the phase every second
   * @return the current phase of the task
*/
public synchronized TaskStatus.Phase getPhase(){
return this.taskStatus.getPhase();
}
/**
* Set current phase of the task.
* @param phase task phase
*/
protected synchronized void setPhase(TaskStatus.Phase phase){
this.taskStatus.setPhase(phase);
}
/**
* Get whether to write skip records.
*/
protected boolean toWriteSkipRecs() {
return writeSkipRecs;
}
/**
* Set whether to write skip records.
*/
protected void setWriteSkipRecs(boolean writeSkipRecs) {
this.writeSkipRecs = writeSkipRecs;
}
/**
* Report a fatal error to the parent (task) tracker.
*/
protected void reportFatalError(TaskAttemptID id, Throwable throwable,
String logMsg) {
LOG.fatal(logMsg);
if (ShutdownHookManager.get().isShutdownInProgress()) {
return;
}
Throwable tCause = throwable.getCause();
String cause = tCause == null
? StringUtils.stringifyException(throwable)
: StringUtils.stringifyException(tCause);
try {
umbilical.fatalError(id, cause);
} catch (IOException ioe) {
LOG.fatal("Failed to contact the tasktracker", ioe);
System.exit(-1);
}
}
/**
* Gets a handle to the Statistics instance based on the scheme associated
* with path.
*
* @param path the path.
* @param conf the configuration to extract the scheme from if not part of
* the path.
* @return a Statistics instance, or null if none is found for the scheme.
*/
protected static List<Statistics> getFsStatistics(Path path, Configuration conf) throws IOException {
List<Statistics> matchedStats = new ArrayList<FileSystem.Statistics>();
path = path.getFileSystem(conf).makeQualified(path);
String scheme = path.toUri().getScheme();
for (Statistics stats : FileSystem.getAllStatistics()) {
if (stats.getScheme().equals(scheme)) {
matchedStats.add(stats);
}
}
return matchedStats;
}
/**
* Get skipRanges.
*/
public SortedRanges getSkipRanges() {
return skipRanges;
}
/**
* Set skipRanges.
*/
public void setSkipRanges(SortedRanges skipRanges) {
this.skipRanges = skipRanges;
}
/**
* Is Task in skipping mode.
*/
public boolean isSkipping() {
return skipping;
}
/**
* Sets whether to run Task in skipping mode.
* @param skipping
*/
public void setSkipping(boolean skipping) {
this.skipping = skipping;
}
/**
* Return current state of the task.
* needs to be synchronized as communication thread
* sends the state every second
* @return task state
*/
synchronized TaskStatus.State getState(){
return this.taskStatus.getRunState();
}
/**
* Set current state of the task.
* @param state
*/
synchronized void setState(TaskStatus.State state){
this.taskStatus.setRunState(state);
}
void setTaskCleanupTask() {
taskCleanup = true;
}
boolean isTaskCleanupTask() {
return taskCleanup;
}
boolean isJobCleanupTask() {
return jobCleanup;
}
boolean isJobAbortTask() {
// the task is an abort task if its marked for cleanup and the final
// expected state is either failed or killed.
return isJobCleanupTask()
&& (jobRunStateForCleanup == JobStatus.State.KILLED
|| jobRunStateForCleanup == JobStatus.State.FAILED);
}
boolean isJobSetupTask() {
return jobSetup;
}
void setJobSetupTask() {
jobSetup = true;
}
void setJobCleanupTask() {
jobCleanup = true;
}
/**
* Sets the task to do job abort in the cleanup.
* @param status the final runstate of the job.
*/
void setJobCleanupTaskState(JobStatus.State status) {
jobRunStateForCleanup = status;
}
boolean isMapOrReduce() {
return !jobSetup && !jobCleanup && !taskCleanup;
}
/**
* Get the name of the user running the job/task. TaskTracker needs task's
* user name even before it's JobConf is localized. So we explicitly serialize
* the user name.
*
* @return user
*/
String getUser() {
return user;
}
void setUser(String user) {
this.user = user;
}
////////////////////////////////////////////
// Writable methods
////////////////////////////////////////////
public void write(DataOutput out) throws IOException {
Text.writeString(out, jobFile);
taskId.write(out);
out.writeInt(partition);
out.writeInt(numSlotsRequired);
taskStatus.write(out);
skipRanges.write(out);
out.writeBoolean(skipping);
out.writeBoolean(jobCleanup);
if (jobCleanup) {
WritableUtils.writeEnum(out, jobRunStateForCleanup);
}
out.writeBoolean(jobSetup);
out.writeBoolean(writeSkipRecs);
out.writeBoolean(taskCleanup);
Text.writeString(out, user);
out.writeInt(encryptedSpillKey.length);
extraData.write(out);
out.write(encryptedSpillKey);
}
public void readFields(DataInput in) throws IOException {
jobFile = StringInterner.weakIntern(Text.readString(in));
taskId = TaskAttemptID.read(in);
partition = in.readInt();
numSlotsRequired = in.readInt();
taskStatus.readFields(in);
skipRanges.readFields(in);
currentRecIndexIterator = skipRanges.skipRangeIterator();
currentRecStartIndex = currentRecIndexIterator.next();
skipping = in.readBoolean();
jobCleanup = in.readBoolean();
if (jobCleanup) {
jobRunStateForCleanup =
WritableUtils.readEnum(in, JobStatus.State.class);
}
jobSetup = in.readBoolean();
writeSkipRecs = in.readBoolean();
taskCleanup = in.readBoolean();
if (taskCleanup) {
setPhase(TaskStatus.Phase.CLEANUP);
}
user = StringInterner.weakIntern(Text.readString(in));
int len = in.readInt();
encryptedSpillKey = new byte[len];
extraData.readFields(in);
in.readFully(encryptedSpillKey);
}
@Override
public String toString() { return taskId.toString(); }
/**
* Localize the given JobConf to be specific for this task.
*/
public void localizeConfiguration(JobConf conf) throws IOException {
conf.set(JobContext.TASK_ID, taskId.getTaskID().toString());
conf.set(JobContext.TASK_ATTEMPT_ID, taskId.toString());
conf.setBoolean(JobContext.TASK_ISMAP, isMapTask());
conf.setInt(JobContext.TASK_PARTITION, partition);
conf.set(JobContext.ID, taskId.getJobID().toString());
}
/** Run this task as a part of the named job. This method is executed in the
* child process and is what invokes user-supplied map, reduce, etc. methods.
* @param umbilical for progress reports
*/
public abstract void run(JobConf job, TaskUmbilicalProtocol umbilical)
throws IOException, ClassNotFoundException, InterruptedException;
private transient Progress taskProgress = new Progress();
// Current counters
private transient Counters counters = new Counters();
/* flag to track whether task is done */
private AtomicBoolean taskDone = new AtomicBoolean(false);
public abstract boolean isMapTask();
public Progress getProgress() { return taskProgress; }
public void initialize(JobConf job, JobID id,
Reporter reporter,
boolean useNewApi) throws IOException,
ClassNotFoundException,
InterruptedException {
jobContext = new JobContextImpl(job, id, reporter);
taskContext = new TaskAttemptContextImpl(job, taskId, reporter);
if (getState() == TaskStatus.State.UNASSIGNED) {
setState(TaskStatus.State.RUNNING);
}
if (useNewApi) {
if (LOG.isDebugEnabled()) {
LOG.debug("using new api for output committer");
}
outputFormat =
ReflectionUtils.newInstance(taskContext.getOutputFormatClass(), job);
committer = outputFormat.getOutputCommitter(taskContext);
} else {
committer = conf.getOutputCommitter();
}
Path outputPath = FileOutputFormat.getOutputPath(conf);
if (outputPath != null) {
if ((committer instanceof FileOutputCommitter)) {
FileOutputFormat.setWorkOutputPath(conf,
((FileOutputCommitter)committer).getTaskAttemptPath(taskContext));
} else {
FileOutputFormat.setWorkOutputPath(conf, outputPath);
}
}
committer.setupTask(taskContext);
Class<? extends ResourceCalculatorProcessTree> clazz =
conf.getClass(MRConfig.RESOURCE_CALCULATOR_PROCESS_TREE,
null, ResourceCalculatorProcessTree.class);
pTree = ResourceCalculatorProcessTree
.getResourceCalculatorProcessTree(System.getenv().get("JVM_PID"), clazz, conf);
LOG.info(" Using ResourceCalculatorProcessTree : " + pTree);
if (pTree != null) {
pTree.updateProcessTree();
initCpuCumulativeTime = pTree.getCumulativeCpuTime();
}
}
public static String normalizeStatus(String status, Configuration conf) {
// Check to see if the status string is too long
// and truncate it if needed.
int progressStatusLength = conf.getInt(
MRConfig.PROGRESS_STATUS_LEN_LIMIT_KEY,
MRConfig.PROGRESS_STATUS_LEN_LIMIT_DEFAULT);
if (status.length() > progressStatusLength) {
LOG.warn("Task status: \"" + status + "\" truncated to max limit ("
+ progressStatusLength + " characters)");
status = status.substring(0, progressStatusLength);
}
return status;
}
@InterfaceAudience.LimitedPrivate({"MapReduce"})
@InterfaceStability.Unstable
public class TaskReporter
extends org.apache.hadoop.mapreduce.StatusReporter
implements Runnable, Reporter {
private TaskUmbilicalProtocol umbilical;
private InputSplit split = null;
private Progress taskProgress;
private Thread pingThread = null;
private boolean done = true;
private Object lock = new Object();
/**
* flag that indicates whether progress update needs to be sent to parent.
* If true, it has been set. If false, it has been reset.
* Using AtomicBoolean since we need an atomic read & reset method.
*/
private AtomicBoolean progressFlag = new AtomicBoolean(false);
@VisibleForTesting
public TaskReporter(Progress taskProgress,
TaskUmbilicalProtocol umbilical) {
this.umbilical = umbilical;
this.taskProgress = taskProgress;
}
// getters and setters for flag
void setProgressFlag() {
progressFlag.set(true);
}
boolean resetProgressFlag() {
return progressFlag.getAndSet(false);
}
public void setStatus(String status) {
taskProgress.setStatus(normalizeStatus(status, conf));
// indicate that progress update needs to be sent
setProgressFlag();
}
public void setProgress(float progress) {
// set current phase progress.
// This method assumes that task has phases.
taskProgress.phase().set(progress);
// indicate that progress update needs to be sent
setProgressFlag();
}
public float getProgress() {
return taskProgress.getProgress();
};
public void progress() {
// indicate that progress update needs to be sent
setProgressFlag();
}
public Counters.Counter getCounter(String group, String name) {
Counters.Counter counter = null;
if (counters != null) {
counter = counters.findCounter(group, name);
}
return counter;
}
public Counters.Counter getCounter(Enum<?> name) {
return counters == null ? null : counters.findCounter(name);
}
public void incrCounter(Enum key, long amount) {
if (counters != null) {
counters.incrCounter(key, amount);
}
setProgressFlag();
}
public void incrCounter(String group, String counter, long amount) {
if (counters != null) {
counters.incrCounter(group, counter, amount);
}
if(skipping && SkipBadRecords.COUNTER_GROUP.equals(group) && (
SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS.equals(counter) ||
SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS.equals(counter))) {
//if application reports the processed records, move the
//currentRecStartIndex to the next.
//currentRecStartIndex is the start index which has not yet been
//finished and is still in task's stomach.
for(int i=0;i<amount;i++) {
currentRecStartIndex = currentRecIndexIterator.next();
}
}
setProgressFlag();
}
public void setInputSplit(InputSplit split) {
this.split = split;
}
public InputSplit getInputSplit() throws UnsupportedOperationException {
if (split == null) {
throw new UnsupportedOperationException("Input only available on map");
} else {
return split;
}
}
/**
* exception thrown when the task exceeds some configured limits.
*/
public class TaskLimitException extends IOException {
public TaskLimitException(String str) {
super(str);
}
}
/**
* check the counters to see whether the task has exceeded any configured
* limits.
* @throws TaskLimitException
*/
protected void checkTaskLimits() throws TaskLimitException {
// check the limit for writing to local file system
long limit = conf.getLong(MRJobConfig.TASK_LOCAL_WRITE_LIMIT_BYTES,
MRJobConfig.DEFAULT_TASK_LOCAL_WRITE_LIMIT_BYTES);
if (limit >= 0) {
Counters.Counter localWritesCounter = null;
try {
LocalFileSystem localFS = FileSystem.getLocal(conf);
localWritesCounter = counters.findCounter(localFS.getScheme(),
FileSystemCounter.BYTES_WRITTEN);
} catch (IOException e) {
LOG.warn("Could not get LocalFileSystem BYTES_WRITTEN counter");
}
if (localWritesCounter != null
&& localWritesCounter.getCounter() > limit) {
throw new TaskLimitException("too much write to local file system." +
" current value is " + localWritesCounter.getCounter() +
" the limit is " + limit);
}
}
}
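    // Illustrative configuration for the limit checked above (the value is an arbitrary example):
    //   conf.setLong(MRJobConfig.TASK_LOCAL_WRITE_LIMIT_BYTES, 10L * 1024 * 1024 * 1024);
    // A negative limit disables the check (note the "limit >= 0" guard above).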
/**
* The communication thread handles communication with the parent (Task
* Tracker). It sends progress updates if progress has been made or if
* the task needs to let the parent know that it's alive. It also pings
* the parent to see if it's alive.
*/
public void run() {
final int MAX_RETRIES = 3;
int remainingRetries = MAX_RETRIES;
// get current flag value and reset it as well
boolean sendProgress = resetProgressFlag();
long taskProgressInterval = MRJobConfUtil.
getTaskProgressReportInterval(conf);
while (!taskDone.get()) {
synchronized (lock) {
done = false;
}
try {
boolean taskFound = true; // whether TT knows about this task
AMFeedback amFeedback = null;
// sleep for a bit
synchronized(lock) {
if (taskDone.get()) {
break;
}
lock.wait(taskProgressInterval);
}
if (taskDone.get()) {
break;
}
if (sendProgress) {
// we need to send progress update
updateCounters();
checkTaskLimits();
taskStatus.statusUpdate(taskProgress.get(),
taskProgress.toString(),
counters);
amFeedback = umbilical.statusUpdate(taskId, taskStatus);
taskFound = amFeedback.getTaskFound();
taskStatus.clearStatus();
}
else {
// send ping
amFeedback = umbilical.statusUpdate(taskId, null);
taskFound = amFeedback.getTaskFound();
}
// if Task Tracker is not aware of our task ID (probably because it died and
// came back up), kill ourselves
if (!taskFound) {
LOG.warn("Parent died. Exiting "+taskId);
resetDoneFlag();
System.exit(66);
}
          // Set a flag that says we should preempt; this is read by
          // ReduceTasks at points in the execution where it is
          // safe/easy to preempt
boolean lastPreempt = mustPreempt.get();
mustPreempt.set(mustPreempt.get() || amFeedback.getPreemption());
if (lastPreempt ^ mustPreempt.get()) {
LOG.info("PREEMPTION TASK: setting mustPreempt to " +
mustPreempt.get() + " given " + amFeedback.getPreemption() +
" for "+ taskId + " task status: " +taskStatus.getPhase());
}
sendProgress = resetProgressFlag();
remainingRetries = MAX_RETRIES;
} catch (TaskLimitException e) {
String errMsg = "Task exceeded the limits: " +
StringUtils.stringifyException(e);
LOG.fatal(errMsg);
try {
umbilical.fatalError(taskId, errMsg);
} catch (IOException ioe) {
LOG.fatal("Failed to update failure diagnosis", ioe);
}
LOG.fatal("Killing " + taskId);
resetDoneFlag();
ExitUtil.terminate(69);
} catch (Throwable t) {
LOG.info("Communication exception: " + StringUtils.stringifyException(t));
remainingRetries -=1;
if (remainingRetries == 0) {
ReflectionUtils.logThreadInfo(LOG, "Communication exception", 0);
LOG.warn("Last retry, killing "+taskId);
resetDoneFlag();
System.exit(65);
}
}
}
//Notify that we are done with the work
resetDoneFlag();
}
void resetDoneFlag() {
synchronized (lock) {
done = true;
lock.notify();
}
}
public void startCommunicationThread() {
if (pingThread == null) {
pingThread = new Thread(this, "communication thread");
pingThread.setDaemon(true);
pingThread.start();
}
}
public void stopCommunicationThread() throws InterruptedException {
if (pingThread != null) {
        // Intent of the lock is to not send an interrupt in the middle of an
// umbilical.ping or umbilical.statusUpdate
synchronized(lock) {
//Interrupt if sleeping. Otherwise wait for the RPC call to return.
lock.notify();
}
synchronized (lock) {
while (!done) {
lock.wait();
}
}
pingThread.interrupt();
pingThread.join();
}
}
}
/**
* Reports the next executing record range to TaskTracker.
*
* @param umbilical
* @param nextRecIndex the record index which would be fed next.
* @throws IOException
*/
protected void reportNextRecordRange(final TaskUmbilicalProtocol umbilical,
long nextRecIndex) throws IOException{
//currentRecStartIndex is the start index which has not yet been finished
//and is still in task's stomach.
long len = nextRecIndex - currentRecStartIndex +1;
SortedRanges.Range range =
new SortedRanges.Range(currentRecStartIndex, len);
taskStatus.setNextRecordRange(range);
if (LOG.isDebugEnabled()) {
LOG.debug("sending reportNextRecordRange " + range);
}
umbilical.reportNextRecordRange(taskId, range);
}
/**
* Create a TaskReporter and start communication thread
*/
TaskReporter startReporter(final TaskUmbilicalProtocol umbilical) {
// start thread that will handle communication with parent
TaskReporter reporter = new TaskReporter(getProgress(), umbilical);
reporter.startCommunicationThread();
return reporter;
}
/**
* Update resource information counters
*/
void updateResourceCounters() {
// Update generic resource counters
updateHeapUsageCounter();
// Updating resources specified in ResourceCalculatorProcessTree
if (pTree == null) {
return;
}
pTree.updateProcessTree();
long cpuTime = pTree.getCumulativeCpuTime();
long pMem = pTree.getRssMemorySize();
long vMem = pTree.getVirtualMemorySize();
// Remove the CPU time consumed previously by JVM reuse
if (cpuTime != ResourceCalculatorProcessTree.UNAVAILABLE &&
initCpuCumulativeTime != ResourceCalculatorProcessTree.UNAVAILABLE) {
cpuTime -= initCpuCumulativeTime;
}
if (cpuTime != ResourceCalculatorProcessTree.UNAVAILABLE) {
counters.findCounter(TaskCounter.CPU_MILLISECONDS).setValue(cpuTime);
}
if (pMem != ResourceCalculatorProcessTree.UNAVAILABLE) {
counters.findCounter(TaskCounter.PHYSICAL_MEMORY_BYTES).setValue(pMem);
}
if (vMem != ResourceCalculatorProcessTree.UNAVAILABLE) {
counters.findCounter(TaskCounter.VIRTUAL_MEMORY_BYTES).setValue(vMem);
}
if (pMem != ResourceCalculatorProcessTree.UNAVAILABLE) {
TaskCounter counter = isMapTask() ?
TaskCounter.MAP_PHYSICAL_MEMORY_BYTES_MAX :
TaskCounter.REDUCE_PHYSICAL_MEMORY_BYTES_MAX;
Counters.Counter pMemCounter =
counters.findCounter(counter);
pMemCounter.setValue(Math.max(pMemCounter.getValue(), pMem));
}
if (vMem != ResourceCalculatorProcessTree.UNAVAILABLE) {
TaskCounter counter = isMapTask() ?
TaskCounter.MAP_VIRTUAL_MEMORY_BYTES_MAX :
TaskCounter.REDUCE_VIRTUAL_MEMORY_BYTES_MAX;
Counters.Counter vMemCounter =
counters.findCounter(counter);
vMemCounter.setValue(Math.max(vMemCounter.getValue(), vMem));
}
}
/**
* An updater that tracks the amount of time this task has spent in GC.
*/
class GcTimeUpdater {
private long lastGcMillis = 0;
private List<GarbageCollectorMXBean> gcBeans = null;
public GcTimeUpdater() {
this.gcBeans = ManagementFactory.getGarbageCollectorMXBeans();
getElapsedGc(); // Initialize 'lastGcMillis' with the current time spent.
}
/**
* @return the number of milliseconds that the gc has used for CPU
* since the last time this method was called.
*/
protected long getElapsedGc() {
long thisGcMillis = 0;
for (GarbageCollectorMXBean gcBean : gcBeans) {
thisGcMillis += gcBean.getCollectionTime();
}
long delta = thisGcMillis - lastGcMillis;
this.lastGcMillis = thisGcMillis;
return delta;
}
/**
* Increment the gc-elapsed-time counter.
*/
public void incrementGcCounter() {
if (null == counters) {
return; // nothing to do.
}
org.apache.hadoop.mapred.Counters.Counter gcCounter =
counters.findCounter(TaskCounter.GC_TIME_MILLIS);
if (null != gcCounter) {
gcCounter.increment(getElapsedGc());
}
}
}
/**
* An updater that tracks the last number reported for a given file
* system and only creates the counters when they are needed.
*/
class FileSystemStatisticUpdater {
private List<FileSystem.Statistics> stats;
private Counters.Counter readBytesCounter, writeBytesCounter,
readOpsCounter, largeReadOpsCounter, writeOpsCounter;
private String scheme;
FileSystemStatisticUpdater(List<FileSystem.Statistics> stats, String scheme) {
this.stats = stats;
this.scheme = scheme;
}
void updateCounters() {
if (readBytesCounter == null) {
readBytesCounter = counters.findCounter(scheme,
FileSystemCounter.BYTES_READ);
}
if (writeBytesCounter == null) {
writeBytesCounter = counters.findCounter(scheme,
FileSystemCounter.BYTES_WRITTEN);
}
if (readOpsCounter == null) {
readOpsCounter = counters.findCounter(scheme,
FileSystemCounter.READ_OPS);
}
if (largeReadOpsCounter == null) {
largeReadOpsCounter = counters.findCounter(scheme,
FileSystemCounter.LARGE_READ_OPS);
}
if (writeOpsCounter == null) {
writeOpsCounter = counters.findCounter(scheme,
FileSystemCounter.WRITE_OPS);
}
long readBytes = 0;
long writeBytes = 0;
long readOps = 0;
long largeReadOps = 0;
long writeOps = 0;
for (FileSystem.Statistics stat: stats) {
readBytes = readBytes + stat.getBytesRead();
writeBytes = writeBytes + stat.getBytesWritten();
readOps = readOps + stat.getReadOps();
largeReadOps = largeReadOps + stat.getLargeReadOps();
writeOps = writeOps + stat.getWriteOps();
}
readBytesCounter.setValue(readBytes);
writeBytesCounter.setValue(writeBytes);
readOpsCounter.setValue(readOps);
largeReadOpsCounter.setValue(largeReadOps);
writeOpsCounter.setValue(writeOps);
}
}
/**
   * A Map from URI scheme to its FileSystemStatisticUpdater.
*/
private Map<String, FileSystemStatisticUpdater> statisticUpdaters =
new HashMap<String, FileSystemStatisticUpdater>();
private synchronized void updateCounters() {
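    // Group the global FileSystem statistics by URI scheme so that each scheme
    // gets a single FileSystemStatisticUpdater (created lazily below).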
Map<String, List<FileSystem.Statistics>> map = new
HashMap<String, List<FileSystem.Statistics>>();
for(Statistics stat: FileSystem.getAllStatistics()) {
String uriScheme = stat.getScheme();
if (map.containsKey(uriScheme)) {
List<FileSystem.Statistics> list = map.get(uriScheme);
list.add(stat);
} else {
List<FileSystem.Statistics> list = new ArrayList<FileSystem.Statistics>();
list.add(stat);
map.put(uriScheme, list);
}
}
for (Map.Entry<String, List<FileSystem.Statistics>> entry: map.entrySet()) {
FileSystemStatisticUpdater updater = statisticUpdaters.get(entry.getKey());
      if (updater == null) { // first time this filesystem scheme is seen; create its updater
updater = new FileSystemStatisticUpdater(entry.getValue(), entry.getKey());
statisticUpdaters.put(entry.getKey(), updater);
}
updater.updateCounters();
}
gcUpdater.incrementGcCounter();
updateResourceCounters();
}
/**
* Updates the {@link TaskCounter#COMMITTED_HEAP_BYTES} counter to reflect the
* current total committed heap space usage of this JVM.
*/
@SuppressWarnings("deprecation")
private void updateHeapUsageCounter() {
long currentHeapUsage = Runtime.getRuntime().totalMemory();
counters.findCounter(TaskCounter.COMMITTED_HEAP_BYTES)
.setValue(currentHeapUsage);
}
public void done(TaskUmbilicalProtocol umbilical,
TaskReporter reporter
) throws IOException, InterruptedException {
updateCounters();
if (taskStatus.getRunState() == TaskStatus.State.PREEMPTED ) {
// If we are preempted, do no output promotion; signal done and exit
committer.commitTask(taskContext);
umbilical.preempted(taskId, taskStatus);
taskDone.set(true);
reporter.stopCommunicationThread();
return;
}
LOG.info("Task:" + taskId + " is done."
+ " And is in the process of committing");
boolean commitRequired = isCommitRequired();
if (commitRequired) {
int retries = MAX_RETRIES;
setState(TaskStatus.State.COMMIT_PENDING);
        // tell the task tracker that the task is commit pending
while (true) {
try {
umbilical.commitPending(taskId, taskStatus);
break;
} catch (InterruptedException ie) {
// ignore
} catch (IOException ie) {
LOG.warn("Failure sending commit pending: " +
StringUtils.stringifyException(ie));
if (--retries == 0) {
System.exit(67);
}
}
}
//wait for commit approval and commit
commit(umbilical, reporter, committer);
}
taskDone.set(true);
reporter.stopCommunicationThread();
// Make sure we send at least one set of counter increments. It's
// ok to call updateCounters() in this thread after comm thread stopped.
updateCounters();
sendLastUpdate(umbilical);
//signal the tasktracker that we are done
sendDone(umbilical);
}
/**
* Checks if this task has anything to commit, depending on the
* type of task, as well as on whether the {@link OutputCommitter}
* has anything to commit.
*
* @return true if the task has to commit
* @throws IOException
*/
boolean isCommitRequired() throws IOException {
boolean commitRequired = false;
if (isMapOrReduce()) {
commitRequired = committer.needsTaskCommit(taskContext);
}
return commitRequired;
}
/**
* Send a status update to the task tracker
* @param umbilical
* @throws IOException
*/
public void statusUpdate(TaskUmbilicalProtocol umbilical)
throws IOException {
int retries = MAX_RETRIES;
while (true) {
try {
if (!umbilical.statusUpdate(getTaskID(), taskStatus).getTaskFound()) {
LOG.warn("Parent died. Exiting "+taskId);
System.exit(66);
}
taskStatus.clearStatus();
return;
} catch (InterruptedException ie) {
          Thread.currentThread().interrupt(); // restore our interrupt status
} catch (IOException ie) {
LOG.warn("Failure sending status update: " +
StringUtils.stringifyException(ie));
if (--retries == 0) {
throw ie;
}
}
}
}
/**
* Sends last status update before sending umbilical.done();
*/
private void sendLastUpdate(TaskUmbilicalProtocol umbilical)
throws IOException {
taskStatus.setOutputSize(calculateOutputSize());
// send a final status report
taskStatus.statusUpdate(taskProgress.get(),
taskProgress.toString(),
counters);
statusUpdate(umbilical);
}
/**
* Calculates the size of output for this task.
*
* @return -1 if it can't be found.
*/
private long calculateOutputSize() throws IOException {
if (!isMapOrReduce()) {
return -1;
}
if (isMapTask() && conf.getNumReduceTasks() > 0) {
try {
Path mapOutput = mapOutputFile.getOutputFile();
FileSystem localFS = FileSystem.getLocal(conf);
return localFS.getFileStatus(mapOutput).getLen();
} catch (IOException e) {
          LOG.warn("Could not find output size ", e);
}
}
return -1;
}
private void sendDone(TaskUmbilicalProtocol umbilical) throws IOException {
int retries = MAX_RETRIES;
while (true) {
try {
if (isMapTask() && conf.getNumReduceTasks() > 0) {
int numReduceTasks = conf.getNumReduceTasks();
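            // Flatten the per-partition offset/length maps into dense arrays
            // indexed by reduce partition id before reporting via umbilical.done().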
long[] startOffsetArray = new long[numReduceTasks];
long[] partLengthArray = new long[numReduceTasks];
for (Map.Entry<Integer, Long> startOffset : startOffsetMap.entrySet()) {
startOffsetArray[startOffset.getKey()] = startOffset.getValue();
}
for (Map.Entry<Integer, Long> partLength : partLengthMap.entrySet()) {
partLengthArray[partLength.getKey()] = partLength.getValue();
}
umbilical.done(getTaskID(),
mapOutputFile.getOutputFile().toString(),
startOffsetArray, partLengthArray);
} else {
            umbilical.done(getTaskID(),
                           "",
                           new long[0], new long[0]);
}
LOG.info("Task '" + taskId + "' done.");
return;
} catch (IOException ie) {
LOG.warn("Failure signalling completion: " +
StringUtils.stringifyException(ie));
if (--retries == 0) {
throw ie;
}
}
}
}
private void commit(TaskUmbilicalProtocol umbilical,
TaskReporter reporter,
org.apache.hadoop.mapreduce.OutputCommitter committer
) throws IOException {
int retries = MAX_RETRIES;
while (true) {
try {
while (!umbilical.canCommit(taskId)) {
try {
Thread.sleep(1000);
} catch(InterruptedException ie) {
//ignore
}
reporter.setProgressFlag();
}
break;
} catch (IOException ie) {
LOG.warn("Failure asking whether task can commit: " +
StringUtils.stringifyException(ie));
if (--retries == 0) {
//if it couldn't query successfully then delete the output
discardOutput(taskContext);
System.exit(68);
}
}
}
    // task can commit now
try {
LOG.info("Task " + taskId + " is allowed to commit now");
committer.commitTask(taskContext);
return;
} catch (IOException iee) {
LOG.warn("Failure committing: " +
StringUtils.stringifyException(iee));
      // if the commit was not successful then delete the output
discardOutput(taskContext);
throw iee;
}
}
  private void discardOutput(TaskAttemptContext taskContext) {
try {
committer.abortTask(taskContext);
} catch (IOException ioe) {
LOG.warn("Failure cleaning up: " +
StringUtils.stringifyException(ioe));
}
}
protected void runTaskCleanupTask(TaskUmbilicalProtocol umbilical,
TaskReporter reporter)
throws IOException, InterruptedException {
taskCleanup(umbilical);
done(umbilical, reporter);
}
void taskCleanup(TaskUmbilicalProtocol umbilical)
throws IOException {
// set phase for this task
setPhase(TaskStatus.Phase.CLEANUP);
getProgress().setStatus("cleanup");
statusUpdate(umbilical);
LOG.info("Running cleanup for the task");
// do the cleanup
committer.abortTask(taskContext);
}
protected void runJobCleanupTask(TaskUmbilicalProtocol umbilical,
TaskReporter reporter
) throws IOException, InterruptedException {
// set phase for this task
setPhase(TaskStatus.Phase.CLEANUP);
getProgress().setStatus("cleanup");
statusUpdate(umbilical);
// do the cleanup
LOG.info("Cleaning up job");
if (jobRunStateForCleanup == JobStatus.State.FAILED
|| jobRunStateForCleanup == JobStatus.State.KILLED) {
LOG.info("Aborting job with runstate : " + jobRunStateForCleanup.name());
if (conf.getUseNewMapper()) {
committer.abortJob(jobContext, jobRunStateForCleanup);
} else {
org.apache.hadoop.mapred.OutputCommitter oldCommitter =
(org.apache.hadoop.mapred.OutputCommitter)committer;
oldCommitter.abortJob(jobContext, jobRunStateForCleanup);
}
} else if (jobRunStateForCleanup == JobStatus.State.SUCCEEDED){
LOG.info("Committing job");
committer.commitJob(jobContext);
} else {
throw new IOException("Invalid state of the job for cleanup. State found "
+ jobRunStateForCleanup + " expecting "
+ JobStatus.State.SUCCEEDED + ", "
+ JobStatus.State.FAILED + " or "
+ JobStatus.State.KILLED);
}
// delete the staging area for the job
JobConf conf = new JobConf(jobContext.getConfiguration());
if (!keepTaskFiles(conf)) {
String jobTempDir = conf.get(MRJobConfig.MAPREDUCE_JOB_DIR);
Path jobTempDirPath = new Path(jobTempDir);
FileSystem fs = jobTempDirPath.getFileSystem(conf);
fs.delete(jobTempDirPath, true);
}
done(umbilical, reporter);
}
protected boolean keepTaskFiles(JobConf conf) {
return (conf.getKeepTaskFilesPattern() != null || conf
.getKeepFailedTaskFiles());
}
protected void runJobSetupTask(TaskUmbilicalProtocol umbilical,
TaskReporter reporter
) throws IOException, InterruptedException {
// do the setup
getProgress().setStatus("setup");
committer.setupJob(jobContext);
done(umbilical, reporter);
}
public void setConf(Configuration conf) {
if (conf instanceof JobConf) {
this.conf = (JobConf) conf;
} else {
this.conf = new JobConf(conf);
}
this.mapOutputFile = ReflectionUtils.newInstance(
conf.getClass(MRConfig.TASK_LOCAL_OUTPUT_CLASS,
MROutputFiles.class, MapOutputFile.class), conf);
this.lDirAlloc = new LocalDirAllocator(MRConfig.LOCAL_DIR);
    // add the static resolutions (this is required for JUnit tests that
    // simulate multiple nodes on a single physical node).
String hostToResolved[] = conf.getStrings(MRConfig.STATIC_RESOLUTIONS);
if (hostToResolved != null) {
for (String str : hostToResolved) {
String name = str.substring(0, str.indexOf('='));
String resolvedName = str.substring(str.indexOf('=') + 1);
NetUtils.addStaticResolution(name, resolvedName);
}
}
}
public Configuration getConf() {
return this.conf;
}
public MapOutputFile getMapOutputFile() {
return mapOutputFile;
}
/**
* OutputCollector for the combiner.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public static class CombineOutputCollector<K extends Object, V extends Object>
implements OutputCollector<K, V> {
private Writer<K, V> writer;
private Counters.Counter outCounter;
private Progressable progressable;
private long progressBar;
public CombineOutputCollector(Counters.Counter outCounter, Progressable progressable, Configuration conf) {
this.outCounter = outCounter;
this.progressable=progressable;
progressBar = conf.getLong(MRJobConfig.COMBINE_RECORDS_BEFORE_PROGRESS, DEFAULT_COMBINE_RECORDS_BEFORE_PROGRESS);
}
public synchronized void setWriter(Writer<K, V> writer) {
this.writer = writer;
}
public synchronized void collect(K key, V value)
throws IOException {
outCounter.increment(1);
writer.append(key, value);
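        // report progress every 'progressBar' records so that a long-running
        // combine still shows forward progress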
if ((outCounter.getValue() % progressBar) == 0) {
progressable.progress();
}
}
}
/** Iterates values while keys match in sorted input. */
static class ValuesIterator<KEY,VALUE> implements Iterator<VALUE> {
protected RawKeyValueIterator in; //input iterator
private KEY key; // current key
private KEY nextKey;
private VALUE value; // current value
private boolean hasNext; // more w/ this key
private boolean more; // more in file
private RawComparator<KEY> comparator;
protected Progressable reporter;
private Deserializer<KEY> keyDeserializer;
private Deserializer<VALUE> valDeserializer;
private DataInputBuffer keyIn = new DataInputBuffer();
private DataInputBuffer valueIn = new DataInputBuffer();
public ValuesIterator (RawKeyValueIterator in,
RawComparator<KEY> comparator,
Class<KEY> keyClass,
Class<VALUE> valClass, Configuration conf,
Progressable reporter)
throws IOException {
this.in = in;
this.comparator = comparator;
this.reporter = reporter;
SerializationFactory serializationFactory = new SerializationFactory(conf);
this.keyDeserializer = serializationFactory.getDeserializer(keyClass);
this.keyDeserializer.open(keyIn);
this.valDeserializer = serializationFactory.getDeserializer(valClass);
this.valDeserializer.open(this.valueIn);
readNextKey();
key = nextKey;
nextKey = null; // force new instance creation
hasNext = more;
}
RawKeyValueIterator getRawIterator() { return in; }
/// Iterator methods
public boolean hasNext() { return hasNext; }
private int ctr = 0;
public VALUE next() {
if (!hasNext) {
throw new NoSuchElementException("iterate past last value");
}
try {
readNextValue();
readNextKey();
} catch (IOException ie) {
throw new RuntimeException("problem advancing post rec#"+ctr, ie);
}
reporter.progress();
return value;
}
public void remove() { throw new RuntimeException("not implemented"); }
/// Auxiliary methods
/** Start processing next unique key. */
public void nextKey() throws IOException {
// read until we find a new key
while (hasNext) {
readNextKey();
}
++ctr;
// move the next key to the current one
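      // swap (rather than null out) nextKey so the old key object can be reused
      // as the deserialization target in readNextKey()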
KEY tmpKey = key;
key = nextKey;
nextKey = tmpKey;
hasNext = more;
}
/** True iff more keys remain. */
public boolean more() {
return more;
}
/** The current key. */
public KEY getKey() {
return key;
}
/**
* read the next key
*/
private void readNextKey() throws IOException {
more = in.next();
if (more) {
DataInputBuffer nextKeyBytes = in.getKey();
keyIn.reset(nextKeyBytes.getData(), nextKeyBytes.getPosition(), nextKeyBytes.getLength());
nextKey = keyDeserializer.deserialize(nextKey);
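        // hasNext stays true only while the upcoming key compares equal to the
        // current key, i.e. it belongs to the same value group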
hasNext = key != null && (comparator.compare(key, nextKey) == 0);
} else {
hasNext = false;
}
}
/**
* Read the next value
* @throws IOException
*/
private void readNextValue() throws IOException {
DataInputBuffer nextValueBytes = in.getValue();
valueIn.reset(nextValueBytes.getData(), nextValueBytes.getPosition(), nextValueBytes.getLength());
value = valDeserializer.deserialize(value);
}
}
/** Iterator to return Combined values */
@InterfaceAudience.Private
@InterfaceStability.Unstable
public static class CombineValuesIterator<KEY,VALUE>
extends ValuesIterator<KEY,VALUE> {
private final Counters.Counter combineInputCounter;
public CombineValuesIterator(RawKeyValueIterator in,
RawComparator<KEY> comparator, Class<KEY> keyClass,
Class<VALUE> valClass, Configuration conf, Reporter reporter,
Counters.Counter combineInputCounter) throws IOException {
super(in, comparator, keyClass, valClass, conf, reporter);
this.combineInputCounter = combineInputCounter;
}
public VALUE next() {
combineInputCounter.increment(1);
return super.next();
}
}
@SuppressWarnings("unchecked")
protected static <INKEY,INVALUE,OUTKEY,OUTVALUE>
org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE>.Context
createReduceContext(org.apache.hadoop.mapreduce.Reducer
<INKEY,INVALUE,OUTKEY,OUTVALUE> reducer,
Configuration job,
org.apache.hadoop.mapreduce.TaskAttemptID taskId,
RawKeyValueIterator rIter,
org.apache.hadoop.mapreduce.Counter inputKeyCounter,
org.apache.hadoop.mapreduce.Counter inputValueCounter,
org.apache.hadoop.mapreduce.RecordWriter<OUTKEY,OUTVALUE> output,
org.apache.hadoop.mapreduce.OutputCommitter committer,
org.apache.hadoop.mapreduce.StatusReporter reporter,
RawComparator<INKEY> comparator,
Class<INKEY> keyClass, Class<INVALUE> valueClass
) throws IOException, InterruptedException {
org.apache.hadoop.mapreduce.ReduceContext<INKEY, INVALUE, OUTKEY, OUTVALUE>
reduceContext =
new ReduceContextImpl<INKEY, INVALUE, OUTKEY, OUTVALUE>(job, taskId,
rIter,
inputKeyCounter,
inputValueCounter,
output,
committer,
reporter,
comparator,
keyClass,
valueClass);
org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE>.Context
reducerContext =
new WrappedReducer<INKEY, INVALUE, OUTKEY, OUTVALUE>().getReducerContext(
reduceContext);
return reducerContext;
}
@InterfaceAudience.LimitedPrivate({"MapReduce"})
@InterfaceStability.Unstable
public static abstract class CombinerRunner<K,V> {
protected final Counters.Counter inputCounter;
protected final JobConf job;
protected final TaskReporter reporter;
CombinerRunner(Counters.Counter inputCounter,
JobConf job,
TaskReporter reporter) {
this.inputCounter = inputCounter;
this.job = job;
this.reporter = reporter;
}
/**
* Run the combiner over a set of inputs.
* @param iterator the key/value pairs to use as input
* @param collector the output collector
*/
public abstract void combine(RawKeyValueIterator iterator,
OutputCollector<K,V> collector
) throws IOException, InterruptedException,
ClassNotFoundException;
@SuppressWarnings("unchecked")
public static <K,V>
CombinerRunner<K,V> create(JobConf job,
TaskAttemptID taskId,
Counters.Counter inputCounter,
TaskReporter reporter,
org.apache.hadoop.mapreduce.OutputCommitter committer
) throws ClassNotFoundException {
Class<? extends Reducer<K,V,K,V>> cls =
(Class<? extends Reducer<K,V,K,V>>) job.getCombinerClass();
if (cls != null) {
return new OldCombinerRunner(cls, job, inputCounter, reporter);
}
// make a task context so we can get the classes
org.apache.hadoop.mapreduce.TaskAttemptContext taskContext =
new org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl(job, taskId,
reporter);
Class<? extends org.apache.hadoop.mapreduce.Reducer<K,V,K,V>> newcls =
(Class<? extends org.apache.hadoop.mapreduce.Reducer<K,V,K,V>>)
taskContext.getCombinerClass();
if (newcls != null) {
return new NewCombinerRunner<K,V>(newcls, job, taskId, taskContext,
inputCounter, reporter, committer);
}
return null;
}
}
@InterfaceAudience.Private
@InterfaceStability.Unstable
protected static class OldCombinerRunner<K,V> extends CombinerRunner<K,V> {
private final Class<? extends Reducer<K,V,K,V>> combinerClass;
private final Class<K> keyClass;
private final Class<V> valueClass;
private final RawComparator<K> comparator;
@SuppressWarnings("unchecked")
protected OldCombinerRunner(Class<? extends Reducer<K,V,K,V>> cls,
JobConf conf,
Counters.Counter inputCounter,
TaskReporter reporter) {
super(inputCounter, conf, reporter);
combinerClass = cls;
keyClass = (Class<K>) job.getMapOutputKeyClass();
valueClass = (Class<V>) job.getMapOutputValueClass();
comparator = (RawComparator<K>)
job.getCombinerKeyGroupingComparator();
}
@SuppressWarnings("unchecked")
public void combine(RawKeyValueIterator kvIter,
OutputCollector<K,V> combineCollector
) throws IOException {
Reducer<K,V,K,V> combiner =
ReflectionUtils.newInstance(combinerClass, job);
try {
CombineValuesIterator<K,V> values =
new CombineValuesIterator<K,V>(kvIter, comparator, keyClass,
valueClass, job, reporter,
inputCounter);
while (values.more()) {
combiner.reduce(values.getKey(), values, combineCollector,
reporter);
values.nextKey();
}
} finally {
combiner.close();
}
}
}
@InterfaceAudience.Private
@InterfaceStability.Unstable
protected static class NewCombinerRunner<K, V> extends CombinerRunner<K,V> {
private final Class<? extends org.apache.hadoop.mapreduce.Reducer<K,V,K,V>>
reducerClass;
private final org.apache.hadoop.mapreduce.TaskAttemptID taskId;
private final RawComparator<K> comparator;
private final Class<K> keyClass;
private final Class<V> valueClass;
private final org.apache.hadoop.mapreduce.OutputCommitter committer;
@SuppressWarnings("unchecked")
NewCombinerRunner(Class reducerClass,
JobConf job,
org.apache.hadoop.mapreduce.TaskAttemptID taskId,
org.apache.hadoop.mapreduce.TaskAttemptContext context,
Counters.Counter inputCounter,
TaskReporter reporter,
org.apache.hadoop.mapreduce.OutputCommitter committer) {
super(inputCounter, job, reporter);
this.reducerClass = reducerClass;
this.taskId = taskId;
keyClass = (Class<K>) context.getMapOutputKeyClass();
valueClass = (Class<V>) context.getMapOutputValueClass();
comparator = (RawComparator<K>) context.getCombinerKeyGroupingComparator();
this.committer = committer;
}
private static class OutputConverter<K,V>
extends org.apache.hadoop.mapreduce.RecordWriter<K,V> {
OutputCollector<K,V> output;
OutputConverter(OutputCollector<K,V> output) {
this.output = output;
}
@Override
public void close(org.apache.hadoop.mapreduce.TaskAttemptContext context){
}
@Override
public void write(K key, V value
) throws IOException, InterruptedException {
output.collect(key,value);
}
}
@SuppressWarnings("unchecked")
@Override
public void combine(RawKeyValueIterator iterator,
OutputCollector<K,V> collector
) throws IOException, InterruptedException,
ClassNotFoundException {
// make a reducer
org.apache.hadoop.mapreduce.Reducer<K,V,K,V> reducer =
(org.apache.hadoop.mapreduce.Reducer<K,V,K,V>)
ReflectionUtils.newInstance(reducerClass, job);
org.apache.hadoop.mapreduce.Reducer.Context
reducerContext = createReduceContext(reducer, job, taskId,
iterator, null, inputCounter,
new OutputConverter(collector),
committer,
reporter, comparator, keyClass,
valueClass);
reducer.run(reducerContext);
}
}
BytesWritable getExtraData() {
return extraData;
}
void setExtraData(BytesWritable extraData) {
this.extraData = extraData;
}
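  // Per-reduce-partition start offsets and lengths of the map output; read by
  // sendDone() when reporting task completion.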
protected static ConcurrentMap<Integer, Long> startOffsetMap = new ConcurrentHashMap<>();
protected static ConcurrentMap<Integer, Long> partLengthMap = new ConcurrentHashMap<>();
}
tests/jobs/test_scheduler_job.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import os
import shutil
import unittest
from datetime import timedelta
from tempfile import NamedTemporaryFile, mkdtemp
from zipfile import ZipFile
import mock
import psutil
import pytest
import six
from mock import MagicMock, patch
from parameterized import parameterized
from sqlalchemy import func
from sqlalchemy.exc import OperationalError
import airflow.example_dags
import airflow.smart_sensor_dags
from airflow import settings
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.executors.base_executor import BaseExecutor
from airflow.jobs.backfill_job import BackfillJob
from airflow.jobs.scheduler_job import DagFileProcessor, SchedulerJob
from airflow.models import DAG, DagBag, DagModel, Pool, SlaMiss, TaskInstance, errors
from airflow.models.dagrun import DagRun
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import SimpleTaskInstance, TaskInstanceKey
from airflow.operators.bash import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.serialization.serialized_objects import SerializedDAG
from airflow.utils import timezone
from airflow.utils.callback_requests import DagCallbackRequest, TaskCallbackRequest
from airflow.utils.dates import days_ago
from airflow.utils.file import list_py_file_paths
from airflow.utils.session import create_session, provide_session
from airflow.utils.state import State
from airflow.utils.types import DagRunType
from tests.test_utils.asserts import assert_queries_count
from tests.test_utils.config import conf_vars, env_vars
from tests.test_utils.db import (
clear_db_dags, clear_db_errors, clear_db_jobs, clear_db_pools, clear_db_runs, clear_db_serialized_dags,
clear_db_sla_miss, set_default_pool_slots,
)
from tests.test_utils.mock_executor import MockExecutor
ROOT_FOLDER = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
)
PERF_DAGS_FOLDER = os.path.join(ROOT_FOLDER, "tests", "test_utils", "perf", "dags")
ELASTIC_DAG_FILE = os.path.join(PERF_DAGS_FOLDER, "elastic_dag.py")
TEST_DAG_FOLDER = os.environ['AIRFLOW__CORE__DAGS_FOLDER']
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TRY_NUMBER = 1
# Include the words "airflow" and "dag" in the file contents,
# tricking airflow into thinking these
# files contain a DAG (otherwise Airflow will skip them)
PARSEABLE_DAG_FILE_CONTENTS = '"airflow DAG"'
UNPARSEABLE_DAG_FILE_CONTENTS = 'airflow DAG'
INVALID_DAG_WITH_DEPTH_FILE_CONTENTS = (
"def something():\n"
" return airflow_DAG\n"
"something()"
)
# Filename to be used for dags that are created in an ad-hoc manner and can be removed/
# created at runtime
TEMP_DAG_FILENAME = "temp_dag.py"
@pytest.fixture(scope="class")
def disable_load_example():
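    """Ensure example DAGs are not loaded so tests only see the DAGs they create."""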
with conf_vars({('core', 'load_examples'): 'false'}):
with env_vars({('core', 'load_examples'): 'false'}):
yield
@pytest.mark.usefixtures("disable_load_example")
class TestDagFileProcessor(unittest.TestCase):
@staticmethod
def clean_db():
clear_db_runs()
clear_db_pools()
clear_db_dags()
clear_db_sla_miss()
clear_db_errors()
clear_db_jobs()
clear_db_serialized_dags()
def setUp(self):
self.clean_db()
# Speed up some tests by not running the tasks, just look at what we
# enqueue!
self.null_exec = MockExecutor()
def tearDown(self) -> None:
self.clean_db()
def create_test_dag(self, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE + timedelta(hours=1), **kwargs):
dag = DAG(
dag_id='test_scheduler_reschedule',
start_date=start_date,
# Make sure it only creates a single DAG Run
end_date=end_date)
dag.clear()
dag.is_subdag = False
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id, is_paused=False)
session.merge(orm_dag)
session.commit()
return dag
@classmethod
@patch("airflow.models.dagbag.settings.STORE_SERIALIZED_DAGS", True)
def setUpClass(cls):
# Ensure the DAGs we are looking at from the DB are up-to-date
non_serialized_dagbag = DagBag(read_dags_from_db=False, include_examples=False)
non_serialized_dagbag.sync_to_db()
cls.dagbag = DagBag(read_dags_from_db=True)
def test_dag_file_processor_sla_miss_callback(self):
"""
Test that the dag file processor calls the sla miss callback
"""
session = settings.Session()
sla_callback = MagicMock()
# Create dag with a start of 1 day ago, but an sla of 0
# so we'll already have an sla_miss on the books.
test_start_date = days_ago(1)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date,
'sla': datetime.timedelta()})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow')
session.merge(TaskInstance(task=task, execution_date=test_start_date, state='success'))
session.merge(SlaMiss(task_id='dummy', dag_id='test_sla_miss', execution_date=test_start_date))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag_file_processor.manage_slas(dag=dag, session=session)
assert sla_callback.called
def test_dag_file_processor_sla_miss_callback_invalid_sla(self):
"""
Test that the dag file processor does not call the sla miss callback when
given an invalid sla
"""
session = settings.Session()
sla_callback = MagicMock()
# Create dag with a start of 1 day ago, but an sla of 0
# so we'll already have an sla_miss on the books.
# Pass anything besides a timedelta object to the sla argument.
test_start_date = days_ago(1)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date,
'sla': None})
task = DummyOperator(task_id='dummy', dag=dag, owner='airflow')
session.merge(TaskInstance(task=task, execution_date=test_start_date, state='success'))
session.merge(SlaMiss(task_id='dummy', dag_id='test_sla_miss', execution_date=test_start_date))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag_file_processor.manage_slas(dag=dag, session=session)
sla_callback.assert_not_called()
def test_dag_file_processor_sla_miss_callback_sent_notification(self):
"""
Test that the dag file processor does not call the sla_miss_callback when a
notification has already been sent
"""
session = settings.Session()
# Mock the callback function so we can verify that it was not called
sla_callback = MagicMock()
# Create dag with a start of 2 days ago, but an sla of 1 day
# ago so we'll already have an sla_miss on the books
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
task = DummyOperator(task_id='dummy', dag=dag, owner='airflow')
# Create a TaskInstance for two days ago
session.merge(TaskInstance(task=task, execution_date=test_start_date, state='success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date,
email_sent=False,
notification_sent=True))
# Now call manage_slas and see if the sla_miss callback gets called
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag_file_processor.manage_slas(dag=dag, session=session)
sla_callback.assert_not_called()
def test_dag_file_processor_sla_miss_callback_exception(self):
"""
Test that the dag file processor gracefully logs an exception if there is a problem
calling the sla_miss_callback
"""
session = settings.Session()
sla_callback = MagicMock(side_effect=RuntimeError('Could not call function'))
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow',
sla=datetime.timedelta(hours=1))
session.merge(TaskInstance(task=task, execution_date=test_start_date, state='Success'))
        # Create an SlaMiss so the sla_miss_callback will be invoked
session.merge(SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date))
# Now call manage_slas and see if the sla_miss callback gets called
mock_log = mock.MagicMock()
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock_log)
dag_file_processor.manage_slas(dag=dag, session=session)
assert sla_callback.called
mock_log.exception.assert_called_once_with(
'Could not call sla_miss_callback for DAG %s',
'test_sla_miss')
@mock.patch('airflow.jobs.scheduler_job.send_email')
def test_dag_file_processor_only_collect_emails_from_sla_missed_tasks(self, mock_send_email):
session = settings.Session()
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
email1 = '[email protected]'
task = DummyOperator(task_id='sla_missed',
dag=dag,
owner='airflow',
email=email1,
sla=datetime.timedelta(hours=1))
session.merge(TaskInstance(task=task, execution_date=test_start_date, state='Success'))
email2 = '[email protected]'
DummyOperator(task_id='sla_not_missed',
dag=dag,
owner='airflow',
email=email2)
session.merge(SlaMiss(task_id='sla_missed', dag_id='test_sla_miss', execution_date=test_start_date))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag_file_processor.manage_slas(dag=dag, session=session)
        self.assertEqual(len(mock_send_email.call_args_list), 1)
send_email_to = mock_send_email.call_args_list[0][0][0]
self.assertIn(email1, send_email_to)
self.assertNotIn(email2, send_email_to)
@mock.patch('airflow.jobs.scheduler_job.Stats.incr')
@mock.patch("airflow.utils.email.send_email")
def test_dag_file_processor_sla_miss_email_exception(self, mock_send_email, mock_stats_incr):
"""
Test that the dag file processor gracefully logs an exception if there is a problem
sending an email
"""
session = settings.Session()
# Mock the callback function so we can verify that it was not called
mock_send_email.side_effect = RuntimeError('Could not send an email')
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow',
email='[email protected]',
sla=datetime.timedelta(hours=1))
session.merge(TaskInstance(task=task, execution_date=test_start_date, state='Success'))
        # Create an SlaMiss so an email notification will be attempted
session.merge(SlaMiss(task_id='dummy', dag_id='test_sla_miss', execution_date=test_start_date))
mock_log = mock.MagicMock()
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock_log)
dag_file_processor.manage_slas(dag=dag, session=session)
mock_log.exception.assert_called_once_with(
'Could not send SLA Miss email notification for DAG %s',
'test_sla_miss')
mock_stats_incr.assert_called_once_with('sla_email_notification_failure')
def test_dag_file_processor_sla_miss_deleted_task(self):
"""
Test that the dag file processor will not crash when trying to send
sla miss notification for a deleted task
"""
session = settings.Session()
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow',
email='[email protected]',
sla=datetime.timedelta(hours=1))
session.merge(TaskInstance(task=task, execution_date=test_start_date, state='Success'))
        # Create an SlaMiss for a task that no longer exists in the DAG
session.merge(SlaMiss(task_id='dummy_deleted', dag_id='test_sla_miss',
execution_date=test_start_date))
mock_log = mock.MagicMock()
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock_log)
dag_file_processor.manage_slas(dag=dag, session=session)
@parameterized.expand([
[State.NONE, None, None],
[State.UP_FOR_RETRY, timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15)],
[State.UP_FOR_RESCHEDULE, timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15)],
])
def test_dag_file_processor_process_task_instances(self, state, start_date, end_date):
"""
        Test that _schedule_dag_run schedules the expected task instances.
"""
dag = DAG(
dag_id='test_scheduler_process_execute_task',
start_date=DEFAULT_DATE)
BashOperator(
task_id='dummy',
dag=dag,
owner='airflow',
bash_command='echo hi'
)
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
scheduler = SchedulerJob()
scheduler.processor_agent = mock.MagicMock()
scheduler.dagbag.bag_dag(dag, root_dag=dag)
dag.clear()
dr = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
state=State.RUNNING,
)
assert dr is not None
with create_session() as session:
ti = dr.get_task_instances(session=session)[0]
ti.state = state
ti.start_date = start_date
ti.end_date = end_date
count = scheduler._schedule_dag_run(dr, 0, session)
assert count == 1
session.refresh(ti)
assert ti.state == State.SCHEDULED
@parameterized.expand([
[State.NONE, None, None],
[State.UP_FOR_RETRY, timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15)],
[State.UP_FOR_RESCHEDULE, timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15)],
])
def test_dag_file_processor_process_task_instances_with_task_concurrency(
self, state, start_date, end_date,
):
"""
        Test that _schedule_dag_run schedules the expected task instances.
"""
dag = DAG(
dag_id='test_scheduler_process_execute_task_with_task_concurrency',
start_date=DEFAULT_DATE)
BashOperator(
task_id='dummy',
task_concurrency=2,
dag=dag,
owner='airflow',
bash_command='echo Hi'
)
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
scheduler = SchedulerJob()
scheduler.processor_agent = mock.MagicMock()
scheduler.dagbag.bag_dag(dag, root_dag=dag)
dag.clear()
dr = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
state=State.RUNNING,
)
assert dr is not None
with create_session() as session:
ti = dr.get_task_instances(session=session)[0]
ti.state = state
ti.start_date = start_date
ti.end_date = end_date
count = scheduler._schedule_dag_run(dr, 0, session)
assert count == 1
session.refresh(ti)
assert ti.state == State.SCHEDULED
@parameterized.expand([
[State.NONE, None, None],
[State.UP_FOR_RETRY, timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15)],
[State.UP_FOR_RESCHEDULE, timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15)],
])
def test_dag_file_processor_process_task_instances_depends_on_past(self, state, start_date, end_date):
"""
        Test that _schedule_dag_run schedules the expected task instances.
"""
dag = DAG(
dag_id='test_scheduler_process_execute_task_depends_on_past',
start_date=DEFAULT_DATE,
default_args={
'depends_on_past': True,
},
)
BashOperator(
task_id='dummy1',
dag=dag,
owner='airflow',
bash_command='echo hi'
)
BashOperator(
task_id='dummy2',
dag=dag,
owner='airflow',
bash_command='echo hi'
)
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
scheduler = SchedulerJob()
scheduler.processor_agent = mock.MagicMock()
scheduler.dagbag.bag_dag(dag, root_dag=dag)
dag.clear()
dr = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
state=State.RUNNING,
)
assert dr is not None
with create_session() as session:
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = state
ti.start_date = start_date
ti.end_date = end_date
count = scheduler._schedule_dag_run(dr, 0, session)
assert count == 2
session.refresh(tis[0])
session.refresh(tis[1])
assert tis[0].state == State.SCHEDULED
assert tis[1].state == State.SCHEDULED
def test_scheduler_job_add_new_task(self):
"""
Test if a task instance will be added if the dag is updated
"""
dag = DAG(dag_id='test_scheduler_add_new_task', start_date=DEFAULT_DATE)
BashOperator(task_id='dummy', dag=dag, owner='airflow', bash_command='echo test')
scheduler = SchedulerJob()
scheduler.dagbag.bag_dag(dag, root_dag=dag)
scheduler.dagbag.sync_to_db()
session = settings.Session()
orm_dag = session.query(DagModel).get(dag.dag_id)
assert orm_dag is not None
scheduler = SchedulerJob()
scheduler.processor_agent = mock.MagicMock()
dag = scheduler.dagbag.get_dag('test_scheduler_add_new_task', session=session)
scheduler._create_dag_runs([orm_dag], session)
drs = DagRun.find(dag_id=dag.dag_id, session=session)
assert len(drs) == 1
dr = drs[0]
tis = dr.get_task_instances()
self.assertEqual(len(tis), 1)
BashOperator(task_id='dummy2', dag=dag, owner='airflow', bash_command='echo test')
SerializedDagModel.write_dag(dag=dag)
scheduled_tis = scheduler._schedule_dag_run(dr, 0, session)
session.flush()
assert scheduled_tis == 2
drs = DagRun.find(dag_id=dag.dag_id, session=session)
assert len(drs) == 1
dr = drs[0]
tis = dr.get_task_instances()
self.assertEqual(len(tis), 2)
def test_runs_respected_after_clear(self):
"""
        Test that _schedule_dag_run only schedules ti's up to max_active_runs
(related to issue AIRFLOW-137)
"""
dag = DAG(
dag_id='test_scheduler_max_active_runs_respected_after_clear',
start_date=DEFAULT_DATE)
dag.max_active_runs = 3
BashOperator(
task_id='dummy',
dag=dag,
owner='airflow',
bash_command='echo Hi'
)
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
scheduler = SchedulerJob()
scheduler.processor_agent = mock.MagicMock()
scheduler.dagbag.bag_dag(dag, root_dag=dag)
dag.clear()
date = DEFAULT_DATE
dr1 = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=date,
state=State.RUNNING,
)
date = dag.following_schedule(date)
dr2 = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=date,
state=State.RUNNING,
)
date = dag.following_schedule(date)
dr3 = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=date,
state=State.RUNNING,
)
# First create up to 3 dagruns in RUNNING state.
assert dr1 is not None
assert dr2 is not None
assert dr3 is not None
assert len(DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)) == 3
# Reduce max_active_runs to 1
dag.max_active_runs = 1
        # and schedule them in, so we can check how many
        # task instances get scheduled (should be one, not 3)
with create_session() as session:
num_scheduled = scheduler._schedule_dag_run(dr1, 0, session)
assert num_scheduled == 1
num_scheduled = scheduler._schedule_dag_run(dr2, 1, session)
assert num_scheduled == 0
num_scheduled = scheduler._schedule_dag_run(dr3, 1, session)
assert num_scheduled == 0
@patch.object(TaskInstance, 'handle_failure')
def test_execute_on_failure_callbacks(self, mock_ti_handle_failure):
dagbag = DagBag(dag_folder="/dev/null", include_examples=True)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
with create_session() as session:
session.query(TaskInstance).delete()
dag = dagbag.get_dag('example_branch_operator')
task = dag.get_task(task_id='run_this_first')
ti = TaskInstance(task, DEFAULT_DATE, State.RUNNING)
session.add(ti)
session.commit()
requests = [
TaskCallbackRequest(
full_filepath="A",
simple_task_instance=SimpleTaskInstance(ti),
msg="Message"
)
]
dag_file_processor.execute_callbacks(dagbag, requests)
mock_ti_handle_failure.assert_called_once_with(
"Message",
conf.getboolean('core', 'unit_test_mode'),
mock.ANY
)
def test_process_file_should_failure_callback(self):
dag_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)), '../dags/test_on_failure_callback.py'
)
dagbag = DagBag(dag_folder=dag_file, include_examples=False)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
with create_session() as session, NamedTemporaryFile(delete=False) as callback_file:
session.query(TaskInstance).delete()
dag = dagbag.get_dag('test_om_failure_callback_dag')
task = dag.get_task(task_id='test_om_failure_callback_task')
ti = TaskInstance(task, DEFAULT_DATE, State.RUNNING)
session.add(ti)
session.commit()
requests = [
TaskCallbackRequest(
full_filepath=dag.full_filepath,
simple_task_instance=SimpleTaskInstance(ti),
msg="Message"
)
]
callback_file.close()
with mock.patch.dict("os.environ", {"AIRFLOW_CALLBACK_FILE": callback_file.name}):
dag_file_processor.process_file(dag_file, requests)
with open(callback_file.name) as callback_file2:
content = callback_file2.read()
self.assertEqual("Callback fired", content)
os.remove(callback_file.name)
@mock.patch("airflow.jobs.scheduler_job.DagBag")
def test_process_file_should_retry_sync_to_db(self, mock_dagbag):
"""Test that dagbag.sync_to_db is retried on OperationalError"""
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
mock_dagbag.return_value.dags = {'example_dag': mock.ANY}
op_error = OperationalError(statement=mock.ANY, params=mock.ANY, orig=mock.ANY)
# Mock error for the first 2 tries and a successful third try
side_effect = [op_error, op_error, mock.ANY]
mock_sync_to_db = mock.Mock(side_effect=side_effect)
mock_dagbag.return_value.sync_to_db = mock_sync_to_db
dag_file_processor.process_file("/dev/null", callback_requests=mock.MagicMock())
mock_sync_to_db.assert_has_calls([mock.call(), mock.call(), mock.call()])
def test_should_mark_dummy_task_as_success(self):
dag_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)), '../dags/test_only_dummy_tasks.py'
)
# Write DAGs to dag and serialized_dag table
with mock.patch("airflow.models.dagbag.settings.STORE_SERIALIZED_DAGS", return_value=True):
dagbag = DagBag(dag_folder=dag_file, include_examples=False)
dagbag.sync_to_db()
scheduler_job = SchedulerJob()
scheduler_job.processor_agent = mock.MagicMock()
dag = scheduler_job.dagbag.get_dag("test_only_dummy_tasks")
# Create DagRun
session = settings.Session()
orm_dag = session.query(DagModel).get(dag.dag_id)
scheduler_job._create_dag_runs([orm_dag], session)
drs = DagRun.find(dag_id=dag.dag_id, session=session)
assert len(drs) == 1
dr = drs[0]
# Schedule TaskInstances
scheduler_job._schedule_dag_run(dr, 0, session)
with create_session() as session:
tis = session.query(TaskInstance).all()
dags = scheduler_job.dagbag.dags.values()
self.assertEqual(['test_only_dummy_tasks'], [dag.dag_id for dag in dags])
self.assertEqual(5, len(tis))
self.assertEqual({
('test_task_a', 'success'),
('test_task_b', None),
('test_task_c', 'success'),
('test_task_on_execute', 'scheduled'),
('test_task_on_success', 'scheduled'),
}, {(ti.task_id, ti.state) for ti in tis})
for state, start_date, end_date, duration in [(ti.state, ti.start_date, ti.end_date, ti.duration) for
ti in tis]:
if state == 'success':
self.assertIsNotNone(start_date)
self.assertIsNotNone(end_date)
self.assertEqual(0.0, duration)
else:
self.assertIsNone(start_date)
self.assertIsNone(end_date)
self.assertIsNone(duration)
scheduler_job._schedule_dag_run(dr, 0, session)
with create_session() as session:
tis = session.query(TaskInstance).all()
self.assertEqual(5, len(tis))
self.assertEqual({
('test_task_a', 'success'),
('test_task_b', 'success'),
('test_task_c', 'success'),
('test_task_on_execute', 'scheduled'),
('test_task_on_success', 'scheduled'),
}, {(ti.task_id, ti.state) for ti in tis})
for state, start_date, end_date, duration in [(ti.state, ti.start_date, ti.end_date, ti.duration) for
ti in tis]:
if state == 'success':
self.assertIsNotNone(start_date)
self.assertIsNotNone(end_date)
self.assertEqual(0.0, duration)
else:
self.assertIsNone(start_date)
self.assertIsNone(end_date)
self.assertIsNone(duration)
@pytest.mark.usefixtures("disable_load_example")
class TestSchedulerJob(unittest.TestCase):
def setUp(self):
clear_db_runs()
clear_db_pools()
clear_db_dags()
clear_db_sla_miss()
clear_db_errors()
# Speed up some tests by not running the tasks, just look at what we
# enqueue!
self.null_exec = MockExecutor()
@classmethod
@patch("airflow.models.dagbag.settings.STORE_SERIALIZED_DAGS", True)
def setUpClass(cls):
# Ensure the DAGs we are looking at from the DB are up-to-date
non_serialized_dagbag = DagBag(read_dags_from_db=False, include_examples=False)
non_serialized_dagbag.sync_to_db()
cls.dagbag = DagBag(read_dags_from_db=True)
def test_is_alive(self):
job = SchedulerJob(None, heartrate=10, state=State.RUNNING)
self.assertTrue(job.is_alive())
job.latest_heartbeat = timezone.utcnow() - datetime.timedelta(seconds=20)
self.assertTrue(job.is_alive())
job.latest_heartbeat = timezone.utcnow() - datetime.timedelta(seconds=31)
self.assertFalse(job.is_alive())
        # guard against a regression where .seconds was used instead of total_seconds();
        # the internal repr of timedelta is (days, seconds, microseconds)
job.latest_heartbeat = timezone.utcnow() - datetime.timedelta(days=1)
self.assertFalse(job.is_alive())
job.state = State.SUCCESS
job.latest_heartbeat = timezone.utcnow() - datetime.timedelta(seconds=10)
self.assertFalse(job.is_alive(), "Completed jobs even with recent heartbeat should not be alive")
def run_single_scheduler_loop_with_no_dags(self, dags_folder):
"""
Utility function that runs a single scheduler loop without actually
changing/scheduling any dags. This is useful to simulate the other side effects of
running a scheduler loop, e.g. to see what parse errors there are in the
dags_folder.
:param dags_folder: the directory to traverse
:type dags_folder: str
"""
scheduler = SchedulerJob(
executor=self.null_exec,
num_times_parse_dags=1,
subdir=os.path.join(dags_folder))
scheduler.heartrate = 0
scheduler.run()
def test_no_orphan_process_will_be_left(self):
empty_dir = mkdtemp()
current_process = psutil.Process()
old_children = current_process.children(recursive=True)
scheduler = SchedulerJob(subdir=empty_dir,
num_runs=1,
executor=MockExecutor(do_update=False))
scheduler.run()
shutil.rmtree(empty_dir)
# Remove potential noise created by previous tests.
current_children = set(current_process.children(recursive=True)) - set(
old_children)
self.assertFalse(current_children)
@mock.patch('airflow.jobs.scheduler_job.TaskCallbackRequest')
@mock.patch('airflow.jobs.scheduler_job.Stats.incr')
def test_process_executor_events(self, mock_stats_incr, mock_task_callback):
dag_id = "test_process_executor_events"
dag_id2 = "test_process_executor_events_2"
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, full_filepath="/test_path1/")
dag2 = DAG(dag_id=dag_id2, start_date=DEFAULT_DATE, full_filepath="/test_path1/")
task1 = DummyOperator(dag=dag, task_id=task_id_1)
DummyOperator(dag=dag2, task_id=task_id_1)
dag.fileloc = "/test_path1/"
dag2.fileloc = "/test_path1/"
executor = MockExecutor(do_update=False)
task_callback = mock.MagicMock()
mock_task_callback.return_value = task_callback
scheduler = SchedulerJob(executor=executor)
scheduler.processor_agent = mock.MagicMock()
session = settings.Session()
dag.sync_to_db(session=session)
dag2.sync_to_db(session=session)
ti1 = TaskInstance(task1, DEFAULT_DATE)
ti1.state = State.QUEUED
session.merge(ti1)
session.commit()
executor.event_buffer[ti1.key] = State.FAILED, None
scheduler._process_executor_events(session=session)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.QUEUED)
mock_task_callback.assert_called_once_with(
full_filepath='/test_path1/',
simple_task_instance=mock.ANY,
msg='Executor reports task instance '
'<TaskInstance: test_process_executor_events.dummy_task 2016-01-01 00:00:00+00:00 [queued]> '
'finished (failed) although the task says its queued. (Info: None) '
'Was the task killed externally?'
)
scheduler.processor_agent.send_callback_to_execute.assert_called_once_with(task_callback)
scheduler.processor_agent.reset_mock()
# ti in success state
ti1.state = State.SUCCESS
session.merge(ti1)
session.commit()
executor.event_buffer[ti1.key] = State.SUCCESS, None
scheduler._process_executor_events(session=session)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.SUCCESS)
scheduler.processor_agent.send_callback_to_execute.assert_not_called()
mock_stats_incr.assert_called_once_with('scheduler.tasks.killed_externally')
def test_process_executor_events_uses_inmemory_try_number(self):
execution_date = DEFAULT_DATE
dag_id = "dag_id"
task_id = "task_id"
try_number = 42
executor = MagicMock()
scheduler = SchedulerJob(executor=executor)
scheduler.processor_agent = MagicMock()
event_buffer = {
TaskInstanceKey(dag_id, task_id, execution_date, try_number): (State.SUCCESS, None)
}
executor.get_event_buffer.return_value = event_buffer
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task = DummyOperator(dag=dag, task_id=task_id)
with create_session() as session:
ti = TaskInstance(task, DEFAULT_DATE)
ti.state = State.SUCCESS
session.merge(ti)
scheduler._process_executor_events()
        # Assert that the event_buffer is empty so the task was popped using the right
        # task instance key
self.assertEqual(event_buffer, {})
def test_execute_task_instances_is_paused_wont_execute(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_is_paused_wont_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
scheduler = SchedulerJob()
session = settings.Session()
dagmodel = DagModel(
dag_id=dag_id,
is_paused=False,
concurrency=dag.concurrency,
has_task_concurrency_limits=False,
)
dr1 = dag.create_dagrun(
run_type=DagRunType.BACKFILL_JOB,
execution_date=DEFAULT_DATE,
state=State.RUNNING,
)
ti1 = TaskInstance(task1, DEFAULT_DATE)
ti1.state = State.SCHEDULED
session.merge(ti1)
session.merge(dr1)
session.add(dagmodel)
session.flush()
scheduler._critical_section_execute_task_instances(session)
session.flush()
ti1.refresh_from_db()
self.assertEqual(State.SCHEDULED, ti1.state)
session.rollback()
def test_execute_task_instances_no_dagrun_task_will_execute(self):
"""
Tests that tasks without dagrun still get executed.
"""
dag_id = 'SchedulerJobTest.test_execute_task_instances_no_dagrun_task_will_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
scheduler = SchedulerJob()
session = settings.Session()
dag_model = DagModel(
dag_id=dag_id,
is_paused=False,
concurrency=dag.concurrency,
has_task_concurrency_limits=False,
)
session.add(dag_model)
dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
state=State.RUNNING,
)
ti1 = TaskInstance(task1, DEFAULT_DATE)
ti1.state = State.SCHEDULED
ti1.execution_date = ti1.execution_date + datetime.timedelta(days=1)
session.merge(ti1)
session.flush()
scheduler._critical_section_execute_task_instances(session)
session.flush()
ti1.refresh_from_db()
self.assertEqual(State.QUEUED, ti1.state)
session.rollback()
def test_execute_task_instances_backfill_tasks_wont_execute(self):
"""
Tests that backfill tasks won't get executed.
"""
dag_id = 'SchedulerJobTest.test_execute_task_instances_backfill_tasks_wont_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
scheduler = SchedulerJob()
session = settings.Session()
dag_model = DagModel(
dag_id=dag_id,
is_paused=False,
concurrency=dag.concurrency,
has_task_concurrency_limits=False,
)
session.add(dag_model)
dr1 = dag.create_dagrun(
run_type=DagRunType.BACKFILL_JOB,
execution_date=DEFAULT_DATE,
state=State.RUNNING,
)
ti1 = TaskInstance(task1, dr1.execution_date)
ti1.refresh_from_db()
ti1.state = State.SCHEDULED
session.merge(ti1)
session.merge(dr1)
session.flush()
self.assertTrue(dr1.is_backfill)
scheduler._critical_section_execute_task_instances(session)
session.flush()
ti1.refresh_from_db()
self.assertEqual(State.SCHEDULED, ti1.state)
session.rollback()
def test_find_executable_task_instances_backfill_nodagrun(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_backfill_nodagrun'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
scheduler = SchedulerJob()
session = settings.Session()
dag_model = DagModel(
dag_id=dag_id,
is_paused=False,
concurrency=dag.concurrency,
has_task_concurrency_limits=False,
)
session.add(dag_model)
dr1 = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
state=State.RUNNING,
)
dr2 = dag.create_dagrun(
run_type=DagRunType.BACKFILL_JOB,
execution_date=dag.following_schedule(dr1.execution_date),
state=State.RUNNING,
)
ti_no_dagrun = TaskInstance(task1, DEFAULT_DATE - datetime.timedelta(days=1))
ti_backfill = TaskInstance(task1, dr2.execution_date)
ti_with_dagrun = TaskInstance(task1, dr1.execution_date)
# ti_with_paused
ti_no_dagrun.state = State.SCHEDULED
ti_backfill.state = State.SCHEDULED
ti_with_dagrun.state = State.SCHEDULED
session.merge(dr2)
session.merge(ti_no_dagrun)
session.merge(ti_backfill)
session.merge(ti_with_dagrun)
session.flush()
res = scheduler._executable_task_instances_to_queued(max_tis=32, session=session)
self.assertEqual(2, len(res))
res_keys = map(lambda x: x.key, res)
self.assertIn(ti_no_dagrun.key, res_keys)
self.assertIn(ti_with_dagrun.key, res_keys)
session.rollback()
def test_find_executable_task_instances_pool(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_pool'
task_id_1 = 'dummy'
task_id_2 = 'dummydummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1, pool='a')
task2 = DummyOperator(dag=dag, task_id=task_id_2, pool='b')
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
scheduler = SchedulerJob()
session = settings.Session()
dag_model = DagModel(
dag_id=dag_id,
is_paused=False,
concurrency=dag.concurrency,
has_task_concurrency_limits=False,
)
session.add(dag_model)
dr1 = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
state=State.RUNNING,
)
dr2 = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=dag.following_schedule(dr1.execution_date),
state=State.RUNNING,
)
tis = ([
TaskInstance(task1, dr1.execution_date),
TaskInstance(task2, dr1.execution_date),
TaskInstance(task1, dr2.execution_date),
TaskInstance(task2, dr2.execution_date)
])
for ti in tis:
ti.state = State.SCHEDULED
session.merge(ti)
pool = Pool(pool='a', slots=1, description='haha')
pool2 = Pool(pool='b', slots=100, description='haha')
session.add(pool)
session.add(pool2)
session.flush()
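# Pool 'a' has a single slot, so only one of task1's two TIs fits, while
# pool 'b' has room for both of task2's TIs: three TIs in total.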
res = scheduler._executable_task_instances_to_queued(max_tis=32, session=session)
session.flush()
self.assertEqual(3, len(res))
res_keys = []
for ti in res:
res_keys.append(ti.key)
self.assertIn(tis[0].key, res_keys)
self.assertIn(tis[1].key, res_keys)
self.assertIn(tis[3].key, res_keys)
session.rollback()
def test_find_executable_task_instances_in_default_pool(self):
set_default_pool_slots(1)
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_in_default_pool'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
op1 = DummyOperator(dag=dag, task_id='dummy1')
op2 = DummyOperator(dag=dag, task_id='dummy2')
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
executor = MockExecutor(do_update=True)
scheduler = SchedulerJob(executor=executor)
session = settings.Session()
dag_model = DagModel(
dag_id=dag_id,
is_paused=False,
concurrency=dag.concurrency,
has_task_concurrency_limits=False,
)
session.add(dag_model)
dr1 = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
state=State.RUNNING,
)
dr2 = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=dag.following_schedule(dr1.execution_date),
state=State.RUNNING,
)
ti1 = TaskInstance(task=op1, execution_date=dr1.execution_date)
ti2 = TaskInstance(task=op2, execution_date=dr2.execution_date)
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.flush()
# Two tasks w/o pool up for execution and our default pool size is 1
res = scheduler._executable_task_instances_to_queued(max_tis=32, session=session)
self.assertEqual(1, len(res))
ti2.state = State.RUNNING
session.merge(ti2)
session.flush()
# One task w/o pool up for execution and one task running
res = scheduler._executable_task_instances_to_queued(max_tis=32, session=session)
self.assertEqual(0, len(res))
session.rollback()
session.close()
def test_nonexistent_pool(self):
dag_id = 'SchedulerJobTest.test_nonexistent_pool'
task_id = 'dummy_wrong_pool'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task = DummyOperator(dag=dag, task_id=task_id, pool="this_pool_doesnt_exist")
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
scheduler = SchedulerJob()
session = settings.Session()
dag_model = DagModel(
dag_id=dag_id,
is_paused=False,
concurrency=dag.concurrency,
has_task_concurrency_limits=False,
)
session.add(dag_model)
dr = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
state=State.RUNNING,
)
ti = TaskInstance(task, dr.execution_date)
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
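# A TI pointing at a pool that does not exist cannot be queued.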
res = scheduler._executable_task_instances_to_queued(max_tis=32, session=session)
session.flush()
self.assertEqual(0, len(res))
session.rollback()
def test_find_executable_task_instances_none(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_none'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
scheduler = SchedulerJob()
session = settings.Session()
dag_model = DagModel(
dag_id=dag_id,
is_paused=False,
concurrency=dag.concurrency,
has_task_concurrency_limits=False,
)
session.add(dag_model)
dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
state=State.RUNNING,
)
session.flush()
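# No TI was ever set to SCHEDULED, so nothing should be executable.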
self.assertEqual(0, len(scheduler._executable_task_instances_to_queued(
max_tis=32,
session=session)))
session.rollback()
def test_find_executable_task_instances_concurrency(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
scheduler = SchedulerJob()
session = settings.Session()
dag_model = DagModel(
dag_id=dag_id,
is_paused=False,
concurrency=dag.concurrency,
has_task_concurrency_limits=False,
)
session.add(dag_model)
dr1 = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
state=State.RUNNING,
)
dr2 = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=dag.following_schedule(dr1.execution_date),
state=State.RUNNING,
)
dr3 = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=dag.following_schedule(dr2.execution_date),
state=State.RUNNING,
)
ti1 = TaskInstance(task1, dr1.execution_date)
ti2 = TaskInstance(task1, dr2.execution_date)
ti3 = TaskInstance(task1, dr3.execution_date)
ti1.state = State.RUNNING
ti2.state = State.SCHEDULED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.flush()
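# dag concurrency is 2 and ti1 is already RUNNING, so only one of the two
# SCHEDULED TIs can be queued on the first pass.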
res = scheduler._executable_task_instances_to_queued(max_tis=32, session=session)
self.assertEqual(1, len(res))
res_keys = map(lambda x: x.key, res)
self.assertIn(ti2.key, res_keys)
ti2.state = State.RUNNING
session.merge(ti2)
session.flush()
res = scheduler._executable_task_instances_to_queued(max_tis=32, session=session)
self.assertEqual(0, len(res))
session.rollback()
def test_find_executable_task_instances_concurrency_queued(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency_queued'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
task1 = DummyOperator(dag=dag, task_id='dummy1')
task2 = DummyOperator(dag=dag, task_id='dummy2')
task3 = DummyOperator(dag=dag, task_id='dummy3')
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
scheduler = SchedulerJob()
session = settings.Session()
dag_model = DagModel(
dag_id=dag_id,
is_paused=False,
concurrency=dag.concurrency,
has_task_concurrency_limits=False,
)
session.add(dag_model)
dag_run = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
state=State.RUNNING,
)
ti1 = TaskInstance(task1, dag_run.execution_date)
ti2 = TaskInstance(task2, dag_run.execution_date)
ti3 = TaskInstance(task3, dag_run.execution_date)
ti1.state = State.RUNNING
ti2.state = State.QUEUED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.flush()
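# One TI is RUNNING and one is QUEUED; with dag concurrency of 3 only the
# remaining SCHEDULED TI (ti3) can be queued.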
res = scheduler._executable_task_instances_to_queued(max_tis=32, session=session)
self.assertEqual(1, len(res))
self.assertEqual(res[0].key, ti3.key)
session.rollback()
# TODO: This is a hack, I think I need to just remove the setting and have it on always
def test_find_executable_task_instances_task_concurrency(self): # pylint: disable=too-many-statements
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_task_concurrency'
task_id_1 = 'dummy'
task_id_2 = 'dummy2'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1, task_concurrency=2)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
executor = MockExecutor(do_update=True)
scheduler = SchedulerJob(executor=executor)
session = settings.Session()
scheduler.dagbag.bag_dag(dag, root_dag=dag)
scheduler.dagbag.sync_to_db(session=session)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dr1 = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
state=State.RUNNING,
)
dr2 = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=dag.following_schedule(dr1.execution_date),
state=State.RUNNING,
)
dr3 = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=dag.following_schedule(dr2.execution_date),
state=State.RUNNING,
)
ti1_1 = TaskInstance(task1, dr1.execution_date)
ti2 = TaskInstance(task2, dr1.execution_date)
ti1_1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti2)
session.flush()
res = scheduler._executable_task_instances_to_queued(max_tis=32, session=session)
self.assertEqual(2, len(res))
ti1_1.state = State.RUNNING
ti2.state = State.RUNNING
ti1_2 = TaskInstance(task1, dr2.execution_date)
ti1_2.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti2)
session.merge(ti1_2)
session.flush()
res = scheduler._executable_task_instances_to_queued(max_tis=32, session=session)
self.assertEqual(1, len(res))
ti1_2.state = State.RUNNING
ti1_3 = TaskInstance(task1, dr3.execution_date)
ti1_3.state = State.SCHEDULED
session.merge(ti1_2)
session.merge(ti1_3)
session.flush()
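# task1 already has two RUNNING TIs, so its task_concurrency of 2 blocks ti1_3.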
res = scheduler._executable_task_instances_to_queued(max_tis=32, session=session)
self.assertEqual(0, len(res))
ti1_1.state = State.SCHEDULED
ti1_2.state = State.SCHEDULED
ti1_3.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.flush()
res = scheduler._executable_task_instances_to_queued(max_tis=32, session=session)
self.assertEqual(2, len(res))
ti1_1.state = State.RUNNING
ti1_2.state = State.SCHEDULED
ti1_3.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.flush()
res = scheduler._executable_task_instances_to_queued(max_tis=32, session=session)
self.assertEqual(1, len(res))
session.rollback()
def test_change_state_for_executable_task_instances_no_tis_with_state(self):
dag_id = 'SchedulerJobTest.test_change_state_for__no_tis_with_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
scheduler = SchedulerJob()
session = settings.Session()
date = DEFAULT_DATE
dr1 = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=date,
state=State.RUNNING,
)
date = dag.following_schedule(date)
dr2 = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=date,
state=State.RUNNING,
)
date = dag.following_schedule(date)
dr3 = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=date,
state=State.RUNNING,
)
ti1 = TaskInstance(task1, dr1.execution_date)
ti2 = TaskInstance(task1, dr2.execution_date)
ti3 = TaskInstance(task1, dr3.execution_date)
ti1.state = State.RUNNING
ti2.state = State.RUNNING
ti3.state = State.RUNNING
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.flush()
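# All TIs are RUNNING, so there is nothing in the SCHEDULED state to pick up.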
res = scheduler._executable_task_instances_to_queued(max_tis=100, session=session)
self.assertEqual(0, len(res))
session.rollback()
def test_enqueue_task_instances_with_queued_state(self):
dag_id = 'SchedulerJobTest.test_enqueue_task_instances_with_queued_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
scheduler = SchedulerJob()
session = settings.Session()
dag_model = DagModel(
dag_id=dag_id,
is_paused=False,
concurrency=dag.concurrency,
has_task_concurrency_limits=False,
)
session.add(dag_model)
dr1 = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
state=State.RUNNING,
)
ti1 = TaskInstance(task1, dr1.execution_date)
ti1.dag_model = dag_model
session.merge(ti1)
session.flush()
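# The queued TI should be handed to the executor via BaseExecutor.queue_command.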
with patch.object(BaseExecutor, 'queue_command') as mock_queue_command:
scheduler._enqueue_task_instances_with_queued_state([ti1])
assert mock_queue_command.called
session.rollback()
def test_critical_section_execute_task_instances(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_nonexistent_queue'
# important that len(tasks) is less than concurrency
# because previously scheduler._execute_task_instances would only
# check the number of tasks once, so if concurrency was 3
# we could execute arbitrarily many tasks in the second run
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
scheduler = SchedulerJob()
session = settings.Session()
# create first dag run with two running task instances
dag_model = DagModel(
dag_id=dag_id,
is_paused=False,
concurrency=dag.concurrency,
has_task_concurrency_limits=False,
)
session.add(dag_model)
dr1 = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
state=State.RUNNING,
)
ti1 = TaskInstance(task1, dr1.execution_date)
ti2 = TaskInstance(task2, dr1.execution_date)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.RUNNING
ti2.state = State.RUNNING
session.merge(ti1)
session.merge(ti2)
session.flush()
self.assertEqual(State.RUNNING, dr1.state)
self.assertEqual(
2,
DAG.get_num_task_instances(
dag_id, dag.task_ids, states=[State.RUNNING], session=session
)
)
# create second dag run
dr2 = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=dag.following_schedule(dr1.execution_date),
state=State.RUNNING,
)
ti3 = TaskInstance(task1, dr2.execution_date)
ti4 = TaskInstance(task2, dr2.execution_date)
ti3.refresh_from_db()
ti4.refresh_from_db()
# manually set to scheduled so we can pick them up
ti3.state = State.SCHEDULED
ti4.state = State.SCHEDULED
session.merge(ti3)
session.merge(ti4)
session.flush()
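# dr1 already has two RUNNING TIs; with dag concurrency of 3 only one of
# dr2's SCHEDULED TIs can be queued by the critical section.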
self.assertEqual(State.RUNNING, dr2.state)
res = scheduler._critical_section_execute_task_instances(session)
# check that concurrency is respected
ti1.refresh_from_db()
ti2.refresh_from_db()
ti3.refresh_from_db()
ti4.refresh_from_db()
self.assertEqual(
3,
DAG.get_num_task_instances(
dag_id, dag.task_ids, states=[State.RUNNING, State.QUEUED], session=session
)
)
self.assertEqual(State.RUNNING, ti1.state)
self.assertEqual(State.RUNNING, ti2.state)
six.assertCountEqual(self, [State.QUEUED, State.SCHEDULED], [ti3.state, ti4.state])
self.assertEqual(1, res)
def test_execute_task_instances_limit(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_limit'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_2'
# important that len(tasks) is less than concurrency
# because previously scheduler._execute_task_instances would only
# check the number of tasks once, so if concurrency was 3
# we could execute arbitrarily many tasks in the second run
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
scheduler = SchedulerJob()
session = settings.Session()
dag_model = DagModel(
dag_id=dag_id,
is_paused=False,
concurrency=dag.concurrency,
has_task_concurrency_limits=False,
)
session.add(dag_model)
date = dag.start_date
tis = []
for _ in range(0, 4):
dr = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=date,
state=State.RUNNING,
)
date = dag.following_schedule(date)
ti1 = TaskInstance(task1, dr.execution_date)
ti2 = TaskInstance(task2, dr.execution_date)
tis.append(ti1)
tis.append(ti2)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.flush()
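# Eight TIs (two per dagrun across four dagruns) are SCHEDULED; with
# max_tis_per_query=2 a single critical-section pass should queue at most two.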
scheduler.max_tis_per_query = 2
res = scheduler._critical_section_execute_task_instances(session)
self.assertEqual(2, res)
scheduler.max_tis_per_query = 8
with mock.patch.object(type(scheduler.executor),
'slots_available',
new_callable=mock.PropertyMock) as mock_slots:
mock_slots.return_value = 2
# Check that we don't "overfill" the executor
self.assertEqual(2, res)
res = scheduler._critical_section_execute_task_instances(session)
res = scheduler._critical_section_execute_task_instances(session)
self.assertEqual(4, res)
for ti in tis:
ti.refresh_from_db()
self.assertEqual(State.QUEUED, ti.state)
@pytest.mark.quarantined
def test_change_state_for_tis_without_dagrun(self):
dag1 = DAG(dag_id='test_change_state_for_tis_without_dagrun', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag1, owner='airflow')
DummyOperator(task_id='dummy_b', dag=dag1, owner='airflow')
dag2 = DAG(dag_id='test_change_state_for_tis_without_dagrun_dont_change', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag2, owner='airflow')
dag3 = DAG(dag_id='test_change_state_for_tis_without_dagrun_no_dagrun', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag3, owner='airflow')
session = settings.Session()
dr1 = dag1.create_dagrun(run_type=DagRunType.SCHEDULED,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
dr2 = dag2.create_dagrun(run_type=DagRunType.SCHEDULED,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti1a = dr1.get_task_instance(task_id='dummy', session=session)
ti1a.state = State.SCHEDULED
ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
ti1b.state = State.SUCCESS
session.commit()
ti2 = dr2.get_task_instance(task_id='dummy', session=session)
ti2.state = State.SCHEDULED
session.commit()
ti3 = TaskInstance(dag3.get_task('dummy'), DEFAULT_DATE)
ti3.state = State.SCHEDULED
session.merge(ti3)
session.commit()
with mock.patch.object(settings, "STORE_SERIALIZED_DAGS", True):
dagbag = DagBag("/dev/null", include_examples=False)
dagbag.bag_dag(dag1, root_dag=dag1)
dagbag.bag_dag(dag2, root_dag=dag2)
dagbag.bag_dag(dag3, root_dag=dag3)
dagbag.sync_to_db(session)
scheduler = SchedulerJob(num_runs=0)
scheduler.dagbag.collect_dags_from_db()
scheduler._change_state_for_tis_without_dagrun(
old_states=[State.SCHEDULED, State.QUEUED],
new_state=State.NONE,
session=session)
ti1a = dr1.get_task_instance(task_id='dummy', session=session)
ti1a.refresh_from_db(session=session)
self.assertEqual(ti1a.state, State.SCHEDULED)
ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
ti1b.refresh_from_db(session=session)
self.assertEqual(ti1b.state, State.SUCCESS)
ti2 = dr2.get_task_instance(task_id='dummy', session=session)
ti2.refresh_from_db(session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
ti3.refresh_from_db(session=session)
self.assertEqual(ti3.state, State.NONE)
self.assertIsNotNone(ti3.start_date)
self.assertIsNone(ti3.end_date)
self.assertIsNone(ti3.duration)
dr1.refresh_from_db(session=session)
dr1.state = State.FAILED
# why o why
session.merge(dr1)
session.commit()
scheduler._change_state_for_tis_without_dagrun(
old_states=[State.SCHEDULED, State.QUEUED],
new_state=State.NONE,
session=session)
ti1a.refresh_from_db(session=session)
self.assertEqual(ti1a.state, State.SCHEDULED)
# don't touch ti1b
ti1b.refresh_from_db(session=session)
self.assertEqual(ti1b.state, State.SUCCESS)
# don't touch ti2
ti2.refresh_from_db(session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
def test_change_state_for_tasks_failed_to_execute(self):
dag = DAG(
dag_id='dag_id',
start_date=DEFAULT_DATE)
task = DummyOperator(
task_id='task_id',
dag=dag,
owner='airflow')
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
# If there's no left over task in executor.queued_tasks, nothing happens
session = settings.Session()
scheduler_job = SchedulerJob()
mock_logger = mock.MagicMock()
test_executor = MockExecutor(do_update=False)
scheduler_job.executor = test_executor
scheduler_job._logger = mock_logger
scheduler_job._change_state_for_tasks_failed_to_execute() # pylint: disable=no-value-for-parameter
mock_logger.info.assert_not_called()
# Tasks failed to execute with QUEUED state will be set to SCHEDULED state.
session.query(TaskInstance).delete()
session.commit()
key = 'dag_id', 'task_id', DEFAULT_DATE, 1
test_executor.queued_tasks[key] = 'value'
ti = TaskInstance(task, DEFAULT_DATE)
ti.state = State.QUEUED
session.merge(ti) # pylint: disable=no-value-for-parameter
session.commit()
scheduler_job._change_state_for_tasks_failed_to_execute() # pylint: disable=no-value-for-parameter
ti.refresh_from_db()
self.assertEqual(State.SCHEDULED, ti.state)
# Tasks failed to execute with RUNNING state will not be set to SCHEDULED state.
session.query(TaskInstance).delete()
session.commit()
ti.state = State.RUNNING
session.merge(ti)
session.commit()
scheduler_job._change_state_for_tasks_failed_to_execute() # pylint: disable=no-value-for-parameter
ti.refresh_from_db()
self.assertEqual(State.RUNNING, ti.state)
def test_adopt_or_reset_orphaned_tasks(self):
session = settings.Session()
dag = DAG(
'test_execute_helper_reset_orphaned_tasks',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag.clear()
dr = dag.create_dagrun(run_type=DagRunType.SCHEDULED,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
dr2 = dag.create_dagrun(run_type=DagRunType.BACKFILL_JOB,
state=State.RUNNING,
execution_date=DEFAULT_DATE + datetime.timedelta(1),
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.SCHEDULED
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
ti2.state = State.SCHEDULED
session.commit()
processor = mock.MagicMock()
scheduler = SchedulerJob(num_runs=0)
scheduler.processor_agent = processor
scheduler.adopt_or_reset_orphaned_tasks()
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti.state, State.NONE)
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti2.state, State.SCHEDULED, "Tasks run by Backfill Jobs should not be reset")
@parameterized.expand([
[State.UP_FOR_RETRY, State.FAILED],
[State.QUEUED, State.NONE],
[State.SCHEDULED, State.NONE],
[State.UP_FOR_RESCHEDULE, State.NONE],
])
def test_scheduler_loop_should_change_state_for_tis_without_dagrun(self,
initial_task_state,
expected_task_state):
session = settings.Session()
dag_id = 'test_execute_helper_should_change_state_for_tis_without_dagrun'
dag = DAG(dag_id, start_date=DEFAULT_DATE, default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
# Write Dag to DB
with mock.patch.object(settings, "STORE_SERIALIZED_DAGS", True):
dagbag = DagBag(dag_folder="/dev/null", include_examples=False)
dagbag.bag_dag(dag, root_dag=dag)
dagbag.sync_to_db()
dag = DagBag(read_dags_from_db=True, include_examples=False).get_dag(dag_id)
# Create DAG run with FAILED state
dag.clear()
dr = dag.create_dagrun(run_type=DagRunType.SCHEDULED,
state=State.FAILED,
execution_date=DEFAULT_DATE + timedelta(days=1),
start_date=DEFAULT_DATE + timedelta(days=1),
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = initial_task_state
session.commit()
# Create scheduler and mock calls to processor. Run duration is set
# to a high value to ensure loop is entered. Poll interval is 0 to
# avoid sleep. Done flag is set to true to exit the loop immediately.
scheduler = SchedulerJob(num_runs=0, processor_poll_interval=0)
executor = MockExecutor(do_update=False)
executor.queued_tasks
scheduler.executor = executor
processor = mock.MagicMock()
processor.done = True
scheduler.processor_agent = processor
scheduler._run_scheduler_loop()
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti.state, expected_task_state)
self.assertIsNotNone(ti.start_date)
if expected_task_state in State.finished():
self.assertIsNotNone(ti.end_date)
self.assertEqual(ti.start_date, ti.end_date)
self.assertIsNotNone(ti.duration)
def test_dagrun_timeout_verify_max_active_runs(self):
"""
Test that a dagrun will not be scheduled if max_active_runs
has been reached and dagrun_timeout has not been reached.
Test that a dagrun would be scheduled if max_active_runs has
been reached but dagrun_timeout has also been reached.
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs_and_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
dag.dagrun_timeout = datetime.timedelta(seconds=60)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
scheduler = SchedulerJob()
scheduler.dagbag.bag_dag(dag, root_dag=dag)
scheduler.dagbag.sync_to_db()
session = settings.Session()
orm_dag = session.query(DagModel).get(dag.dag_id)
assert orm_dag is not None
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
scheduler = SchedulerJob()
scheduler._create_dag_runs([orm_dag], session)
drs = DagRun.find(dag_id=dag.dag_id, session=session)
assert len(drs) == 1
dr = drs[0]
# Should not be able to create a new dag run, as we are at max active runs
assert orm_dag.next_dagrun_create_after is None
# But we should record the date of _what run_ it would be
assert isinstance(orm_dag.next_dagrun, datetime.datetime)
# Should be scheduled as dagrun_timeout has passed
dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
session.flush()
# Mock that processor_agent is started
scheduler.processor_agent = mock.Mock()
scheduler.processor_agent.send_callback_to_execute = mock.Mock()
scheduler._schedule_dag_run(dr, 0, session)
session.flush()
session.refresh(dr)
assert dr.state == State.FAILED
session.refresh(orm_dag)
assert isinstance(orm_dag.next_dagrun, datetime.datetime)
assert isinstance(orm_dag.next_dagrun_create_after, datetime.datetime)
expected_callback = DagCallbackRequest(
full_filepath=dr.dag.fileloc,
dag_id=dr.dag_id,
is_failure_callback=True,
execution_date=dr.execution_date,
msg="timed_out"
)
# Verify dag failure callback request is sent to file processor
scheduler.processor_agent.send_callback_to_execute.assert_called_once_with(expected_callback)
session.rollback()
session.close()
def test_dagrun_timeout_fails_run(self):
"""
Test that a dagrun will be set to failed if it times out, even without max_active_runs
"""
dag = DAG(
dag_id='test_scheduler_fail_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.dagrun_timeout = datetime.timedelta(seconds=60)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
scheduler = SchedulerJob()
scheduler.dagbag.bag_dag(dag, root_dag=dag)
scheduler.dagbag.sync_to_db()
session = settings.Session()
orm_dag = session.query(DagModel).get(dag.dag_id)
assert orm_dag is not None
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
scheduler = SchedulerJob()
scheduler._create_dag_runs([orm_dag], session)
drs = DagRun.find(dag_id=dag.dag_id, session=session)
assert len(drs) == 1
dr = drs[0]
# Should be scheduled as dagrun_timeout has passed
dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
session.flush()
# Mock that processor_agent is started
scheduler.processor_agent = mock.Mock()
scheduler.processor_agent.send_callback_to_execute = mock.Mock()
scheduler._schedule_dag_run(dr, 0, session)
session.flush()
session.refresh(dr)
assert dr.state == State.FAILED
expected_callback = DagCallbackRequest(
full_filepath=dr.dag.fileloc,
dag_id=dr.dag_id,
is_failure_callback=True,
execution_date=dr.execution_date,
msg="timed_out"
)
# Verify dag failure callback request is sent to file processor
scheduler.processor_agent.send_callback_to_execute.assert_called_once_with(expected_callback)
session.rollback()
session.close()
@parameterized.expand([
(State.SUCCESS, "success"),
(State.FAILED, "task_failure")
])
def test_dagrun_callbacks_are_called(self, state, expected_callback_msg):
"""
Test that if a DagRun finishes and a success/failure callback is defined,
a callback request is sent to the DagFileProcessor.
Also test that the SLA callback function is called.
"""
dag = DAG(
dag_id='test_dagrun_callbacks_are_called',
start_date=DEFAULT_DATE,
on_success_callback=lambda x: print("success"),
on_failure_callback=lambda x: print("failed")
)
DummyOperator(task_id='dummy', dag=dag, owner='airflow')
scheduler = SchedulerJob()
scheduler.processor_agent = mock.Mock()
scheduler.processor_agent.send_callback_to_execute = mock.Mock()
scheduler._send_sla_callbacks_to_processor = mock.Mock()
# Sync DAG into DB
scheduler.dagbag.bag_dag(dag, root_dag=dag)
scheduler.dagbag.sync_to_db()
session = settings.Session()
orm_dag = session.query(DagModel).get(dag.dag_id)
assert orm_dag is not None
# Create DagRun
scheduler._create_dag_runs([orm_dag], session)
drs = DagRun.find(dag_id=dag.dag_id, session=session)
assert len(drs) == 1
dr = drs[0]
ti = dr.get_task_instance('dummy')
ti.set_state(state, session)
scheduler._schedule_dag_run(dr, 0, session)
expected_callback = DagCallbackRequest(
full_filepath=dr.dag.fileloc,
dag_id=dr.dag_id,
is_failure_callback=bool(state == State.FAILED),
execution_date=dr.execution_date,
msg=expected_callback_msg
)
# Verify dag failure callback request is sent to file processor
scheduler.processor_agent.send_callback_to_execute.assert_called_once_with(expected_callback)
# This is already tested separately
# In this test we just want to verify that this function is called
scheduler._send_sla_callbacks_to_processor.assert_called_once_with(dag)
session.rollback()
session.close()
def test_do_not_schedule_removed_task(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
dag.sync_to_db(session=session)
session.flush()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dr = dag.create_dagrun(
execution_date=DEFAULT_DATE,
run_type=DagRunType.SCHEDULED,
state=State.RUNNING,
session=session,
)
self.assertIsNotNone(dr)
# Re-create the DAG, but remove the task
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
scheduler = SchedulerJob()
res = scheduler._executable_task_instances_to_queued(max_tis=32, session=session)
self.assertEqual([], res)
session.rollback()
session.close()
@provide_session
def evaluate_dagrun(
self,
dag_id,
expected_task_states, # dict of task_id: state
dagrun_state,
run_kwargs=None,
advance_execution_date=False,
session=None): # pylint: disable=unused-argument
"""
Helper for testing DagRun states with simple two-task DAGs.
This is hackish: a dag run is created but its tasks are
run by a backfill.
"""
if run_kwargs is None:
run_kwargs = {}
dag = self.dagbag.get_dag(dag_id)
dr = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=dag.next_dagrun_after_date(None),
state=State.RUNNING,
)
if advance_execution_date:
# run a second time to schedule a dagrun after the start_date
dr = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=dag.following_schedule(dr.execution_date),
state=State.RUNNING,
)
ex_date = dr.execution_date
for tid, state in expected_task_states.items():
if state != State.FAILED:
continue
self.null_exec.mock_task_fail(dag_id, tid, ex_date)
try:
dag = DagBag().get_dag(dag.dag_id)
assert not isinstance(dag, SerializedDAG)
# This needs a _REAL_ dag, not the serialized version
dag.run(start_date=ex_date, end_date=ex_date, executor=self.null_exec, **run_kwargs)
except AirflowException:
pass
# test tasks
for task_id, expected_state in expected_task_states.items():
task = dag.get_task(task_id)
ti = TaskInstance(task, ex_date)
ti.refresh_from_db()
self.assertEqual(ti.state, expected_state)
# load dagrun
dr = DagRun.find(dag_id=dag_id, execution_date=ex_date)
dr = dr[0]
dr.dag = dag
self.assertEqual(dr.state, dagrun_state)
def test_dagrun_fail(self):
"""
DagRuns with one failed and one incomplete root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_fail',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.UPSTREAM_FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_success(self):
"""
DagRuns with one failed and one successful root task -> SUCCESS
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_success',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.SUCCESS,
},
dagrun_state=State.SUCCESS)
def test_dagrun_root_fail(self):
"""
DagRuns with one successful and one failed root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_root_fail',
expected_task_states={
'test_dagrun_succeed': State.SUCCESS,
'test_dagrun_fail': State.FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_root_fail_unfinished(self):
"""
DagRuns with one unfinished and one failed root task -> RUNNING
"""
# TODO: this should live in test_dagrun.py
# Run both the failed and successful tasks
dag_id = 'test_dagrun_states_root_fail_unfinished'
dag = self.dagbag.get_dag(dag_id)
dr = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
state=State.RUNNING,
)
self.null_exec.mock_task_fail(dag_id, 'test_dagrun_fail', DEFAULT_DATE)
with self.assertRaises(AirflowException):
dag.run(start_date=dr.execution_date, end_date=dr.execution_date, executor=self.null_exec)
# Mark the successful task as never having run since we want to see if the
# dagrun will be in a running state despite having an unfinished task.
with create_session() as session:
ti = dr.get_task_instance('test_dagrun_unfinished', session=session)
ti.state = State.NONE
session.commit()
dr.update_state()
self.assertEqual(dr.state, State.RUNNING)
def test_dagrun_root_after_dagrun_unfinished(self):
"""
DagRuns with one successful and one future root task -> SUCCESS
Note: the DagRun state could still be RUNNING during CI.
"""
clear_db_dags()
dag_id = 'test_dagrun_states_root_future'
dag = self.dagbag.get_dag(dag_id)
dag.sync_to_db()
scheduler = SchedulerJob(
num_runs=1,
executor=self.null_exec,
subdir=dag.fileloc)
scheduler.run()
first_run = DagRun.find(dag_id=dag_id, execution_date=DEFAULT_DATE)[0]
ti_ids = [(ti.task_id, ti.state) for ti in first_run.get_task_instances()]
self.assertEqual(ti_ids, [('current', State.SUCCESS)])
self.assertIn(first_run.state, [State.SUCCESS, State.RUNNING])
def test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date(self):
"""
DagRun is marked a success if ignore_first_depends_on_past=True
Test that an otherwise-deadlocked dagrun is marked as a success
if ignore_first_depends_on_past=True and the dagrun execution_date
is after the start_date.
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
advance_execution_date=True,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_dagrun_deadlock_ignore_depends_on_past(self):
"""
Test that ignore_first_depends_on_past doesn't affect results
(this is the same test as
test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date except
that start_date == execution_date so depends_on_past is irrelevant).
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_scheduler_start_date(self):
"""
Test that the scheduler respects start_dates, even when DAGs have run
"""
with create_session() as session:
dag_id = 'test_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertGreater(dag.start_date, datetime.datetime.now(timezone.utc))
# Deactivate other dags in this file
other_dag = self.dagbag.get_dag('test_task_start_date_scheduling')
other_dag.is_paused_upon_creation = True
other_dag.sync_to_db()
scheduler = SchedulerJob(executor=self.null_exec,
subdir=dag.fileloc,
num_runs=1)
scheduler.run()
# zero tasks ran
self.assertEqual(
len(session.query(TaskInstance).filter(TaskInstance.dag_id == dag_id).all()), 0)
session.commit()
self.assertListEqual([], self.null_exec.sorted_tasks)
# previously, running this backfill would kick off the Scheduler
# because it would take the most recent run and start from there.
# That behavior still exists, but now it will only do so if the run
# is after the start date
bf_exec = MockExecutor()
backfill = BackfillJob(
executor=bf_exec,
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
backfill.run()
# one task ran
self.assertEqual(
len(session.query(TaskInstance).filter(TaskInstance.dag_id == dag_id).all()), 1)
self.assertListEqual(
[
(TaskInstanceKey(dag.dag_id, 'dummy', DEFAULT_DATE, 1), (State.SUCCESS, None)),
],
bf_exec.sorted_tasks
)
session.commit()
scheduler = SchedulerJob(dag.fileloc,
executor=self.null_exec,
num_runs=1)
scheduler.run()
# still one task
self.assertEqual(
len(session.query(TaskInstance).filter(TaskInstance.dag_id == dag_id).all()), 1)
session.commit()
self.assertListEqual([], self.null_exec.sorted_tasks)
def test_scheduler_task_start_date(self):
"""
Test that the scheduler respects task start dates that are different from DAG start dates
"""
dagbag = DagBag(dag_folder=os.path.join(settings.DAGS_FOLDER, "no_dags.py"), include_examples=False)
dag_id = 'test_task_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.is_paused_upon_creation = False
dagbag.bag_dag(dag=dag, root_dag=dag)
# Deactivate other dags in this file so the scheduler doesn't waste time processing them
other_dag = self.dagbag.get_dag('test_start_date_scheduling')
other_dag.is_paused_upon_creation = True
dagbag.bag_dag(dag=other_dag, root_dag=other_dag)
dagbag.sync_to_db()
scheduler = SchedulerJob(executor=self.null_exec,
subdir=dag.fileloc,
num_runs=2)
scheduler.run()
session = settings.Session()
tiq = session.query(TaskInstance).filter(TaskInstance.dag_id == dag_id)
ti1s = tiq.filter(TaskInstance.task_id == 'dummy1').all()
ti2s = tiq.filter(TaskInstance.task_id == 'dummy2').all()
self.assertEqual(len(ti1s), 0)
self.assertEqual(len(ti2s), 2)
for task in ti2s:
self.assertEqual(task.state, State.SUCCESS)
def test_scheduler_multiprocessing(self):
"""
Test that the scheduler can successfully queue multiple dags in parallel
"""
dag_ids = ['test_start_date_scheduling', 'test_dagrun_states_success']
for dag_id in dag_ids:
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(executor=self.null_exec,
subdir=os.path.join(TEST_DAG_FOLDER, 'test_scheduler_dags.py'),
num_runs=1)
scheduler.run()
# zero tasks ran
dag_id = 'test_start_date_scheduling'
session = settings.Session()
self.assertEqual(
len(session.query(TaskInstance).filter(TaskInstance.dag_id == dag_id).all()), 0)
@conf_vars({("core", "mp_start_method"): "spawn"})
def test_scheduler_multiprocessing_with_spawn_method(self):
"""
Test that the scheduler can successfully queue multiple dags in parallel
when using "spawn" mode of multiprocessing. (Fork is default on Linux and older OSX)
"""
dag_ids = ['test_start_date_scheduling', 'test_dagrun_states_success']
for dag_id in dag_ids:
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(executor=self.null_exec,
subdir=os.path.join(
TEST_DAG_FOLDER, 'test_scheduler_dags.py'),
num_runs=1)
scheduler.run()
# zero tasks ran
dag_id = 'test_start_date_scheduling'
with create_session() as session:
self.assertEqual(
session.query(TaskInstance).filter(TaskInstance.dag_id == dag_id).count(), 0)
def test_scheduler_verify_pool_full(self):
"""
Test that task instances are not queued when the pool is full
"""
dag = DAG(
dag_id='test_scheduler_verify_pool_full',
start_date=DEFAULT_DATE)
BashOperator(
task_id='dummy',
dag=dag,
owner='airflow',
pool='test_scheduler_verify_pool_full',
bash_command='echo hi',
)
dagbag = DagBag(dag_folder=os.path.join(settings.DAGS_FOLDER, "no_dags.py"),
include_examples=False,
read_dags_from_db=True)
dagbag.bag_dag(dag=dag, root_dag=dag)
dagbag.sync_to_db()
session = settings.Session()
pool = Pool(pool='test_scheduler_verify_pool_full', slots=1)
session.add(pool)
session.flush()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
scheduler = SchedulerJob(executor=self.null_exec)
scheduler.processor_agent = mock.MagicMock()
# Create 2 dagruns, which will create 2 task instances.
dr = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
state=State.RUNNING,
)
scheduler._schedule_dag_run(dr, 0, session)
dr = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=dag.following_schedule(dr.execution_date),
state=State.RUNNING,
)
scheduler._schedule_dag_run(dr, 0, session)
task_instances_list = scheduler._executable_task_instances_to_queued(max_tis=32, session=session)
self.assertEqual(len(task_instances_list), 1)
def test_scheduler_verify_pool_full_2_slots_per_task(self):
"""
Test that task instances are not queued when the pool is full.
Variation with a non-default pool_slots value.
"""
dag = DAG(
dag_id='test_scheduler_verify_pool_full_2_slots_per_task',
start_date=DEFAULT_DATE)
BashOperator(
task_id='dummy',
dag=dag,
owner='airflow',
pool='test_scheduler_verify_pool_full_2_slots_per_task',
pool_slots=2,
bash_command='echo hi',
)
dagbag = DagBag(dag_folder=os.path.join(settings.DAGS_FOLDER, "no_dags.py"),
include_examples=False,
read_dags_from_db=True)
dagbag.bag_dag(dag=dag, root_dag=dag)
dagbag.sync_to_db()
session = settings.Session()
pool = Pool(pool='test_scheduler_verify_pool_full_2_slots_per_task', slots=6)
session.add(pool)
session.commit()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
scheduler = SchedulerJob(executor=self.null_exec)
scheduler.processor_agent = mock.MagicMock()
# Create 5 dagruns, which will create 5 task instances.
date = DEFAULT_DATE
for _ in range(5):
dr = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=date,
state=State.RUNNING,
)
scheduler._schedule_dag_run(dr, 0, session)
date = dag.following_schedule(date)
task_instances_list = scheduler._executable_task_instances_to_queued(max_tis=32, session=session)
# As tasks require 2 slots, only 3 can fit into 6 available
self.assertEqual(len(task_instances_list), 3)
def test_scheduler_verify_priority_and_slots(self):
"""
Test that task instances with higher priority are not queued
when the pool does not have enough slots,
though tasks with lower priority might still be executed.
"""
dag = DAG(
dag_id='test_scheduler_verify_priority_and_slots',
start_date=DEFAULT_DATE)
# Medium priority, not enough slots
BashOperator(
task_id='test_scheduler_verify_priority_and_slots_t0',
dag=dag,
owner='airflow',
pool='test_scheduler_verify_priority_and_slots',
pool_slots=2,
priority_weight=2,
bash_command='echo hi',
)
# High priority, occupies first slot
BashOperator(
task_id='test_scheduler_verify_priority_and_slots_t1',
dag=dag,
owner='airflow',
pool='test_scheduler_verify_priority_and_slots',
pool_slots=1,
priority_weight=3,
bash_command='echo hi',
)
# Low priority, occupies second slot
BashOperator(
task_id='test_scheduler_verify_priority_and_slots_t2',
dag=dag,
owner='airflow',
pool='test_scheduler_verify_priority_and_slots',
pool_slots=1,
priority_weight=1,
bash_command='echo hi',
)
dagbag = DagBag(dag_folder=os.path.join(settings.DAGS_FOLDER, "no_dags.py"),
include_examples=False,
read_dags_from_db=True)
dagbag.bag_dag(dag=dag, root_dag=dag)
dagbag.sync_to_db()
session = settings.Session()
pool = Pool(pool='test_scheduler_verify_priority_and_slots', slots=2)
session.add(pool)
session.commit()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
scheduler = SchedulerJob(executor=self.null_exec)
scheduler.processor_agent = mock.MagicMock()
dr = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
state=State.RUNNING,
)
scheduler._schedule_dag_run(dr, 0, session)
task_instances_list = scheduler._executable_task_instances_to_queued(max_tis=32, session=session)
# Only the second and third tasks (t1 and t2) fit into the pool's two slots
self.assertEqual(len(task_instances_list), 2)
ti0 = session.query(TaskInstance)\
.filter(TaskInstance.task_id == 'test_scheduler_verify_priority_and_slots_t0').first()
self.assertEqual(ti0.state, State.SCHEDULED)
ti1 = session.query(TaskInstance)\
.filter(TaskInstance.task_id == 'test_scheduler_verify_priority_and_slots_t1').first()
self.assertEqual(ti1.state, State.QUEUED)
ti2 = session.query(TaskInstance)\
.filter(TaskInstance.task_id == 'test_scheduler_verify_priority_and_slots_t2').first()
self.assertEqual(ti2.state, State.QUEUED)
def test_verify_integrity_if_dag_not_changed(self):
# CleanUp
with create_session() as session:
session.query(SerializedDagModel).filter(
SerializedDagModel.dag_id == 'test_verify_integrity_if_dag_not_changed'
).delete(synchronize_session=False)
dag = DAG(dag_id='test_verify_integrity_if_dag_not_changed', start_date=DEFAULT_DATE)
BashOperator(task_id='dummy', dag=dag, owner='airflow', bash_command='echo hi')
scheduler = SchedulerJob()
scheduler.dagbag.bag_dag(dag, root_dag=dag)
scheduler.dagbag.sync_to_db()
session = settings.Session()
orm_dag = session.query(DagModel).get(dag.dag_id)
assert orm_dag is not None
scheduler = SchedulerJob()
scheduler.processor_agent = mock.MagicMock()
dag = scheduler.dagbag.get_dag('test_verify_integrity_if_dag_not_changed', session=session)
scheduler._create_dag_runs([orm_dag], session)
drs = DagRun.find(dag_id=dag.dag_id, session=session)
assert len(drs) == 1
dr = drs[0]
# Verify that DagRun.verify_integrity is not called
with mock.patch('airflow.jobs.scheduler_job.DagRun.verify_integrity') as mock_verify_integrity:
scheduled_tis = scheduler._schedule_dag_run(dr, 0, session)
mock_verify_integrity.assert_not_called()
session.flush()
assert scheduled_tis == 1
tis_count = session.query(func.count(TaskInstance.task_id)).filter(
TaskInstance.dag_id == dr.dag_id,
TaskInstance.execution_date == dr.execution_date,
TaskInstance.task_id == dr.dag.tasks[0].task_id,
TaskInstance.state == State.SCHEDULED
).scalar()
assert tis_count == 1
latest_dag_version = SerializedDagModel.get_latest_version_hash(dr.dag_id, session=session)
assert dr.dag_hash == latest_dag_version
session.rollback()
session.close()
def test_verify_integrity_if_dag_changed(self):
# CleanUp
with create_session() as session:
session.query(SerializedDagModel).filter(
SerializedDagModel.dag_id == 'test_verify_integrity_if_dag_changed'
).delete(synchronize_session=False)
dag = DAG(dag_id='test_verify_integrity_if_dag_changed', start_date=DEFAULT_DATE)
BashOperator(task_id='dummy', dag=dag, owner='airflow', bash_command='echo hi')
scheduler = SchedulerJob()
scheduler.dagbag.bag_dag(dag, root_dag=dag)
scheduler.dagbag.sync_to_db()
session = settings.Session()
orm_dag = session.query(DagModel).get(dag.dag_id)
assert orm_dag is not None
scheduler = SchedulerJob()
scheduler.processor_agent = mock.MagicMock()
dag = scheduler.dagbag.get_dag('test_verify_integrity_if_dag_changed', session=session)
scheduler._create_dag_runs([orm_dag], session)
drs = DagRun.find(dag_id=dag.dag_id, session=session)
assert len(drs) == 1
dr = drs[0]
dag_version_1 = SerializedDagModel.get_latest_version_hash(dr.dag_id, session=session)
assert dr.dag_hash == dag_version_1
assert scheduler.dagbag.dags == {'test_verify_integrity_if_dag_changed': dag}
assert len(scheduler.dagbag.dags.get("test_verify_integrity_if_dag_changed").tasks) == 1
# Now let's say the DAG got updated (new task got added)
BashOperator(task_id='bash_task_1', dag=dag, bash_command='echo hi')
SerializedDagModel.write_dag(dag=dag)
dag_version_2 = SerializedDagModel.get_latest_version_hash(dr.dag_id, session=session)
assert dag_version_2 != dag_version_1
scheduled_tis = scheduler._schedule_dag_run(dr, 0, session)
session.flush()
assert scheduled_tis == 2
drs = DagRun.find(dag_id=dag.dag_id, session=session)
assert len(drs) == 1
dr = drs[0]
assert dr.dag_hash == dag_version_2
assert scheduler.dagbag.dags == {'test_verify_integrity_if_dag_changed': dag}
assert len(scheduler.dagbag.dags.get("test_verify_integrity_if_dag_changed").tasks) == 2
tis_count = session.query(func.count(TaskInstance.task_id)).filter(
TaskInstance.dag_id == dr.dag_id,
TaskInstance.execution_date == dr.execution_date,
TaskInstance.state == State.SCHEDULED
).scalar()
assert tis_count == 2
latest_dag_version = SerializedDagModel.get_latest_version_hash(dr.dag_id, session=session)
assert dr.dag_hash == latest_dag_version
session.rollback()
session.close()
def test_retry_still_in_executor(self):
"""
Checks that the scheduler does not put a task in limbo when it is retried
but is still present in the executor.
"""
executor = MockExecutor(do_update=False)
dagbag = DagBag(dag_folder=os.path.join(settings.DAGS_FOLDER, "no_dags.py"), include_examples=False)
dagbag.dags.clear()
dag = DAG(
dag_id='test_retry_still_in_executor',
start_date=DEFAULT_DATE,
schedule_interval="@once")
dag_task1 = BashOperator(
task_id='test_retry_handling_op',
bash_command='exit 1',
retries=1,
dag=dag,
owner='airflow')
dag.clear()
dag.is_subdag = False
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
dagbag.bag_dag(dag=dag, root_dag=dag)
dagbag.sync_to_db()
@mock.patch('airflow.jobs.scheduler_job.DagBag', return_value=dagbag)
def do_schedule(mock_dagbag):
# Use an empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
do_schedule() # pylint: disable=no-value-for-parameter
with create_session() as session:
ti = session.query(TaskInstance).filter(TaskInstance.dag_id == 'test_retry_still_in_executor',
TaskInstance.task_id == 'test_retry_handling_op').first()
ti.task = dag_task1
def run_with_error(ti, ignore_ti_state=False):
try:
ti.run(ignore_ti_state=ignore_ti_state)
except AirflowException:
pass
self.assertEqual(ti.try_number, 1)
# At this point, the scheduler has tried to schedule the task once and
# heartbeated the executor once, which moved the state of the task from
# SCHEDULED to QUEUED and then back to SCHEDULED. To fail the task execution
# we need to ignore the TaskInstance state, as SCHEDULED is not a valid state
# from which to start executing a task.
run_with_error(ti, ignore_ti_state=True)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti.try_number, 2)
with create_session() as session:
ti.refresh_from_db(lock_for_update=True, session=session)
ti.state = State.SCHEDULED
session.merge(ti)
# To verify that task does get re-queued.
executor.do_update = True
do_schedule() # pylint: disable=no-value-for-parameter
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
@pytest.mark.quarantined
def test_retry_handling_job(self):
"""
Integration test of the scheduler not accidentally resetting
the try_numbers for a task
"""
dag = self.dagbag.get_dag('test_retry_handling_job')
dag_task1 = dag.get_task("test_retry_handling_op")
dag.clear()
scheduler = SchedulerJob(dag_id=dag.dag_id,
num_runs=1)
scheduler.heartrate = 0
scheduler.run()
session = settings.Session()
ti = session.query(TaskInstance).filter(TaskInstance.dag_id == dag.dag_id,
TaskInstance.task_id == dag_task1.task_id).first()
# make sure the counter has increased
self.assertEqual(ti.try_number, 2)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
def test_dag_get_active_runs(self):
"""
Test to check that a DAG returns its active runs
"""
now = timezone.utcnow()
six_hours_ago_to_the_hour = \
(now - datetime.timedelta(hours=6)).replace(minute=0, second=0, microsecond=0)
start_date = six_hours_ago_to_the_hour
dag_name1 = 'get_active_runs_test'
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': start_date
}
dag1 = DAG(dag_name1,
schedule_interval='* * * * *',
max_active_runs=1,
default_args=default_args
)
run_this_1 = DummyOperator(task_id='run_this_1', dag=dag1)
run_this_2 = DummyOperator(task_id='run_this_2', dag=dag1)
run_this_2.set_upstream(run_this_1)
run_this_3 = DummyOperator(task_id='run_this_3', dag=dag1)
run_this_3.set_upstream(run_this_2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag1.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
dag1.clear()
dr = dag1.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=start_date,
state=State.RUNNING,
)
# We had better get a dag run
self.assertIsNotNone(dr)
execution_date = dr.execution_date
running_dates = dag1.get_active_runs()
try:
running_date = running_dates[0]
except Exception: # pylint: disable=broad-except
running_date = 'Except'
self.assertEqual(execution_date, running_date, 'Running Date must match Execution Date')
@conf_vars({("core", "dagbag_import_error_tracebacks"): "False"})
def test_add_unparseable_file_before_sched_start_creates_import_error(self):
dags_folder = mkdtemp()
try:
with env_vars({('core', 'dags_folder'): dags_folder}):
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
@conf_vars({("core", "dagbag_import_error_tracebacks"): "False"})
def test_add_unparseable_file_after_sched_start_creates_import_error(self):
"""
Check that new DAG files are picked up, and import errors recorded.
This is more of an "integration" test as it checks SchedulerJob, DagFileProcessorManager and
DagFileProcessor
"""
dags_folder = mkdtemp()
try:
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
print("Second run")
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_no_import_errors_with_parseable_dag(self):
try:
dags_folder = mkdtemp()
parseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(parseable_filename, 'w') as parseable_file:
parseable_file.writelines(PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 0)
@conf_vars({("core", "dagbag_import_error_tracebacks"): "False"})
def test_new_import_error_replaces_old(self):
try:
dags_folder = mkdtemp()
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
# Generate replacement import error (the error will be on the second line now)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(
PARSEABLE_DAG_FILE_CONTENTS +
os.linesep +
UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 2)".format(TEMP_DAG_FILENAME))
def test_remove_error_clears_import_error(self):
try:
dags_folder = mkdtemp()
filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
# Remove the import error from the file
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(
PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_remove_file_clears_import_error(self):
try:
dags_folder = mkdtemp()
filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
# Rerun the scheduler once the dag file has been removed
self.run_single_scheduler_loop_with_no_dags(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_import_error_tracebacks(self):
dags_folder = mkdtemp()
try:
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(unparseable_filename, "w") as unparseable_file:
unparseable_file.writelines(INVALID_DAG_WITH_DEPTH_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename, unparseable_filename)
expected_stacktrace = (
"Traceback (most recent call last):\n"
' File "{}", line 3, in <module>\n'
" something()\n"
' File "{}", line 2, in something\n'
" return airflow_DAG\n"
"NameError: name 'airflow_DAG' is not defined\n"
)
self.assertEqual(
import_error.stacktrace,
expected_stacktrace.format(unparseable_filename, unparseable_filename)
)
@conf_vars({("core", "dagbag_import_error_traceback_depth"): "1"})
def test_import_error_traceback_depth(self):
dags_folder = mkdtemp()
try:
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(unparseable_filename, "w") as unparseable_file:
unparseable_file.writelines(INVALID_DAG_WITH_DEPTH_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename, unparseable_filename)
expected_stacktrace = (
"Traceback (most recent call last):\n"
' File "{}", line 2, in something\n'
" return airflow_DAG\n"
"NameError: name 'airflow_DAG' is not defined\n"
)
self.assertEqual(
import_error.stacktrace, expected_stacktrace.format(unparseable_filename)
)
def test_import_error_tracebacks_zip(self):
dags_folder = mkdtemp()
try:
invalid_zip_filename = os.path.join(dags_folder, "test_zip_invalid.zip")
invalid_dag_filename = os.path.join(dags_folder, "test_zip_invalid.zip", TEMP_DAG_FILENAME)
with ZipFile(invalid_zip_filename, "w") as invalid_zip_file:
invalid_zip_file.writestr(TEMP_DAG_FILENAME, INVALID_DAG_WITH_DEPTH_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename, invalid_zip_filename)
expected_stacktrace = (
"Traceback (most recent call last):\n"
' File "{}", line 3, in <module>\n'
" something()\n"
' File "{}", line 2, in something\n'
" return airflow_DAG\n"
"NameError: name 'airflow_DAG' is not defined\n"
)
self.assertEqual(
import_error.stacktrace,
expected_stacktrace.format(invalid_dag_filename, invalid_dag_filename)
)
@conf_vars({("core", "dagbag_import_error_traceback_depth"): "1"})
def test_import_error_tracebacks_zip_depth(self):
dags_folder = mkdtemp()
try:
invalid_zip_filename = os.path.join(dags_folder, "test_zip_invalid.zip")
invalid_dag_filename = os.path.join(dags_folder, "test_zip_invalid.zip", TEMP_DAG_FILENAME)
with ZipFile(invalid_zip_filename, "w") as invalid_zip_file:
invalid_zip_file.writestr(TEMP_DAG_FILENAME, INVALID_DAG_WITH_DEPTH_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename, invalid_zip_filename)
expected_stacktrace = (
"Traceback (most recent call last):\n"
' File "{}", line 2, in something\n'
" return airflow_DAG\n"
"NameError: name 'airflow_DAG' is not defined\n"
)
self.assertEqual(
import_error.stacktrace, expected_stacktrace.format(invalid_dag_filename)
)
def test_list_py_file_paths(self):
"""
[JIRA-1357] Test the 'list_py_file_paths' function used by the
scheduler to list and load DAGs.
"""
detected_files = set()
expected_files = set()
# No_dags is empty, _invalid_ is ignored by .airflowignore
ignored_files = {
'no_dags.py',
'test_invalid_cron.py',
'test_zip_invalid_cron.zip',
'test_ignore_this.py',
}
for root, _, files in os.walk(TEST_DAG_FOLDER): # pylint: disable=too-many-nested-blocks
for file_name in files:
if file_name.endswith('.py') or file_name.endswith('.zip'):
if file_name not in ignored_files:
expected_files.add(
'{}/{}'.format(root, file_name))
for file_path in list_py_file_paths(TEST_DAG_FOLDER, include_examples=False):
detected_files.add(file_path)
self.assertEqual(detected_files, expected_files)
ignored_files = {
'helper.py',
}
example_dag_folder = airflow.example_dags.__path__[0]
for root, _, files in os.walk(example_dag_folder): # pylint: disable=too-many-nested-blocks
for file_name in files:
if file_name.endswith('.py') or file_name.endswith('.zip'):
if file_name not in ['__init__.py'] and file_name not in ignored_files:
expected_files.add(os.path.join(root, file_name))
detected_files.clear()
for file_path in list_py_file_paths(TEST_DAG_FOLDER, include_examples=True):
detected_files.add(file_path)
self.assertEqual(detected_files, expected_files)
smart_sensor_dag_folder = airflow.smart_sensor_dags.__path__[0]
for root, _, files in os.walk(smart_sensor_dag_folder):
for file_name in files:
if (file_name.endswith('.py') or file_name.endswith('.zip')) and \
file_name not in ['__init__.py']:
expected_files.add(os.path.join(root, file_name))
detected_files.clear()
for file_path in list_py_file_paths(TEST_DAG_FOLDER,
include_examples=True,
include_smart_sensor=True):
detected_files.add(file_path)
self.assertEqual(detected_files, expected_files)
def test_adopt_or_reset_orphaned_tasks_nothing(self):
"""Try with nothing. """
scheduler = SchedulerJob()
session = settings.Session()
self.assertEqual(0, scheduler.adopt_or_reset_orphaned_tasks(session=session))
def test_adopt_or_reset_orphaned_tasks_external_triggered_dag(self):
dag_id = 'test_reset_orphaned_tasks_external_triggered_dag'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = dag.create_dagrun(run_type=DagRunType.SCHEDULED,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
external_trigger=True,
session=session)
ti = dr1.get_task_instances(session=session)[0]
ti.state = State.SCHEDULED
session.merge(ti)
session.merge(dr1)
session.commit()
num_reset_tis = scheduler.adopt_or_reset_orphaned_tasks(session=session)
self.assertEqual(1, num_reset_tis)
def test_adopt_or_reset_orphaned_tasks_backfill_dag(self):
dag_id = 'test_adopt_or_reset_orphaned_tasks_backfill_dag'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
session.add(scheduler)
session.flush()
dr1 = dag.create_dagrun(run_type=DagRunType.BACKFILL_JOB,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr1.get_task_instances(session=session)[0]
ti.state = State.SCHEDULED
session.merge(ti)
session.merge(dr1)
session.flush()
self.assertTrue(dr1.is_backfill)
self.assertEqual(0, scheduler.adopt_or_reset_orphaned_tasks(session=session))
session.rollback()
def test_reset_orphaned_tasks_nonexistent_dagrun(self):
"""Make sure a task in an orphaned state is not reset if it has no dagrun. """
dag_id = 'test_reset_orphaned_tasks_nonexistent_dagrun'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
task = DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
session.add(ti)
session.flush()
ti.refresh_from_db()
ti.state = State.SCHEDULED
session.merge(ti)
session.flush()
self.assertEqual(0, scheduler.adopt_or_reset_orphaned_tasks(session=session))
session.rollback()
def test_reset_orphaned_tasks_no_orphans(self):
dag_id = 'test_reset_orphaned_tasks_no_orphans'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
session.add(scheduler)
session.flush()
dr1 = dag.create_dagrun(run_type=DagRunType.SCHEDULED,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
tis = dr1.get_task_instances(session=session)
tis[0].state = State.RUNNING
tis[0].queued_by_job_id = scheduler.id
session.merge(dr1)
session.merge(tis[0])
session.flush()
self.assertEqual(0, scheduler.adopt_or_reset_orphaned_tasks(session=session))
tis[0].refresh_from_db()
self.assertEqual(State.RUNNING, tis[0].state)
def test_reset_orphaned_tasks_non_running_dagruns(self):
"""Ensure orphaned tasks with non-running dagruns are not reset."""
dag_id = 'test_reset_orphaned_tasks_non_running_dagruns'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
session.add(scheduler)
session.flush()
dr1 = dag.create_dagrun(run_type=DagRunType.SCHEDULED,
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
tis = dr1.get_task_instances(session=session)
self.assertEqual(1, len(tis))
tis[0].state = State.SCHEDULED
tis[0].queued_by_job_id = scheduler.id
session.merge(dr1)
session.merge(tis[0])
session.flush()
self.assertEqual(0, scheduler.adopt_or_reset_orphaned_tasks(session=session))
session.rollback()
def test_adopt_or_reset_orphaned_tasks_stale_scheduler_jobs(self):
dag_id = 'test_adopt_or_reset_orphaned_tasks_stale_scheduler_jobs'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
DummyOperator(task_id='task1', dag=dag)
DummyOperator(task_id='task2', dag=dag)
scheduler_job = SchedulerJob()
session = settings.Session()
scheduler_job.state = State.RUNNING
scheduler_job.latest_heartbeat = timezone.utcnow()
session.add(scheduler_job)
old_job = SchedulerJob()
old_job.state = State.RUNNING
old_job.latest_heartbeat = timezone.utcnow() - timedelta(minutes=15)
session.add(old_job)
session.flush()
dr1 = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING,
session=session
)
ti1, ti2 = dr1.get_task_instances(session=session)
dr1.state = State.RUNNING
ti1.state = State.SCHEDULED
ti1.queued_by_job_id = old_job.id
session.merge(dr1)
session.merge(ti1)
ti2.state = State.SCHEDULED
ti2.queued_by_job_id = scheduler_job.id
session.merge(ti2)
session.flush()
num_reset_tis = scheduler_job.adopt_or_reset_orphaned_tasks(session=session)
self.assertEqual(1, num_reset_tis)
session.refresh(ti1)
self.assertEqual(None, ti1.state)
session.refresh(ti2)
self.assertEqual(State.SCHEDULED, ti2.state)
session.rollback()
def test_send_sla_callbacks_to_processor_sla_disabled(self):
"""Test SLA Callbacks are not sent when check_slas is False"""
dag_id = 'test_send_sla_callbacks_to_processor_sla_disabled'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
DummyOperator(task_id='task1', dag=dag)
with patch.object(settings, "CHECK_SLAS", False):
scheduler_job = SchedulerJob()
mock_agent = mock.MagicMock()
scheduler_job.processor_agent = mock_agent
scheduler_job._send_sla_callbacks_to_processor(dag)
scheduler_job.processor_agent.send_sla_callback_request_to_execute.assert_not_called()
def test_send_sla_callbacks_to_processor_sla_no_task_slas(self):
"""Test SLA Callbacks are not sent when no task SLAs are defined"""
dag_id = 'test_send_sla_callbacks_to_processor_sla_no_task_slas'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
DummyOperator(task_id='task1', dag=dag)
with patch.object(settings, "CHECK_SLAS", True):
scheduler_job = SchedulerJob()
mock_agent = mock.MagicMock()
scheduler_job.processor_agent = mock_agent
scheduler_job._send_sla_callbacks_to_processor(dag)
scheduler_job.processor_agent.send_sla_callback_request_to_execute.assert_not_called()
def test_send_sla_callbacks_to_processor_sla_with_task_slas(self):
"""Test SLA Callbacks are sent to the DAG Processor when SLAs are defined on tasks"""
dag_id = 'test_send_sla_callbacks_to_processor_sla_with_task_slas'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
DummyOperator(task_id='task1', dag=dag, sla=timedelta(seconds=60))
with patch.object(settings, "CHECK_SLAS", True):
scheduler_job = SchedulerJob()
mock_agent = mock.MagicMock()
scheduler_job.processor_agent = mock_agent
scheduler_job._send_sla_callbacks_to_processor(dag)
scheduler_job.processor_agent.send_sla_callback_request_to_execute.assert_called_once_with(
full_filepath=dag.fileloc, dag_id=dag_id
)
@pytest.mark.xfail(reason="Work out where this goes")
def test_task_with_upstream_skip_process_task_instances():
"""
Test if _process_task_instances puts a task instance into SKIPPED state if any of its
upstream tasks are skipped according to TriggerRuleDep.
"""
clear_db_runs()
with DAG(
dag_id='test_task_with_upstream_skip_dag',
start_date=DEFAULT_DATE,
schedule_interval=None
) as dag:
dummy1 = DummyOperator(task_id='dummy1')
dummy2 = DummyOperator(task_id="dummy2")
dummy3 = DummyOperator(task_id="dummy3")
[dummy1, dummy2] >> dummy3
# dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
dr = dag.create_dagrun(run_type=DagRunType.MANUAL,
state=State.RUNNING,
execution_date=DEFAULT_DATE)
assert dr is not None
with create_session() as session:
tis = {ti.task_id: ti for ti in dr.get_task_instances(session=session)}
# Set dummy1 to skipped and dummy2 to success. dummy3 remains as none.
tis[dummy1.task_id].state = State.SKIPPED
tis[dummy2.task_id].state = State.SUCCESS
assert tis[dummy3.task_id].state == State.NONE
# dag_runs = DagRun.find(dag_id='test_task_with_upstream_skip_dag')
# dag_file_processor._process_task_instances(dag, dag_runs=dag_runs)
with create_session() as session:
tis = {ti.task_id: ti for ti in dr.get_task_instances(session=session)}
assert tis[dummy1.task_id].state == State.SKIPPED
assert tis[dummy2.task_id].state == State.SUCCESS
# dummy3 should be skipped because dummy1 is skipped.
assert tis[dummy3.task_id].state == State.SKIPPED
class TestSchedulerJobQueriesCount(unittest.TestCase):
"""
These tests are designed to detect changes in the number of queries for
different DAG files. These tests allow easy detection when a change is
made that affects the performance of the SchedulerJob.
"""
def setUp(self) -> None:
clear_db_runs()
clear_db_pools()
clear_db_dags()
clear_db_sla_miss()
clear_db_errors()
clear_db_serialized_dags()
clear_db_dags()
@parameterized.expand(
[
# pylint: disable=bad-whitespace
# expected, dag_count, task_count
# One DAG with one task per DAG file
(23, 1, 1), # noqa
# One DAG with five tasks per DAG file
(23, 1, 5), # noqa
# 10 DAGs with 10 tasks per DAG file
(95, 10, 10), # noqa
]
)
@pytest.mark.quarantined
def test_execute_queries_count_with_harvested_dags(self, expected_query_count, dag_count, task_count):
with mock.patch.dict("os.environ", {
"PERF_DAGS_COUNT": str(dag_count),
"PERF_TASKS_COUNT": str(task_count),
"PERF_START_AGO": "1d",
"PERF_SCHEDULE_INTERVAL": "30m",
"PERF_SHAPE": "no_structure",
}), conf_vars({
('scheduler', 'use_job_schedule'): 'True',
('core', 'load_examples'): 'False',
('core', 'store_serialized_dags'): 'True',
}), mock.patch.object(settings, 'STORE_SERIALIZED_DAGS', True):
dagruns = []
dagbag = DagBag(dag_folder=ELASTIC_DAG_FILE, include_examples=False)
dagbag.sync_to_db()
dag_ids = dagbag.dag_ids
dagbag = DagBag(read_dags_from_db=True)
for i, dag_id in enumerate(dag_ids):
dag = dagbag.get_dag(dag_id)
dr = dag.create_dagrun(
state=State.RUNNING,
run_id=f"{DagRunType.MANUAL.value}__{i}",
dag_hash=dagbag.dags_hash[dag.dag_id],
)
dagruns.append(dr)
for ti in dr.get_task_instances():
ti.set_state(state=State.SCHEDULED)
mock_agent = mock.MagicMock()
job = SchedulerJob(subdir=PERF_DAGS_FOLDER, num_runs=1)
job.executor = MockExecutor(do_update=False)
job.heartbeat = mock.MagicMock()
job.processor_agent = mock_agent
with assert_queries_count(expected_query_count):
with mock.patch.object(DagRun, 'next_dagruns_to_examine') as mock_dagruns:
mock_dagruns.return_value = dagruns
job._run_scheduler_loop()
@parameterized.expand(
[
# pylint: disable=bad-whitespace
# expected, dag_count, task_count, start_ago, schedule_interval, shape
# One DAG with one task per DAG file
([10, 10, 10, 10], 1, 1, "1d", "None", "no_structure"), # noqa
([10, 10, 10, 10], 1, 1, "1d", "None", "linear"), # noqa
([22, 14, 14, 14], 1, 1, "1d", "@once", "no_structure"), # noqa
([22, 14, 14, 14], 1, 1, "1d", "@once", "linear"), # noqa
([22, 24, 27, 30], 1, 1, "1d", "30m", "no_structure"), # noqa
([22, 24, 27, 30], 1, 1, "1d", "30m", "linear"), # noqa
([22, 24, 27, 30], 1, 1, "1d", "30m", "binary_tree"), # noqa
([22, 24, 27, 30], 1, 1, "1d", "30m", "star"), # noqa
([22, 24, 27, 30], 1, 1, "1d", "30m", "grid"), # noqa
# One DAG with five tasks per DAG file
([10, 10, 10, 10], 1, 5, "1d", "None", "no_structure"), # noqa
([10, 10, 10, 10], 1, 5, "1d", "None", "linear"), # noqa
([22, 14, 14, 14], 1, 5, "1d", "@once", "no_structure"), # noqa
([23, 15, 15, 15], 1, 5, "1d", "@once", "linear"), # noqa
([22, 24, 27, 30], 1, 5, "1d", "30m", "no_structure"), # noqa
([23, 26, 30, 34], 1, 5, "1d", "30m", "linear"), # noqa
([23, 26, 30, 34], 1, 5, "1d", "30m", "binary_tree"), # noqa
([23, 26, 30, 34], 1, 5, "1d", "30m", "star"), # noqa
([23, 26, 30, 34], 1, 5, "1d", "30m", "grid"), # noqa
# 10 DAGs with 10 tasks per DAG file
([10, 10, 10, 10], 10, 10, "1d", "None", "no_structure"), # noqa
([10, 10, 10, 10], 10, 10, "1d", "None", "linear"), # noqa
([85, 38, 38, 38], 10, 10, "1d", "@once", "no_structure"), # noqa
([95, 51, 51, 51], 10, 10, "1d", "@once", "linear"), # noqa
([85, 99, 99, 99], 10, 10, "1d", "30m", "no_structure"), # noqa
([95, 125, 125, 125], 10, 10, "1d", "30m", "linear"), # noqa
([95, 119, 119, 119], 10, 10, "1d", "30m", "binary_tree"), # noqa
([95, 119, 119, 119], 10, 10, "1d", "30m", "star"), # noqa
([95, 119, 119, 119], 10, 10, "1d", "30m", "grid"), # noqa
# pylint: enable=bad-whitespace
]
)
@pytest.mark.quarantined
def test_process_dags_queries_count(
self, expected_query_counts, dag_count, task_count, start_ago, schedule_interval, shape
):
with mock.patch.dict("os.environ", {
"PERF_DAGS_COUNT": str(dag_count),
"PERF_TASKS_COUNT": str(task_count),
"PERF_START_AGO": start_ago,
"PERF_SCHEDULE_INTERVAL": schedule_interval,
"PERF_SHAPE": shape,
}), conf_vars({
('scheduler', 'use_job_schedule'): 'True',
('core', 'store_serialized_dags'): 'True',
}), mock.patch.object(settings, 'STORE_SERIALIZED_DAGS', True):
dagbag = DagBag(dag_folder=ELASTIC_DAG_FILE, include_examples=False)
dagbag.sync_to_db()
mock_agent = mock.MagicMock()
job = SchedulerJob(subdir=PERF_DAGS_FOLDER, num_runs=1)
job.executor = MockExecutor(do_update=False)
job.heartbeat = mock.MagicMock()
job.processor_agent = mock_agent
for expected_query_count in expected_query_counts:
with create_session() as session:
with assert_queries_count(expected_query_count):
job._do_scheduling(session)
| [] | [] | [
"AIRFLOW__CORE__DAGS_FOLDER"
] | [] | ["AIRFLOW__CORE__DAGS_FOLDER"] | python | 1 | 0 | |
database/driver.go | // Package database provides the Database interface.
// All database drivers must implement this interface, register themselves,
// optionally provide a `WithInstance` function and pass the tests
// in package database/testing.
package database
import (
"fmt"
"io"
nurl "net/url"
"sync"
)
var (
ErrLocked = fmt.Errorf("can't acquire lock")
)
const NilVersion int = -1
var driversMu sync.RWMutex
var drivers = make(map[string]Driver)
// Driver is the interface every database driver must implement.
//
// How to implement a database driver?
// 1. Implement this interface.
// 2. Optionally, add a function named `WithInstance`.
// This function should accept an existing DB instance and a Config{} struct
// and return a driver instance.
// 3. Add a test that calls database/testing.go:Test()
// 4. Add own tests for Open(), WithInstance() (when provided) and Close().
// All other functions are tested by tests in database/testing.
// Saves you some time and makes sure all database drivers behave the same way.
// 5. Call Register in init().
// 6. Create a migrate/cli/build_<driver-name>.go file
// 7. Add driver name in 'DATABASE' variable in Makefile
//
// Guidelines:
// * Don't try to correct user input. Don't assume things.
// When in doubt, return an error and explain the situation to the user.
// * All configuration input must come from the URL string in func Open()
// or the Config{} struct in WithInstance. Don't os.Getenv().
type Driver interface {
// Open returns a new driver instance configured with parameters
// coming from the URL string. Migrate will call this function
// only once per instance.
Open(url string) (Driver, error)
// Close closes the underlying database instance managed by the driver.
// Migrate will call this function only once per instance.
Close() error
// Lock should acquire a database lock so that only one migration process
// can run at a time. Migrate will call this function before Run is called.
// If the implementation can't provide this functionality, return nil.
// Return database.ErrLocked if database is already locked.
Lock() error
// Unlock should release the lock. Migrate will call this function after
// all migrations have been run.
Unlock() error
	// Run applies a migration to the database. migration is guaranteed to be non-nil.
Run(migration io.Reader) error
// SetVersion saves version and dirty state.
// Migrate will call this function before and after each call to Run.
// version must be >= -1. -1 means NilVersion.
SetVersion(version int, dirty bool) error
// Version returns the currently active version and if the database is dirty.
// When no migration has been applied, it must return version -1.
// Dirty means, a previous migration failed and user interaction is required.
Version() (version int, dirty bool, err error)
// Drop deletes everything in the database.
// Note that this is a breaking action, a new call to Open() is necessary to
// ensure subsequent calls work as expected.
Drop() error
}
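// The checklist in the Driver doc comment is easier to follow with a concrete
// shape in mind. The sketch below is illustrative only and is not one of the
// real drivers: a minimal in-memory Driver whose name ("exampleStubDriver",
// scheme "stub") and no-op behaviour are assumptions made for this example.
// A real driver would also register itself in an init function, e.g.
// func init() { Register("stub", &exampleStubDriver{version: NilVersion}) }.
type exampleStubDriver struct {
	version int
	dirty   bool
}
func (d *exampleStubDriver) Open(url string) (Driver, error) {
	// A real driver would parse url and connect to the database here.
	return &exampleStubDriver{version: NilVersion}, nil
}
func (d *exampleStubDriver) Close() error  { return nil }
func (d *exampleStubDriver) Lock() error   { return nil }
func (d *exampleStubDriver) Unlock() error { return nil }
func (d *exampleStubDriver) Run(migration io.Reader) error {
	// A real driver would read the migration and apply it to the database.
	return nil
}
func (d *exampleStubDriver) SetVersion(version int, dirty bool) error {
	d.version, d.dirty = version, dirty
	return nil
}
func (d *exampleStubDriver) Version() (version int, dirty bool, err error) {
	return d.version, d.dirty, nil
}
func (d *exampleStubDriver) Drop() error { return nil }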
// Open returns a new driver instance.
func Open(url string) (Driver, error) {
u, err := nurl.Parse(url)
if err != nil {
return nil, fmt.Errorf("Unable to parse URL. Did you escape all reserved URL characters? "+
"See: https://github.com/orkusinc/migrate#database-urls Error: %v", err)
}
if u.Scheme == "" {
return nil, fmt.Errorf("database driver: invalid URL scheme")
}
driversMu.RLock()
d, ok := drivers[u.Scheme]
driversMu.RUnlock()
if !ok {
return nil, fmt.Errorf("database driver: unknown driver %v (forgotten import?)", u.Scheme)
}
return d.Open(url)
}
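// exampleOpenUsage is an illustrative sketch only, not part of the package
// API: the URL scheme selects whichever driver registered itself under that
// name, so the (assumed) connection string below resolves to a driver that
// registered as "postgres".
func exampleOpenUsage() (Driver, error) {
	return Open("postgres://user:pass@localhost:5432/db?sslmode=disable")
}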
// Register globally registers a driver.
func Register(name string, driver Driver) {
driversMu.Lock()
defer driversMu.Unlock()
if driver == nil {
panic("Register driver is nil")
}
if _, dup := drivers[name]; dup {
panic("Register called twice for driver " + name)
}
drivers[name] = driver
}
// List lists the registered drivers
func List() []string {
driversMu.RLock()
defer driversMu.RUnlock()
names := make([]string, 0, len(drivers))
for n := range drivers {
names = append(names, n)
}
return names
}
| [] | [] | [] | [] | [] | go | 0 | 0 | |
post_patch_to_portal.py | import os, sys, argparse, subprocess
import json
from dcicutils import ff_utils, s3_utils
import boto3
'''
Possible future upgrades:
- Build pubic docker image within post/patch script (would require Dockerfiles for each image)
- Replace try/except with query to database for the post/patch steps
'''
def main(ff_env='fourfront-cgapwolf', skip_software=False, skip_file_format=False,
skip_workflow=False, skip_metaworkflow=False, skip_file_reference=False,
skip_cwl=False, skip_ecr=False, cwl_bucket='', account='', region='',
del_prev_version=False, ignore_key_conflict=False,
         ugrp_unrelated=False):
"""post / patch contents from portal_objects to the portal"""
if os.environ.get('GLOBAL_BUCKET_ENV', ''): # new cgap account
s3 = s3_utils.s3Utils(env=ff_env)
keycgap = s3.get_access_keys('access_key_admin')
else:
keycgap = ff_utils.get_authentication_with_server(ff_env=ff_env)
# Version
with open("VERSION") as f:
version = f.readlines()[0].strip()
# Pipeline - REMOVE INPUT
with open("PIPELINE") as f:
pipeline = f.readlines()[0].strip()
# Software
if not skip_software:
print("Processing software...")
with open('portal_objects/software.json') as f:
d = json.load(f)
for dd in d:
print(" processing uuid %s" % dd['uuid'])
try:
ff_utils.post_metadata(dd, 'Software', key=keycgap)
except:
ff_utils.patch_metadata(dd, dd['uuid'], key=keycgap)
# File formats
if not skip_file_format:
print("Processing file format...")
with open('portal_objects/file_format.json') as f:
d = json.load(f)
for dd in d:
print(" processing uuid %s" % dd['uuid'])
try:
ff_utils.post_metadata(dd, 'FileFormat', key=keycgap)
except Exception as e:
if 'Keys conflict' in str(e):
if ignore_key_conflict:
pass
else:
raise(e)
else:
ff_utils.patch_metadata(dd, dd['uuid'], key=keycgap)
# Workflows
if not skip_workflow: #going to add in PIPELINE replacement for ecr url.
print("Processing workflow...")
if cwl_bucket != '' and account != '' and region != '' and pipeline != '':
wf_dir = "portal_objects/workflows"
files = os.listdir(wf_dir)
for fn in files:
if fn.endswith('.json'):
print(" processing file %s" % fn)
with open(os.path.join(wf_dir, fn), 'r') as f:
d = json.load(f)
if del_prev_version:
# Clean previous version and aliases if present
if d.get('previous_version'):
del d['previous_version']
if d.get('aliases'):
d['aliases'] = [d['aliases'][0]]
# replace VERSION variable with correct version
d["aliases"][0] = d["aliases"][0].replace("VERSION",version)
for k in ["app_version", "docker_image_name", "name"]:
d[k] = d[k].replace("VERSION",version)
# replace CWLBUCKET and VERSION variables in cwl_directory_url_v1
d["cwl_directory_url_v1"] = d["cwl_directory_url_v1"].replace("CWLBUCKET", cwl_bucket).replace("PIPELINE", pipeline).replace("VERSION", version)
# replace ACCOUNT and VERSION variables for docker_image_name
account_region = account+".dkr.ecr."+region+".amazonaws.com"
d["docker_image_name"] = d["docker_image_name"].replace("ACCOUNT",account_region).replace("VERSION",version)
# Patch
try:
ff_utils.post_metadata(d, 'Workflow', key=keycgap)
except:
ff_utils.patch_metadata(d, d['uuid'], key=keycgap)
else:
# throw an error if the cwl bucket is not provided
print("ERROR: when run without --skip-workflow, user must provide input for:\n --cwl-bucket (user provided: "+cwl_bucket+")\n --account (user provided: "+account+")\n --region (user provided: "+region+")\n --pipeline (user provied: "+pipeline+", choices are \'snv\' or \'sv\')")
sys.exit(1)
# File reference
if not skip_file_reference:
print("Processing file reference...")
with open('portal_objects/file_reference.json') as f:
d = json.load(f)
for dd in d:
print(" processing uuid %s" % dd['uuid'])
try:
ff_utils.post_metadata(dd, 'FileReference', key=keycgap)
except:
ff_utils.patch_metadata(dd, dd['uuid'], key=keycgap)
# Metaworkflows
if not skip_metaworkflow:
print("Processing metaworkflow...")
wf_dir = "portal_objects/metaworkflows"
files = os.listdir(wf_dir)
for fn in files:
if fn.endswith('.json'):
print(" processing file %s" % fn)
with open(os.path.join(wf_dir, fn), 'r') as f:
d = json.load(f)
for k in ['title','version']:
d[k] = d[k].replace("VERSION", version)
if del_prev_version:
# Clean previous version if present
if d.get('previous_version'):
del d['previous_version']
if ugrp_unrelated:
uuid_ugrp_unrl = 'eac862c0-8c87-4838-83cb-9a77412bff6f'
for input in d['input']:
if input['argument_name'] == 'unrelated':
input['files'] = [{"file": uuid_ugrp_unrl}]
try:
ff_utils.post_metadata(d, 'MetaWorkflow', key=keycgap)
except:
ff_utils.patch_metadata(d, d['uuid'], key=keycgap)
# CWLs
if not skip_cwl:
print("Processing cwl files...")
if cwl_bucket != '' and account != '' and region != '' and pipeline != '':
wf_dir = "cwl"
s3 = boto3.resource('s3')
            # make a tmp dir for the modified CWL files
os.mkdir(wf_dir+"/upload")
account_region = account+".dkr.ecr."+region+".amazonaws.com"
files = os.listdir(wf_dir)
for fn in files:
if fn.endswith('.cwl'):
# set original file path and path for s3
file_path = wf_dir+'/'+fn
s3_path_and_file = pipeline+'/'+version+'/'+fn
# separate workflows, which can be automatically uploaded to s3 without edits ...
if fn.startswith('workflow'):
print(" processing file %s" % fn)
s3.meta.client.upload_file(file_path, cwl_bucket, s3_path_and_file, ExtraArgs={'ACL':'public-read'})
# ... from CommandLineTool files which have the dockerPull that needs modification
else:
print(" processing file %s" % fn)
with open(file_path, 'r') as f:
with open(wf_dir+"/upload/"+fn, 'w') as w:
for line in f:
if "dockerPull" in line:
# modify line for output file by replacing generic variables
line = line.replace("ACCOUNT",account_region).replace("VERSION",version)
w.write(line)
# once modified, upload to s3
upload_path_and_file = wf_dir+"/upload/"+fn
s3.meta.client.upload_file(upload_path_and_file, cwl_bucket, s3_path_and_file, ExtraArgs={'ACL':'public-read'})
# delete file to allow tmp folder to be deleted at the end
os.remove(upload_path_and_file)
# clean the directory from github repo
os.rmdir(wf_dir+"/upload")
else:
# throw an error if the necessary input variables are not provided
print("ERROR: when run without --skip-cwl, user must provide input for:\n --cwl-bucket (user provided: "+cwl_bucket+")\n --account (user provided: "+account+")\n --region (user provided: "+region+")\n --pipeline (user provied: "+pipeline+", choices are \'snv\' or \'sv\')")
sys.exit(1)
if not skip_ecr:
print("Processing ECR images...")
if account != '' and region != '' and pipeline != '':
account_region = account+".dkr.ecr."+region+".amazonaws.com"
# generic bash commands to be modified to correct version and account information
snv_images = '''
# login
echo "For this to work, proper permissions are required within the EC2 environment"
aws ecr get-login-password --region REGION | docker login --username AWS --password-stdin ACCOUNT
# cgap on docker is snv on ECR
docker pull cgap/cgap:VERSION
docker tag cgap/cgap:VERSION ACCOUNT/snv:VERSION
docker push ACCOUNT/snv:VERSION
# md5 is same on docker and ECR
docker pull cgap/md5:VERSION
docker tag cgap/md5:VERSION ACCOUNT/md5:VERSION
docker push ACCOUNT/md5:VERSION
# fastqc is same on docker and ECR
docker pull cgap/fastqc:VERSION
docker tag cgap/fastqc:VERSION ACCOUNT/fastqc:VERSION
docker push ACCOUNT/fastqc:VERSION
'''
sv_images = '''
# login
echo "For this to work, proper permissions are required within the EC2 environment"
aws ecr get-login-password --region REGION | docker login --username AWS --password-stdin ACCOUNT
# cgap-manta on docker is manta on ECR
docker pull cgap/cgap-manta:VERSION
docker tag cgap/cgap-manta:VERSION ACCOUNT/manta:VERSION
docker push ACCOUNT/manta:VERSION
# cnv is same on docker and ECR
docker pull cgap/cnv:VERSION
docker tag cgap/cnv:VERSION ACCOUNT/cnv:VERSION
docker push ACCOUNT/cnv:VERSION
'''
if pipeline == 'snv':
cmd = snv_images.replace("REGION", region).replace("ACCOUNT", account_region).replace("VERSION", version)
# replace all variables
            elif pipeline == 'sv':
cmd = sv_images.replace("REGION", region).replace("ACCOUNT", account_region).replace("VERSION", version)
            # create and push the images
subprocess.check_call(cmd, shell=True)
print("ECR images created!")
else:
# throw an error if the cwl bucket is not provided
print("ERROR: when run without --skip-ecr, user must provide input for:\n --account (user provided: "+account+")\n --region (user provided: "+region+")\n --pipeline (user provied: "+pipeline+", choices are \'snv\' or \'sv\')")
sys.exit(1)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--ff-env', default='fourfront-cgapwolf')
parser.add_argument('--skip-software', action='store_true')
parser.add_argument('--skip-file-format', action='store_true')
parser.add_argument('--skip-workflow', action='store_true')
parser.add_argument('--skip-metaworkflow', action='store_true')
parser.add_argument('--skip-file-reference', action='store_true')
parser.add_argument('--skip-cwl', action='store_true')
parser.add_argument('--skip-ecr', action='store_true')
parser.add_argument('--cwl-bucket', default='')
parser.add_argument('--account', default='')
parser.add_argument('--region', default='')
parser.add_argument('--del-prev-version', action='store_true')
parser.add_argument('--ignore-key-conflict', action='store_true')
parser.add_argument('--ugrp-unrelated', action='store_true')
args = parser.parse_args()
main(ff_env=args.ff_env, skip_software=args.skip_software,
skip_file_format=args.skip_file_format, skip_workflow=args.skip_workflow,
skip_metaworkflow=args.skip_metaworkflow, skip_file_reference=args.skip_file_reference,
skip_cwl=args.skip_cwl, skip_ecr=args.skip_ecr, cwl_bucket=args.cwl_bucket, account=args.account,
region=args.region, del_prev_version=args.del_prev_version,
ignore_key_conflict=args.ignore_key_conflict, ugrp_unrelated=args.ugrp_unrelated)
| [] | [] | [
"GLOBAL_BUCKET_ENV"
] | [] | ["GLOBAL_BUCKET_ENV"] | python | 1 | 0 | |
wsgi.py | """
WSGI config for mwach project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mwach.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
internal/deploy/deploy.go | package deploy
import (
"fmt"
"os"
"github.com/iac-io/myiac/internal/commandline"
"github.com/iac-io/myiac/internal/util"
)
type Deployer interface {
Deploy(appName string, environment string, propertiesMap map[string]string, dryRun bool)
}
type baseDeployer struct {
helmDeployer
chartsPath string
}
func NewDeployerWithCharts(chartsPath string) Deployer {
if chartsPath == "" {
chartsPath = getBaseChartsPath()
}
return &baseDeployer{chartsPath: chartsPath}
}
func NewDeployer() Deployer {
return &baseDeployer{chartsPath: getBaseChartsPath()}
}
// moneycolfrontend, moneycolserver, elasticsearch, traefik, traefik-dev, collections-api
func (bd baseDeployer) Deploy(appName string, environment string, propertiesMap map[string]string, dryRun bool) {
helmSetParams := make(map[string]string)
addPropertiesToSetParams(helmSetParams, propertiesMap)
cmdRunner := commandline.NewEmpty()
helmDeployer := NewHelmDeployer(bd.chartsPath, cmdRunner, nil)
deployment := HelmDeployment{
AppName: appName,
Environment: environment,
HelmSetParams: helmSetParams,
DryRun: dryRun,
}
helmDeployer.Deploy(&deployment)
}
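// exampleDeployUsage is an illustrative sketch only, not part of the package
// API; the app name, environment and image.tag property are assumptions, not
// values this package requires.
func exampleDeployUsage() {
	props := map[string]string{"image.tag": "1.2.3"}
	// dryRun=true presumably renders the helm release without installing it.
	NewDeployer().Deploy("traefik", "dev", props, true)
}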
func addPropertiesToSetParams(helmSetParams map[string]string, propertiesMap map[string]string) {
for k, v := range propertiesMap {
fmt.Printf("Adding property: %s -> %s", k, v)
helmSetParams[k] = v
}
fmt.Printf("Helm Set Params %v", helmSetParams)
}
func getBaseChartsPath() string {
chartsPath := os.Getenv("CHARTS_PATH")
if chartsPath != "" {
return chartsPath
}
chartsPath = util.CurrentExecutableDir() + "/charts"
return chartsPath
}
| [
"\"CHARTS_PATH\""
] | [] | [
"CHARTS_PATH"
] | [] | ["CHARTS_PATH"] | go | 1 | 0 | |
test/kubetest/k8s.go | package kubetest
import (
"bufio"
"context"
"errors"
"fmt"
"net/url"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
"sync"
"time"
. "github.com/onsi/gomega"
"github.com/sirupsen/logrus"
arv1beta1 "k8s.io/api/admissionregistration/v1beta1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"github.com/networkservicemesh/networkservicemesh/k8s/pkg/apis/networkservice/v1alpha1"
"github.com/networkservicemesh/networkservicemesh/k8s/pkg/networkservice/clientset/versioned"
"github.com/networkservicemesh/networkservicemesh/k8s/pkg/networkservice/namespace"
"github.com/networkservicemesh/networkservicemesh/test/kubetest/pods"
nsmrbac "github.com/networkservicemesh/networkservicemesh/test/kubetest/rbac"
)
const (
// PodStartTimeout - Default pod startup time
PodStartTimeout = 3 * time.Minute
podDeleteTimeout = 15 * time.Second
podExecTimeout = 1 * time.Minute
podGetLogTimeout = 1 * time.Minute
)
const (
envUseIPv6 = "USE_IPV6"
envUseIPv6Default = false
)
type PodDeployResult struct {
pod *v1.Pod
err error
}
func waitTimeout(logPrefix string, wg *sync.WaitGroup, timeout time.Duration) bool {
c := make(chan struct{})
go func() {
defer close(c)
wg.Wait()
}()
select {
case <-c:
return true
case <-time.After(timeout):
logrus.Errorf("%v Timeout in waitTimeout", logPrefix)
return false
}
}
func (k8s *K8s) createAndBlock(client kubernetes.Interface, namespace string, timeout time.Duration, pods ...*v1.Pod) []*PodDeployResult {
var wg sync.WaitGroup
resultChan := make(chan *PodDeployResult, len(pods))
for _, pod := range pods {
wg.Add(1)
go func(pod *v1.Pod) {
defer wg.Done()
var err error
createdPod, err := client.CoreV1().Pods(namespace).Create(pod)
// We need to have non nil pod in any case.
if createdPod != nil && createdPod.Name != "" {
pod = createdPod
}
if err != nil {
logrus.Errorf("Failed to create pod. Cause: %v pod: %v", err, pod)
resultChan <- &PodDeployResult{pod, err}
return
}
pod, err = blockUntilPodReady(client, timeout, pod)
if err != nil {
logrus.Errorf("blockUntilPodReady failed. Cause: %v pod: %v", err, pod)
resultChan <- &PodDeployResult{pod, err}
return
}
// Let's fetch more information about pod created
			updatedPod, err := client.CoreV1().Pods(namespace).Get(pod.Name, metaV1.GetOptions{})
if err != nil {
logrus.Errorf("Failed to Get endpoint. Cause: %v pod: %v", err, pod)
resultChan <- &PodDeployResult{pod, err}
return
}
			resultChan <- &PodDeployResult{updatedPod, nil}
}(pod)
}
if !waitTimeout(fmt.Sprintf("createAndBlock with pods: %v", pods), &wg, timeout) {
logrus.Errorf("Failed to deploy pod, trying to get any information")
results := []*PodDeployResult{}
for _, p := range pods {
pod, err := client.CoreV1().Pods(namespace).Get(p.Name, metaV1.GetOptions{})
if err != nil {
logrus.Errorf("Failed to get pod information: %v", err)
}
k8s.DescribePod(pod)
if pod != nil {
logrus.Infof("Pod information: %v", pod)
for _, cs := range pod.Status.ContainerStatuses {
if !cs.Ready {
logs, _ := k8s.GetLogs(pod, cs.Name)
logrus.Infof("Pod %v container not started: %v Logs: %v", pod.Name, cs.Name, logs)
}
}
}
results = append(results, &PodDeployResult{
err: fmt.Errorf("Failed to deploy pod"),
pod: pod,
})
return results
}
return nil
}
results := make([]*PodDeployResult, len(pods))
named := map[string]*PodDeployResult{}
for i := 0; i < len(pods); i++ {
pod := <-resultChan
named[pod.pod.Name] = pod
}
for i := 0; i < len(pods); i++ {
results[i] = named[pods[i].Name]
}
	// We need to put the pods in the right order
return results
}
func blockUntilPodReady(client kubernetes.Interface, timeout time.Duration, sourcePod *v1.Pod) (*v1.Pod, error) {
st := time.Now()
infoPrinted := false
for {
pod, err := client.CoreV1().Pods(sourcePod.Namespace).Get(sourcePod.Name, metaV1.GetOptions{})
		// Make sure we don't lose the pod information.
if pod == nil {
pod = sourcePod
}
if err != nil {
return pod, err
}
if pod != nil && pod.Status.Phase != v1.PodPending {
break
}
if time.Since(st) > timeout/2 && !infoPrinted {
logrus.Infof("Pod deploy half time passed: %v", pod)
infoPrinted = true
}
time.Sleep(time.Millisecond * time.Duration(50))
if time.Since(st) > timeout {
return pod, podTimeout(pod)
}
}
watcher, err := client.CoreV1().Pods(sourcePod.Namespace).Watch(metaV1.SingleObject(metaV1.ObjectMeta{Name: sourcePod.Name}))
if err != nil {
return sourcePod, err
}
for {
select {
case _, ok := <-watcher.ResultChan():
if !ok {
return sourcePod, fmt.Errorf("Some error watching for pod status")
}
pod, err := client.CoreV1().Pods(sourcePod.Namespace).Get(sourcePod.Name, metaV1.GetOptions{})
if err == nil {
if isPodReady(pod) {
watcher.Stop()
return pod, nil
}
}
case <-time.After(timeout):
return sourcePod, podTimeout(sourcePod)
}
}
}
func podTimeout(pod *v1.Pod) error {
return fmt.Errorf("Timeout during waiting for pod change status for pod %s %v status: ", pod.Name, pod.Status.Conditions)
}
func isPodReady(pod *v1.Pod) bool {
for _, containerStatus := range pod.Status.ContainerStatuses {
if !containerStatus.Ready {
// If one of containers is not yet ready, return false
return false
}
}
return true
}
func blockUntilPodWorking(client kubernetes.Interface, context context.Context, pod *v1.Pod) error {
exists := make(chan error)
go func() {
for {
pod, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metaV1.GetOptions{})
if err != nil {
// Pod not found
close(exists)
break
}
if pod == nil {
close(exists)
break
}
<-time.After(time.Millisecond * time.Duration(50))
}
}()
select {
case <-context.Done():
return podTimeout(pod)
case err, ok := <-exists:
if err != nil {
return err
}
if ok {
return errors.New("unintended")
}
return nil
}
}
type K8s struct {
clientset kubernetes.Interface
versionedClientSet *versioned.Clientset
pods []*v1.Pod
config *rest.Config
roles []nsmrbac.Role
namespace string
apiServerHost string
useIPv6 bool
forwardingPlane string
}
func NewK8s(prepare bool) (*K8s, error) {
client, err := NewK8sWithoutRoles(prepare)
client.roles, _ = client.CreateRoles("admin", "view", "binding")
return client, err
}
func NewK8sWithoutRoles(prepare bool) (*K8s, error) {
path := os.Getenv("KUBECONFIG")
if len(path) == 0 {
path = os.Getenv("HOME") + "/.kube/config"
}
config, err := clientcmd.BuildConfigFromFlags("", path)
Expect(err).To(BeNil())
client := K8s{
pods: []*v1.Pod{},
}
client.setForwardingPlane()
client.config = config
client.clientset, err = kubernetes.NewForConfig(config)
Expect(err).To(BeNil())
client.apiServerHost = config.Host
client.initNamespace()
client.setIPVersion()
client.versionedClientSet, err = versioned.NewForConfig(config)
Expect(err).To(BeNil())
if prepare {
start := time.Now()
client.Prepare("nsmgr", "nsmd", "vppagent", "vpn", "icmp", "nsc", "source", "dest")
client.CleanupCRDs()
client.CleanupServices("nsm-admission-webhook-svc")
client.CleanupDeployments()
client.CleanupMutatingWebhookConfigurations()
client.CleanupSecrets("nsm-admission-webhook-certs")
client.CleanupConfigMaps()
_ = nsmrbac.DeleteAllRoles(client.clientset)
logrus.Printf("Cleanup done: %v", time.Since(start))
}
return &client, nil
}
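// exampleK8sUsage is an illustrative sketch only, not part of the test API;
// the pod template, container index and expected log line are assumptions.
func exampleK8sUsage(podTemplate *v1.Pod) {
	k8s, err := NewK8s(true) // true: clean up leftover NSM resources first
	Expect(err).To(BeNil())
	defer k8s.Cleanup()
	pod := k8s.CreatePod(podTemplate)
	k8s.WaitLogsContains(pod, pod.Spec.Containers[0].Name, "expected log line", PodStartTimeout)
}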
// Immediate deletion does not wait for confirmation that the running resource has been terminated.
// The resource may continue to run on the cluster indefinitely
func (k8s *K8s) deletePodForce(pod *v1.Pod) error {
graceTimeout := int64(0)
delOpt := &metaV1.DeleteOptions{
GracePeriodSeconds: &graceTimeout,
}
err := k8s.clientset.CoreV1().Pods(pod.Namespace).Delete(pod.Name, delOpt)
if err != nil {
return err
}
ctx, cancel := context.WithTimeout(context.Background(), podDeleteTimeout)
defer cancel()
err = blockUntilPodWorking(k8s.clientset, ctx, pod)
if err != nil {
return err
}
return nil
}
func (k8s *K8s) checkAPIServerAvailable() {
u, err := url.Parse(k8s.apiServerHost)
if err != nil {
logrus.Error(err)
}
logrus.Infof("Checking availability of API server on %v", u.Hostname())
out, err := exec.Command("ping", u.Hostname(), "-c 5").Output()
if err != nil {
logrus.Error(err)
}
logrus.Infof(string(out))
}
func (k8s *K8s) initNamespace() {
var err error
nsmNamespace := namespace.GetNamespace()
k8s.namespace, err = k8s.CreateTestNamespace(nsmNamespace)
if err != nil {
k8s.checkAPIServerAvailable()
}
Expect(err).To(BeNil())
}
// Delete POD with completion check
// Make force delete on timeout
func (k8s *K8s) deletePods(pods ...*v1.Pod) error {
var wg sync.WaitGroup
var err error
	for _, myPod := range pods {
		wg.Add(1)
		pod := myPod
go func() {
defer wg.Done()
delOpt := &metaV1.DeleteOptions{}
st := time.Now()
logrus.Infof("Deleting %v", pod.Name)
err = k8s.clientset.CoreV1().Pods(pod.Namespace).Delete(pod.Name, delOpt)
if err != nil {
logrus.Warnf(`The POD "%s" may continue to run on the cluster, %v`, pod.Name, err)
return
}
c, cancel := context.WithTimeout(context.Background(), podDeleteTimeout)
defer cancel()
err = blockUntilPodWorking(k8s.clientset, c, pod)
if err != nil {
err = k8s.deletePodForce(pod)
if err != nil {
logrus.Warnf(`The POD "%s" may continue to run on the cluster`, pod.Name)
logrus.Warn(err)
}
}
logrus.Warnf(`The POD "%s" Deleted %v`, pod.Name, time.Since(st))
}()
}
wg.Wait()
return err
}
func (k8s *K8s) deletePodsForce(pods ...*v1.Pod) error {
var err error
for _, pod := range pods {
err = k8s.deletePodForce(pod)
if err != nil {
logrus.Warnf(`The POD "%s" may continue to run on the cluster %v`, pod.Name, err)
}
}
return err
}
// GetVersion returns the k8s version
func (k8s *K8s) GetVersion() string {
version, err := k8s.clientset.Discovery().ServerVersion()
Expect(err).To(BeNil())
return fmt.Sprintf("%s", version)
}
// GetNodes returns the nodes
func (k8s *K8s) GetNodes() []v1.Node {
nodes, err := k8s.clientset.CoreV1().Nodes().List(metaV1.ListOptions{})
if err != nil {
k8s.checkAPIServerAvailable()
}
Expect(err).To(BeNil())
return nodes.Items
}
// ListPods lists the pods
func (k8s *K8s) ListPods() []v1.Pod {
podList, err := k8s.clientset.CoreV1().Pods(k8s.namespace).List(metaV1.ListOptions{})
Expect(err).To(BeNil())
return podList.Items
}
// CleanupCRDs cleans up CRDs
func (k8s *K8s) CleanupCRDs() {
// Clean up Network Services
services, _ := k8s.versionedClientSet.NetworkservicemeshV1alpha1().NetworkServices(k8s.namespace).List(metaV1.ListOptions{})
for _, service := range services.Items {
_ = k8s.versionedClientSet.NetworkservicemeshV1alpha1().NetworkServices(k8s.namespace).Delete(service.Name, &metaV1.DeleteOptions{})
}
// Clean up Network Service Endpoints
endpoints, _ := k8s.versionedClientSet.NetworkservicemeshV1alpha1().NetworkServiceEndpoints(k8s.namespace).List(metaV1.ListOptions{})
for _, ep := range endpoints.Items {
_ = k8s.versionedClientSet.NetworkservicemeshV1alpha1().NetworkServiceEndpoints(k8s.namespace).Delete(ep.Name, &metaV1.DeleteOptions{})
}
// Clean up Network Service Managers
managers, _ := k8s.versionedClientSet.NetworkservicemeshV1alpha1().NetworkServiceManagers(k8s.namespace).List(metaV1.ListOptions{})
for _, mgr := range managers.Items {
_ = k8s.versionedClientSet.NetworkservicemeshV1alpha1().NetworkServiceManagers(k8s.namespace).Delete(mgr.Name, &metaV1.DeleteOptions{})
}
}
// DescribePod describes a pod
func (k8s *K8s) DescribePod(pod *v1.Pod) {
eventsInterface := k8s.clientset.CoreV1().Events(k8s.namespace)
selector := eventsInterface.GetFieldSelector(&pod.Name, &k8s.namespace, nil, nil)
options := metaV1.ListOptions{FieldSelector: selector.String()}
events, err := eventsInterface.List(options)
if err != nil {
logrus.Error(err)
}
for i := len(events.Items) - 1; i >= 0; i-- {
if pod.UID == events.Items[i].InvolvedObject.UID {
logrus.Info(events.Items[i])
}
}
}
// PrintImageVersion prints the image version of a pod.
func (k8s *K8s) PrintImageVersion(pod *v1.Pod) {
logs, err := k8s.GetLogs(pod, pod.Spec.Containers[0].Name)
Expect(err).Should(BeNil())
versionSubStr := "Version: "
index := strings.Index(logs, versionSubStr)
Expect(index == -1).ShouldNot(BeTrue())
index += len(versionSubStr)
builder := strings.Builder{}
for ; index < len(logs); index++ {
if logs[index] == '\n' {
break
}
err = builder.WriteByte(logs[index])
Expect(err).Should(BeNil())
}
version := builder.String()
Expect(strings.TrimSpace(version)).ShouldNot(Equal(""))
logrus.Infof("Version of %v is %v", pod.Name, version)
}
// CleanupEndpointsCRDs clean Network Service Endpoints from registry
func (k8s *K8s) CleanupEndpointsCRDs() {
endpoints, _ := k8s.versionedClientSet.NetworkservicemeshV1alpha1().NetworkServiceEndpoints(k8s.namespace).List(metaV1.ListOptions{})
for i := range endpoints.Items {
_ = k8s.versionedClientSet.NetworkservicemeshV1alpha1().NetworkServiceEndpoints(k8s.namespace).Delete(endpoints.Items[i].Name, &metaV1.DeleteOptions{})
}
}
// Cleanup cleans up
func (k8s *K8s) Cleanup() {
st := time.Now()
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
_ = k8s.deletePods(k8s.pods...)
}()
wg.Add(1)
go func() {
defer wg.Done()
k8s.CleanupCRDs()
}()
wg.Add(1)
go func() {
defer wg.Done()
k8s.CleanupConfigMaps()
}()
wg.Add(1)
go func() {
defer wg.Done()
_ = k8s.DeleteRoles(k8s.roles)
}()
wg.Wait()
k8s.pods = nil
_ = k8s.DeleteTestNamespace(k8s.namespace)
logrus.Infof("Cleanup time: %v", time.Since(st))
}
// Prepare prepares the pods
func (k8s *K8s) Prepare(noPods ...string) {
for _, podName := range noPods {
pods := k8s.ListPods()
for i := range pods {
lpod := &pods[i]
if strings.Contains(lpod.Name, podName) {
k8s.DeletePods(lpod)
}
}
}
}
// CreatePods create pods
func (k8s *K8s) CreatePods(templates ...*v1.Pod) []*v1.Pod {
pods, _ := k8s.CreatePodsRaw(PodStartTimeout, true, templates...)
return pods
}
// CreatePodsRaw create raw pods
func (k8s *K8s) CreatePodsRaw(timeout time.Duration, failTest bool, templates ...*v1.Pod) ([]*v1.Pod, error) {
results := k8s.createAndBlock(k8s.clientset, k8s.namespace, timeout, templates...)
pods := []*v1.Pod{}
	// Add pods to the managed list of created pods regardless of errors, since we still need to remove them.
errs := []error{}
for _, podResult := range results {
if podResult == nil {
logrus.Errorf("Error - Pod should have been created, but is nil: %v", podResult)
} else {
if podResult.pod != nil {
pods = append(pods, podResult.pod)
}
if podResult.err != nil {
logrus.Errorf("Error Creating Pod: %s %v", podResult.pod.Name, podResult.err)
errs = append(errs, podResult.err)
}
}
}
k8s.pods = append(k8s.pods, pods...)
	// Make sure the unit test fails if any pod failed to deploy
	var err error
if failTest {
Expect(len(errs)).To(Equal(0))
} else {
		// Let's construct the error
err = fmt.Errorf("Errors %v", errs)
}
return pods, err
}
// GetPod gets a pod
func (k8s *K8s) GetPod(pod *v1.Pod) (*v1.Pod, error) {
return k8s.clientset.CoreV1().Pods(pod.Namespace).Get(pod.Name, metaV1.GetOptions{})
}
// CreatePod creates a pod
func (k8s *K8s) CreatePod(template *v1.Pod) *v1.Pod {
results, err := k8s.CreatePodsRaw(PodStartTimeout, true, template)
if err != nil || len(results) == 0 {
return nil
} else {
return results[0]
}
}
// DeletePods delete pods
func (k8s *K8s) DeletePods(pods ...*v1.Pod) {
err := k8s.deletePods(pods...)
Expect(err).To(BeNil())
for _, pod := range pods {
for idx, pod0 := range k8s.pods {
if pod.Name == pod0.Name {
k8s.pods = append(k8s.pods[:idx], k8s.pods[idx+1:]...)
}
}
}
}
// DeletePodsForce delete pods forcefully
func (k8s *K8s) DeletePodsForce(pods ...*v1.Pod) {
err := k8s.deletePodsForce(pods...)
Expect(err).To(BeNil())
for _, pod := range pods {
for idx, pod0 := range k8s.pods {
if pod.Name == pod0.Name {
k8s.pods = append(k8s.pods[:idx], k8s.pods[idx+1:]...)
}
}
}
}
// GetLogsChannel returns logs channel from pod with the given options
func (k8s *K8s) GetLogsChannel(ctx context.Context, pod *v1.Pod, options *v1.PodLogOptions) (chan string, chan error) {
linesChan := make(chan string, 1)
errChan := make(chan error, 1)
go func() {
defer close(linesChan)
defer close(errChan)
reader, err := k8s.clientset.CoreV1().Pods(k8s.namespace).GetLogs(pod.Name, options).Stream()
if err != nil {
logrus.Errorf("Failed to get logs from %v", pod.Name)
errChan <- err
return
}
defer func() { _ = reader.Close() }()
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
select {
case <-ctx.Done():
return
case linesChan <- scanner.Text():
}
}
errChan <- scanner.Err()
}()
return linesChan, errChan
}
// GetLogsWithOptions returns logs collected from pod with the given options
func (k8s *K8s) GetLogsWithOptions(pod *v1.Pod, options *v1.PodLogOptions) (string, error) {
ctx, cancel := context.WithTimeout(context.Background(), podGetLogTimeout)
defer cancel()
var builder strings.Builder
for linesChan, errChan := k8s.GetLogsChannel(ctx, pod, options); ; {
select {
case line := <-linesChan:
_, _ = builder.WriteString(line)
_, _ = builder.WriteString("\n")
case err := <-errChan:
return builder.String(), err
}
}
}
// GetLogs returns logs collected from pod::container
func (k8s *K8s) GetLogs(pod *v1.Pod, container string) (string, error) {
return k8s.GetLogsWithOptions(pod, &v1.PodLogOptions{
Container: container,
})
}
// WaitLogsContains waits with timeout for pod::container logs to contain pattern as substring
func (k8s *K8s) WaitLogsContains(pod *v1.Pod, container, pattern string, timeout time.Duration) {
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
matcher := func(s string) bool {
return strings.Contains(s, pattern)
}
description := fmt.Sprintf("Timeout waiting for logs pattern %v in %v::%v.", pattern, pod.Name, container)
k8s.waitLogsMatch(ctx, pod, container, matcher, description)
}
// WaitLogsContainsRegex waits with timeout for pod::contained logs to contain substring matching regexp pattern
func (k8s *K8s) WaitLogsContainsRegex(pod *v1.Pod, container, pattern string, timeout time.Duration) error {
r, err := regexp.Compile(pattern)
if err != nil {
return err
}
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
matcher := func(s string) bool {
return r.FindStringSubmatch(s) != nil
}
description := fmt.Sprintf("Timeout waiting for logs matching regexp %v in %v::%v.", pattern, pod.Name, container)
k8s.waitLogsMatch(ctx, pod, container, matcher, description)
return nil
}
func (k8s *K8s) waitLogsMatch(ctx context.Context, pod *v1.Pod, container string, matcher func(string) bool, description string) {
options := &v1.PodLogOptions{
Container: container,
Follow: true,
}
var builder strings.Builder
for linesChan, errChan := k8s.GetLogsChannel(ctx, pod, options); ; {
select {
case err := <-errChan:
if err != nil {
logrus.Warnf("Error on get logs: %v retrying", err)
} else {
logrus.Warnf("Reached end of logs for %v::%v", pod.GetName(), container)
}
<-time.After(100 * time.Millisecond)
linesChan, errChan = k8s.GetLogsChannel(ctx, pod, options)
case line := <-linesChan:
_, _ = builder.WriteString(line)
_, _ = builder.WriteString("\n")
if matcher(line) {
return
}
case <-ctx.Done():
logrus.Errorf("%v Last logs: %v", description, builder.String())
Expect(false).To(BeTrue())
return
}
}
}
// UpdatePod updates a pod
func (k8s *K8s) UpdatePod(pod *v1.Pod) *v1.Pod {
	pod, err := k8s.clientset.CoreV1().Pods(pod.Namespace).Get(pod.Name, metaV1.GetOptions{})
	Expect(err).To(BeNil())
return pod
}
// GetClientSet returns a clientset
func (k8s *K8s) GetClientSet() (kubernetes.Interface, error) {
return k8s.clientset, nil
}
// GetConfig returns config
func (k8s *K8s) GetConfig() *rest.Config {
return k8s.config
}
func isNodeReady(node *v1.Node) bool {
for _, c := range node.Status.Conditions {
if c.Type == v1.NodeReady {
resultValue := c.Status == v1.ConditionTrue
return resultValue
}
}
return false
}
// GetNodesWait waits until the required number of nodes are up and running
func (k8s *K8s) GetNodesWait(requiredNumber int, timeout time.Duration) []v1.Node {
st := time.Now()
warnPrinted := false
for {
nodes := k8s.GetNodes()
ready := 0
for i := range nodes {
node := &nodes[i]
logrus.Infof("Checking node: %s", node.Name)
if isNodeReady(node) {
ready++
}
}
if ready >= requiredNumber {
return nodes
}
since := time.Since(st)
if since > timeout {
Expect(len(nodes)).To(Equal(requiredNumber))
}
if since > timeout/10 && !warnPrinted {
logrus.Warnf("Waiting for %d nodes to arrive, currently have: %d", requiredNumber, len(nodes))
warnPrinted = true
}
time.Sleep(50 * time.Millisecond)
}
}
// CreateService creates a service
func (k8s *K8s) CreateService(service *v1.Service, namespace string) (*v1.Service, error) {
_ = k8s.clientset.CoreV1().Services(namespace).Delete(service.Name, &metaV1.DeleteOptions{})
s, err := k8s.clientset.CoreV1().Services(namespace).Create(service)
if err != nil {
logrus.Errorf("Error creating service: %v %v", s, err)
}
logrus.Infof("Service is created: %v", s)
return s, err
}
// DeleteService deletes a service
func (k8s *K8s) DeleteService(service *v1.Service, namespace string) error {
return k8s.clientset.CoreV1().Services(namespace).Delete(service.GetName(), &metaV1.DeleteOptions{})
}
// CleanupServices cleans up services
func (k8s *K8s) CleanupServices(services ...string) {
for _, s := range services {
_ = k8s.clientset.CoreV1().Services(k8s.namespace).Delete(s, &metaV1.DeleteOptions{})
}
}
// CreateDeployment creates deployment
func (k8s *K8s) CreateDeployment(deployment *appsv1.Deployment, namespace string) (*appsv1.Deployment, error) {
d, err := k8s.clientset.AppsV1().Deployments(namespace).Create(deployment)
if err != nil {
logrus.Errorf("Error creating deployment: %v %v", d, err)
}
logrus.Infof("Deployment is created: %v", d)
return d, err
}
// DeleteDeployment deletes deployment
func (k8s *K8s) DeleteDeployment(deployment *appsv1.Deployment, namespace string) error {
return k8s.clientset.AppsV1().Deployments(namespace).Delete(deployment.GetName(), &metaV1.DeleteOptions{})
}
// CleanupDeployments cleans up deployment
func (k8s *K8s) CleanupDeployments() {
deployments, _ := k8s.clientset.AppsV1().Deployments(k8s.namespace).List(metaV1.ListOptions{})
for i := range deployments.Items {
d := &deployments.Items[i]
err := k8s.DeleteDeployment(d, k8s.namespace)
if err != nil {
logrus.Errorf("An error during deployment deleting %v", err)
}
}
}
// CreateMutatingWebhookConfiguration creates mutating webhook with configuration
func (k8s *K8s) CreateMutatingWebhookConfiguration(mutatingWebhookConf *arv1beta1.MutatingWebhookConfiguration) (*arv1beta1.MutatingWebhookConfiguration, error) {
awc, err := k8s.clientset.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(mutatingWebhookConf)
if err != nil {
logrus.Errorf("Error creating MutatingWebhookConfiguration: %v %v", awc, err)
}
logrus.Infof("MutatingWebhookConfiguration is created: %v", awc)
return awc, err
}
// DeleteMutatingWebhookConfiguration deletes mutating webhook with configuration
func (k8s *K8s) DeleteMutatingWebhookConfiguration(mutatingWebhookConf *arv1beta1.MutatingWebhookConfiguration) error {
return k8s.clientset.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(mutatingWebhookConf.GetName(), &metaV1.DeleteOptions{})
}
// CleanupMutatingWebhookConfigurations cleans up mutating webhook configurations
func (k8s *K8s) CleanupMutatingWebhookConfigurations() {
mwConfigs, _ := k8s.clientset.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().List(metaV1.ListOptions{})
for _, mwConfig := range mwConfigs.Items {
mwConfig := mwConfig
err := k8s.DeleteMutatingWebhookConfiguration(&mwConfig)
if err != nil {
logrus.Errorf("Error cleaning up mutating webhook configurations: %v", err)
}
}
}
// CreateSecret creates a secret
func (k8s *K8s) CreateSecret(secret *v1.Secret, namespace string) (*v1.Secret, error) {
s, err := k8s.clientset.CoreV1().Secrets(namespace).Create(secret)
if err != nil {
logrus.Errorf("Error creating secret: %v %v", s, err)
}
logrus.Infof("secret is created: %v", s)
return s, err
}
// DeleteSecret deletes a secret
func (k8s *K8s) DeleteSecret(name, namespace string) error {
return k8s.clientset.CoreV1().Secrets(namespace).Delete(name, &metaV1.DeleteOptions{})
}
// CleanupSecrets cleans up the given secrets
func (k8s *K8s) CleanupSecrets(secrets ...string) {
for _, s := range secrets {
_ = k8s.DeleteSecret(s, k8s.namespace)
}
}
// IsPodReady returns whether a pod is ready
func (k8s *K8s) IsPodReady(pod *v1.Pod) bool {
return isPodReady(pod)
}
// CreateConfigMap creates a configmap
func (k8s *K8s) CreateConfigMap(cm *v1.ConfigMap) (*v1.ConfigMap, error) {
return k8s.clientset.CoreV1().ConfigMaps(cm.Namespace).Create(cm)
}
// CleanupConfigMaps cleans up config maps in the namespace
func (k8s *K8s) CleanupConfigMaps() {
// Clean up Network Service Endpoints
configMaps, _ := k8s.clientset.CoreV1().ConfigMaps(k8s.namespace).List(metaV1.ListOptions{})
for _, cm := range configMaps.Items {
_ = k8s.clientset.CoreV1().ConfigMaps(k8s.namespace).Delete(cm.Name, &metaV1.DeleteOptions{})
}
}
// CreateTestNamespace creates a test namespace
func (k8s *K8s) CreateTestNamespace(namespace string) (string, error) {
if len(namespace) == 0 || namespace == "default" {
return "default", nil
}
nsTemplate := &v1.Namespace{
ObjectMeta: metaV1.ObjectMeta{
GenerateName: namespace + "-",
},
}
nsNamespace, err := k8s.clientset.CoreV1().Namespaces().Create(nsTemplate)
if err != nil {
nsRes := ""
if strings.Contains(err.Error(), "already exists") {
nsRes = namespace
}
return nsRes, fmt.Errorf("failed to create a namespace (error: %v)", err)
}
logrus.Printf("namespace %v is created", nsNamespace.GetName())
return nsNamespace.GetName(), nil
}
// DeleteTestNamespace deletes a test namespace
func (k8s *K8s) DeleteTestNamespace(namespace string) error {
if namespace == "default" {
return nil
}
var immediate int64
err := k8s.clientset.CoreV1().Namespaces().Delete(namespace, &metaV1.DeleteOptions{GracePeriodSeconds: &immediate})
if err != nil {
return fmt.Errorf("failed to delete namespace %q (error: %v)", namespace, err)
}
logrus.Printf("namespace %v is deleted", namespace)
return nil
}
// GetNamespace returns a namespace
func (k8s *K8s) GetNamespace(namespace string) (*v1.Namespace, error) {
ns, err := k8s.clientset.CoreV1().Namespaces().Get(namespace, metaV1.GetOptions{})
if err != nil {
err = fmt.Errorf("failed to get namespace %q (error: %v)", namespace, err)
}
return ns, err
}
// GetK8sNamespace returns a namespace
func (k8s *K8s) GetK8sNamespace() string {
return k8s.namespace
}
// CreateRoles creates the requested roles
func (k8s *K8s) CreateRoles(rolesList ...string) ([]nsmrbac.Role, error) {
createdRoles := []nsmrbac.Role{}
for _, kind := range rolesList {
role := nsmrbac.Roles[kind](nsmrbac.RoleNames[kind], k8s.GetK8sNamespace())
err := role.Create(k8s.clientset)
if err != nil {
logrus.Errorf("failed creating role: %v %v", role, err)
return createdRoles, err
} else {
logrus.Infof("role is created: %v", role)
createdRoles = append(createdRoles, role)
}
}
return createdRoles, nil
}
// DeleteRoles deletes the given roles
func (k8s *K8s) DeleteRoles(rolesList []nsmrbac.Role) error {
for i := range rolesList {
err := rolesList[i].Delete(k8s.clientset, rolesList[i].GetName())
if err != nil {
logrus.Errorf("failed deleting role: %v %v", rolesList[i], err)
return err
}
}
return nil
}
// setIPVersion chooses whether or not to use IPv6 in testing
func (k8s *K8s) setIPVersion() {
useIPv6, ok := os.LookupEnv(envUseIPv6)
if !ok {
logrus.Infof("%s not set, using default %t", envUseIPv6, envUseIPv6Default)
k8s.useIPv6 = envUseIPv6Default
} else {
k8s.useIPv6, _ = strconv.ParseBool(useIPv6)
}
}
// UseIPv6 returns which IP version is going to be used in testing
func (k8s *K8s) UseIPv6() bool {
return k8s.useIPv6
}
// setForwardingPlane sets which forwarding plane to be used in testing
func (k8s *K8s) setForwardingPlane() {
plane, ok := os.LookupEnv(pods.EnvForwardingPlane)
if !ok {
logrus.Infof("%s not set, using default dataplane - %s", pods.EnvForwardingPlane, pods.EnvForwardingPlaneDefault)
k8s.forwardingPlane = pods.EnvForwardingPlaneDefault
} else {
logrus.Infof("%s set to: %s", pods.EnvForwardingPlane, plane)
k8s.forwardingPlane = plane
}
}
// GetForwardingPlane gets which forwarding plane is going to be used in testing
func (k8s *K8s) GetForwardingPlane() string {
return k8s.forwardingPlane
}
// GetNSEs returns existing 'nse' resources
func (k8s *K8s) GetNSEs() ([]v1alpha1.NetworkServiceEndpoint, error) {
nseList, err := k8s.versionedClientSet.NetworkservicemeshV1alpha1().NetworkServiceEndpoints("default").List(metaV1.ListOptions{})
if err != nil {
return nil, err
}
return nseList.Items, err
}
// DeleteNSEs deletes 'nse' resources by names
func (k8s *K8s) DeleteNSEs(names ...string) error {
nseClient := k8s.versionedClientSet.NetworkservicemeshV1alpha1().NetworkServiceEndpoints("default")
for _, name := range names {
if err := nseClient.Delete(name, &metaV1.DeleteOptions{}); err != nil {
return err
}
}
return nil
}
// GetNetworkServices returns existing 'networkservice' resources
func (k8s *K8s) GetNetworkServices() ([]v1alpha1.NetworkService, error) {
networkServiceList, err := k8s.versionedClientSet.NetworkservicemeshV1alpha1().NetworkServices("default").List(metaV1.ListOptions{})
if err != nil {
return nil, err
}
return networkServiceList.Items, err
}
// DeleteNetworkServices deletes 'networkservice' resources by names
func (k8s *K8s) DeleteNetworkServices(names ...string) error {
networkServiceClient := k8s.versionedClientSet.NetworkservicemeshV1alpha1().NetworkServices("default")
for _, name := range names {
if err := networkServiceClient.Delete(name, &metaV1.DeleteOptions{}); err != nil {
return err
}
}
return nil
}
| [
"\"KUBECONFIG\"",
"\"HOME\""
] | [] | [
"HOME",
"KUBECONFIG"
] | [] | ["HOME", "KUBECONFIG"] | go | 2 | 0 | |
examples/service/chat/role/page/role_page_example.go | package main
import (
"log"
"os"
"github.com/RJPearson94/twilio-sdk-go"
v2 "github.com/RJPearson94/twilio-sdk-go/service/chat/v2"
"github.com/RJPearson94/twilio-sdk-go/service/chat/v2/service/roles"
"github.com/RJPearson94/twilio-sdk-go/session/credentials"
)
var chatClient *v2.Chat
func init() {
creds, err := credentials.New(credentials.Account{
Sid: os.Getenv("TWILIO_ACCOUNT_SID"),
AuthToken: os.Getenv("TWILIO_AUTH_TOKEN"),
})
if err != nil {
log.Panicf("%s", err.Error())
}
chatClient = twilio.NewWithCredentials(creds).Chat.V2
}
func main() {
resp, err := chatClient.
Service("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").
Roles.
Page(&roles.RolesPageOptions{})
if err != nil {
log.Panicf("%s", err.Error())
}
log.Printf("%v role(s) found on page", len(resp.Roles))
}
| [
"\"TWILIO_ACCOUNT_SID\"",
"\"TWILIO_AUTH_TOKEN\""
] | [] | [
"TWILIO_AUTH_TOKEN",
"TWILIO_ACCOUNT_SID"
] | [] | ["TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID"] | go | 2 | 0 | |
cmd/cnosdb/run/run.go | package run
import (
"fmt"
"os"
"os/signal"
"syscall"
"time"
"github.com/cnosdb/cnosdb/cmd/cnosdb/options"
"github.com/cnosdb/cnosdb/pkg/logger"
"github.com/cnosdb/cnosdb/server"
"github.com/spf13/cobra"
"go.uber.org/zap"
)
var run_examples = ` cnosdb run
cnosdb`
func GetCommand() *cobra.Command {
c := &cobra.Command{
Use: "run",
Short: "run node with existing configuration",
Long: "Runs the CnosDB server.",
Example: run_examples,
CompletionOptions: cobra.CompletionOptions{
DisableDefaultCmd: true,
DisableDescriptions: true,
DisableNoDescFlag: true,
},
PreRun: func(cmd *cobra.Command, args []string) {
if err := logger.InitZapLogger(logger.NewDefaultLogConfig()); err != nil {
fmt.Println("Unable to configure logger.")
}
},
Run: func(cmd *cobra.Command, args []string) {
config, err := ParseConfig(options.Env.GetConfigPath())
if err != nil {
fmt.Printf("parse config: %s\n", err)
}
if err := logger.InitZapLogger(config.Log); err != nil {
fmt.Printf("parse log config: %s\n", err)
}
d := &CnosDB{
Server: server.NewServer(config),
Logger: logger.BgLogger(),
}
if err := d.Server.Open(); err != nil {
fmt.Printf("open server: %s\n", err)
}
signalCh := make(chan os.Signal, 1)
signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM)
// Block until the specified signal is received
<-signalCh
select {
case <-signalCh:
fmt.Println("Second signal received, initializing hard shutdown")
case <-time.After(time.Second * 30):
fmt.Println("Time limit reached, initializing hard shutdown")
}
},
}
return c
}
type CnosDB struct {
Server *server.Server
Logger *zap.Logger
}
// ParseConfig parses the config at path.
// It returns a demo configuration if path is blank.
func ParseConfig(path string) (*server.Config, error) {
// Use demo configuration if no config path is specified.
if path == "" {
logger.BgLogger().Info("No configuration provided, using default settings")
if config, err := server.NewDemoConfig(); err != nil {
return config, err
} else {
if err := config.ApplyEnvOverrides(os.Getenv); err != nil {
return config, fmt.Errorf("apply env config: %v", err)
}
return config, err
}
}
logger.BgLogger().Info("Loading configuration file", zap.String("path", path))
config := server.NewConfig()
if err := config.FromTomlFile(path); err != nil {
return nil, err
}
if err := config.ApplyEnvOverrides(os.Getenv); err != nil {
return config, fmt.Errorf("apply env config: %v", err)
}
return config, nil
}
| [] | [] | [] | [] | [] | go | 0 | 0 | |
test/minikube/minikube_domain_resync_test.go | // +build long
// tests in this file require NuoDB 4.0.7 or newer
package minikube
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"strconv"
"strings"
"testing"
"time"
v12 "k8s.io/api/core/v1"
"github.com/stretchr/testify/assert"
"github.com/nuodb/nuodb-helm-charts/test/testlib"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/gruntwork-io/terratest/modules/k8s"
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func getStatefulSets(t *testing.T, namespaceName string) *appsv1.StatefulSetList {
options := k8s.NewKubectlOptions("", "", namespaceName)
clientset, err := k8s.GetKubernetesClientFromOptionsE(t, options)
assert.NoError(t, err)
statefulSets, err := clientset.AppsV1().StatefulSets(namespaceName).List(context.TODO(), metav1.ListOptions{})
assert.NoError(t, err)
return statefulSets
}
func getGlobalLoadBalancerConfigE(t *testing.T, loadBalancerConfigs []testlib.NuoDBLoadBalancerConfig) (*testlib.NuoDBLoadBalancerConfig, error) {
for _, config := range loadBalancerConfigs {
if config.IsGlobal {
return &config, nil
}
}
return nil, errors.New("Unable to find global load balancer configuration")
}
func getDatabaseLoadBalancerConfigE(t *testing.T, dbName string, loadBalancerConfigs []testlib.NuoDBLoadBalancerConfig) (*testlib.NuoDBLoadBalancerConfig, error) {
for _, config := range loadBalancerConfigs {
if config.DbName == dbName {
return &config, nil
}
}
return nil, errors.New("Unable to find load balancer configuration for database=" + dbName)
}
func verifyProcessLabels(t *testing.T, namespaceName string, adminPod string) (archiveVolumeClaims map[string]int) {
options := k8s.NewKubectlOptions("", "", namespaceName)
output, err := k8s.RunKubectlAndGetOutputE(t, options, "exec", adminPod, "--",
"nuocmd", "--show-json", "get", "processes", "--db-name", "demo")
assert.NoError(t, err, output)
err, objects := testlib.Unmarshal(output)
assert.NoError(t, err, output)
archiveVolumeClaims = make(map[string]int)
for _, obj := range objects {
podName, ok := obj.Labels["pod-name"]
assert.True(t, ok)
// check that Pod exists
pod := k8s.GetPod(t, options, podName)
containerId, ok := obj.Labels["container-id"]
assert.True(t, ok)
// check that Pod has container ID
for _, containerStatus := range pod.Status.ContainerStatuses {
assert.Equal(t, "docker://"+containerId, containerStatus.ContainerID)
}
claimName, ok := obj.Labels["archive-pvc"]
if ok {
assert.Equal(t, "SM", obj.Type, "archive-pvc label should only be present for SMs")
// check that PVC exists
k8s.RunKubectl(t, options, "get", "pvc", claimName)
// add mapping of PVC to archive ID
output, err = k8s.RunKubectlAndGetOutputE(t, options, "exec", adminPod, "--",
"nuocmd", "get", "value", "--key", "archiveVolumeClaims/"+claimName)
assert.NoError(t, err)
archiveId, err := strconv.Atoi(strings.TrimSpace(output))
assert.NoError(t, err)
archiveVolumeClaims[claimName] = archiveId
} else {
assert.Equal(t, "TE", obj.Type, "archive-pvc label should only be absent for TEs")
}
}
return archiveVolumeClaims
}
func verifyLoadBalancer(t *testing.T, namespaceName string, adminPod string, deploymentOptions map[string]string) {
actualLoadBalancerConfigurations, err := testlib.GetLoadBalancerConfigE(t, namespaceName, adminPod)
assert.NoError(t, err)
actualLoadBalancerPolicies, err := testlib.GetLoadBalancerPoliciesE(t, namespaceName, adminPod)
assert.NoError(t, err)
actualGlobalConfig, err := getGlobalLoadBalancerConfigE(t, actualLoadBalancerConfigurations)
assert.NoError(t, err)
actualDatabaseConfig, err := getDatabaseLoadBalancerConfigE(t, "demo", actualLoadBalancerConfigurations)
assert.NoError(t, err)
configuredPolicies := len(deploymentOptions)
for opt, val := range deploymentOptions {
t.Logf("Asserting deployment option %s with value %s", opt, val)
if strings.HasPrefix(opt, "admin.lbConfig.policies.") {
// Verify that named policies are configured properly
policyName := opt[strings.LastIndex(opt, ".")+1:]
actualPolicy, ok := actualLoadBalancerPolicies[policyName]
if assert.True(t, ok, "Unable to find named policy="+policyName) {
assert.Equal(t, val, actualPolicy.LbQuery)
}
} else if opt == "admin.lbConfig.prefilter" {
if actualGlobalConfig != nil {
assert.Equal(t, val, actualGlobalConfig.Prefilter)
}
} else if opt == "admin.lbConfig.default" {
if actualGlobalConfig != nil {
assert.Equal(t, val, actualGlobalConfig.DefaultLbQuery)
}
} else if opt == "database.lbConfig.prefilter" {
if actualDatabaseConfig != nil {
assert.Equal(t, val, actualDatabaseConfig.Prefilter)
}
} else if opt == "database.lbConfig.default" {
if actualDatabaseConfig != nil {
assert.Equal(t, val, actualDatabaseConfig.DefaultLbQuery)
}
} else {
t.Logf("Deployment option %s skipped", opt)
configuredPolicies--
}
}
if deploymentOptions["admin.lbConfig.fullSync"] == "true" {
// Verify that named policies match configured number of policies
t.Logf("Asserting load-balancer policies count is equal to configured policies via Helm")
assert.Equal(t, configuredPolicies, len(actualLoadBalancerPolicies))
}
}
func checkArchives(t *testing.T, namespaceName string, adminPod string, numExpected int, numExpectedRemoved int) (archives []testlib.NuoDBArchive, removedArchives []testlib.NuoDBArchive) {
options := k8s.NewKubectlOptions("", "", namespaceName)
// check archives
output, err := k8s.RunKubectlAndGetOutputE(t, options, "exec", adminPod, "--",
"nuocmd", "--show-json", "get", "archives", "--db-name", "demo")
assert.NoError(t, err, output)
err, archives = testlib.UnmarshalArchives(output)
assert.NoError(t, err)
assert.Equal(t, numExpected, len(archives), output)
// check removed archives
output, err = k8s.RunKubectlAndGetOutputE(t, options, "exec", adminPod, "--",
"nuocmd", "--show-json", "get", "archives", "--db-name", "demo", "--removed")
assert.NoError(t, err, output)
err, removedArchives = testlib.UnmarshalArchives(output)
assert.NoError(t, err)
assert.Equal(t, numExpectedRemoved, len(removedArchives), output)
return
}
func checkInitialMembership(t assert.TestingT, configJson string, expectedSize int) {
type initialMembershipEntry struct {
Transport string `json:"transport"`
Version string `json:"version"`
}
var adminConfig struct {
InitialMembership map[string]initialMembershipEntry `json:"initialMembership"`
}
dec := json.NewDecoder(strings.NewReader(configJson))
err := dec.Decode(&adminConfig)
if err != io.EOF {
assert.NoError(t, err, "Unable to deserialize admin config")
}
assert.Equal(t, expectedSize, len(adminConfig.InitialMembership))
}
func TestReprovisionAdmin0(t *testing.T) {
testlib.AwaitTillerUp(t)
defer testlib.VerifyTeardown(t)
defer testlib.Teardown(testlib.TEARDOWN_ADMIN)
helmChartReleaseName, namespaceName := testlib.StartAdmin(t, &helm.Options{
SetValues: map[string]string{
"admin.replicas": "2",
"admin.bootstrapServers": "2",
},
}, 2, "")
adminStatefulSet := helmChartReleaseName + "-nuodb-cluster0"
admin0 := adminStatefulSet + "-0"
admin1 := adminStatefulSet + "-1"
// get OLD logs
go testlib.GetAppLog(t, namespaceName, admin0, "-previous", &v12.PodLogOptions{Follow: true})
// check initial membership on admin-0
options := k8s.NewKubectlOptions("", "", namespaceName)
output, err := k8s.RunKubectlAndGetOutputE(t, options, "exec", admin0, "--",
"nuocmd", "--show-json", "get", "server-config", "--this-server")
assert.NoError(t, err, output)
checkInitialMembership(t, output, 2)
// check initial membership on admin-1
output, err = k8s.RunKubectlAndGetOutputE(t, options, "exec", admin1, "--",
"nuocmd", "--show-json", "get", "server-config", "--this-server")
assert.NoError(t, err, output)
checkInitialMembership(t, output, 2)
// store a value in the KV store via admin-0
k8s.RunKubectl(t, options, "exec", admin0, "--",
"nuocmd", "set", "value", "--key", "testKey", "--value", "0", "--unconditional")
// save the original Pod object
originalPod := k8s.GetPod(t, options, admin0)
// delete Raft data and Pod for admin-0
k8s.RunKubectl(t, options, "exec", admin0, "--",
"bash", "-c", "rm $NUODB_VARDIR/raftlog")
k8s.RunKubectl(t, options, "delete", "pod", admin0)
// wait until the Pod is rescheduled
testlib.AwaitPodObjectRecreated(t, namespaceName, originalPod, 300*time.Second)
testlib.AwaitPodUp(t, namespaceName, admin0, 300*time.Second)
// make sure admin0 rejoins
k8s.RunKubectl(t, options, "exec", admin1, "--",
"nuocmd", "check", "servers", "--check-connected", "--num-servers", "2", "--check-leader", "--timeout", "300")
k8s.RunKubectl(t, options, "exec", admin0, "--",
"nuocmd", "check", "servers", "--check-connected", "--num-servers", "2", "--check-leader", "--timeout", "300")
// conditionally update value in the KV store via admin-0; if admin-0
// rejoined with admin-1 rather than bootstrapping a new domain, then it
// should have the current value
k8s.RunKubectl(t, options, "exec", admin0, "--",
"nuocmd", "set", "value", "--key", "testKey", "--value", "1", "--expected-value", "0")
// conditionally update value in the KV store via admin-1
k8s.RunKubectl(t, options, "exec", admin1, "--",
"nuocmd", "set", "value", "--key", "testKey", "--value", "2", "--expected-value", "1")
}
func TestAdminScaleDown(t *testing.T) {
testlib.AwaitTillerUp(t)
defer testlib.VerifyTeardown(t)
defer testlib.Teardown(testlib.TEARDOWN_ADMIN)
helmChartReleaseName, namespaceName := testlib.StartAdmin(t, &helm.Options{
SetValues: map[string]string{
"admin.replicas": "2",
},
}, 2, "")
adminStatefulSet := helmChartReleaseName + "-nuodb-cluster0"
admin0 := adminStatefulSet + "-0"
admin1 := adminStatefulSet + "-1"
// get OLD logs
go testlib.GetAppLog(t, namespaceName, admin1, "-previous", &v12.PodLogOptions{Follow: true})
// scale down Admin StatefulSet
options := k8s.NewKubectlOptions("", "", namespaceName)
k8s.RunKubectl(t, options, "scale", "statefulset", adminStatefulSet, "--replicas=1")
// wait for scaled-down Admin to show as "Disconnected"
testlib.Await(t, func() bool {
output, _ := k8s.RunKubectlAndGetOutputE(t, options, "exec", admin0, "--",
"nuocmd", "show", "domain", "--server-format", "{id} {connected_state}")
return strings.Contains(output, admin1+" Disconnected")
}, 300*time.Second)
// wait for scaled-down Admin Pod to be deleted
testlib.AwaitNoPods(t, namespaceName, admin1)
// commit a Raft command to confirm that remaining Admin has consensus
k8s.RunKubectl(t, options, "exec", admin0, "--",
"nuocmd", "set", "value", "--key", "testKey", "--value", "testValue", "--unconditional")
// admin1 is still in membership, though it is excluded from consensus;
// delete PVC to cause it to be completely removed from the membership;
// this should allow the Admin health-check to succeed
k8s.RunKubectl(t, options, "delete", "pvc", "raftlog-"+admin1)
k8s.RunKubectl(t, options, "exec", admin0, "--",
"nuocmd", "check", "servers", "--check-connected", "--num-servers", "1", "--check-leader", "--timeout", "300")
// scale up Admin StatefulSet and make sure admin1 rejoins
k8s.RunKubectl(t, options, "scale", "statefulset", adminStatefulSet, "--replicas=2")
k8s.RunKubectl(t, options, "exec", admin0, "--",
"nuocmd", "check", "servers", "--check-connected", "--num-servers", "2", "--check-leader", "--timeout", "300")
k8s.RunKubectl(t, options, "exec", admin1, "--",
"nuocmd", "check", "servers", "--check-connected", "--num-servers", "2", "--check-leader", "--timeout", "300")
}
func TestDomainResync(t *testing.T) {
if os.Getenv("NUODB_LICENSE") != "ENTERPRISE" {
t.Skip("Cannot test resync without the Enterprise Edition")
}
testlib.AwaitTillerUp(t)
defer testlib.VerifyTeardown(t)
defer testlib.Teardown(testlib.TEARDOWN_ADMIN)
helmChartReleaseName, namespaceName := testlib.StartAdmin(t, &helm.Options{}, 1, "")
admin0 := fmt.Sprintf("%s-nuodb-cluster0-0", helmChartReleaseName)
defer testlib.Teardown(testlib.TEARDOWN_DATABASE) // ensure resources allocated in called functions are released when this function exits
testlib.StartDatabase(t, namespaceName, admin0, &helm.Options{
SetValues: map[string]string{
"database.sm.resources.requests.cpu": "0.25",
"database.sm.resources.requests.memory": testlib.MINIMAL_VIABLE_ENGINE_MEMORY,
"database.te.resources.requests.cpu": "0.25",
"database.te.resources.requests.memory": testlib.MINIMAL_VIABLE_ENGINE_MEMORY,
},
})
originalArchiveVolumeClaims := verifyProcessLabels(t, namespaceName, admin0)
assert.Equal(t, 1, len(originalArchiveVolumeClaims))
originalArchiveId := -1
for _, archiveId := range originalArchiveVolumeClaims {
originalArchiveId = archiveId
}
assert.True(t, originalArchiveId != -1)
// update replica count
options := k8s.NewKubectlOptions("", "", namespaceName)
statefulSets := getStatefulSets(t, namespaceName).Items
assert.Equal(t, 3, len(statefulSets), "Expected 3 StatefulSets: Admin, SM, and hotcopy SM")
// by default the hotcopy SM replica count is 1 and regular SM count is 0
// scale regular SM replica count up to 1
smStatefulSet := ""
for _, statefulSet := range statefulSets {
name := statefulSet.Name
if strings.HasPrefix(name, "sm-") && !strings.Contains(name, "hotcopy") {
k8s.RunKubectl(t, options, "scale", "statefulset", name, "--replicas=1")
smStatefulSet = name
}
}
assert.True(t, smStatefulSet != "")
testlib.AwaitDatabaseUp(t, namespaceName, admin0, "demo", 3)
checkArchives(t, namespaceName, admin0, 2, 0)
// scale hotcopy SM replica count down to 0
hotCopySmStatefulSet := ""
for _, statefulSet := range statefulSets {
name := statefulSet.Name
if strings.Contains(name, "hotcopy") {
k8s.RunKubectl(t, options, "scale", "statefulset", name, "--replicas=0")
hotCopySmStatefulSet = name
}
}
assert.True(t, hotCopySmStatefulSet != "")
testlib.AwaitDatabaseUp(t, namespaceName, admin0, "demo", 2)
// check that archive ID generated by hotcopy SM was removed
_, removedArchives := checkArchives(t, namespaceName, admin0, 1, 1)
assert.Equal(t, originalArchiveId, removedArchives[0].Id)
// scale hotcopy SM replica count back up to 1; the removed archive ID should be resurrected
k8s.RunKubectl(t, options, "scale", "statefulset", hotCopySmStatefulSet, "--replicas=1")
testlib.AwaitDatabaseUp(t, namespaceName, admin0, "demo", 3)
checkArchives(t, namespaceName, admin0, 2, 0)
// scale hotcopy SM replica count back down to 0
k8s.RunKubectl(t, options, "scale", "statefulset", hotCopySmStatefulSet, "--replicas=0")
testlib.AwaitDatabaseUp(t, namespaceName, admin0, "demo", 2)
checkArchives(t, namespaceName, admin0, 1, 1)
// explicitly delete the scaled-down PVC and make sure the archive ID is purged
for claimName := range originalArchiveVolumeClaims {
k8s.RunKubectl(t, options, "delete", "pvc", claimName)
}
testlib.Await(t, func() bool {
checkArchives(t, namespaceName, admin0, 1, 0)
return true
}, 300*time.Second)
}
func TestLoadBalancerConfigurationFullResync(t *testing.T) {
testlib.AwaitTillerUp(t)
defer testlib.VerifyTeardown(t)
defer testlib.Teardown(testlib.TEARDOWN_ADMIN)
options := &helm.Options{
SetValues: map[string]string{
"admin.lbConfig.prefilter": "not(label(region tiebreaker))",
"admin.lbConfig.default": "random(first(label(node node1) any))",
"admin.lbConfig.policies.zone1": "round_robin(first(label(zone zone1) any))",
"admin.lbConfig.policies.nearest": "random(first(label(pod ${pod:-}) label(node ${node:-}) label(zone ${zone:-}) any))",
"admin.lbConfig.fullSync": "true",
"database.sm.resources.requests.cpu": testlib.MINIMAL_VIABLE_ENGINE_CPU,
"database.sm.resources.requests.memory": testlib.MINIMAL_VIABLE_ENGINE_MEMORY,
"database.te.resources.requests.cpu": testlib.MINIMAL_VIABLE_ENGINE_CPU,
"database.te.resources.requests.memory": testlib.MINIMAL_VIABLE_ENGINE_MEMORY,
"database.lbConfig.prefilter": "not(label(zone DR))",
"database.lbConfig.default": "random(first(label(node ${NODE_NAME:-}) any))",
},
}
helmChartReleaseName, namespaceName := testlib.StartAdmin(t, options, 1, "")
admin0 := fmt.Sprintf("%s-nuodb-cluster0-0", helmChartReleaseName)
defer testlib.Teardown(testlib.TEARDOWN_DATABASE) // ensure resources allocated in called functions are released when this function exits
testlib.StartDatabase(t, namespaceName, admin0, options)
// Configure one manual policy
// It should be deleted after next resync
k8s.RunKubectl(t, k8s.NewKubectlOptions("", "", namespaceName), "exec", admin0, "--",
"nuocmd", "set", "load-balancer", "--policy-name", "manual", "--lb-query", "random(any)")
// Wait for at least two triggered LB syncs and check expected configuration
testlib.AwaitNrLoadBalancerPolicies(t, namespaceName, admin0, 6)
verifyLoadBalancer(t, namespaceName, admin0, options.SetValues)
}
func TestLoadBalancerConfigurationResync(t *testing.T) {
testlib.AwaitTillerUp(t)
defer testlib.VerifyTeardown(t)
defer testlib.Teardown(testlib.TEARDOWN_ADMIN)
options := &helm.Options{
SetValues: map[string]string{
"admin.lbConfig.prefilter": "not(label(region tiebreaker))",
"admin.lbConfig.policies.zone1": "round_robin(first(label(zone zone1) any))",
"admin.lbConfig.policies.nearest": "random(first(label(pod ${pod:-}) label(node ${node:-}) label(zone ${zone:-}) any))",
"database.sm.resources.requests.cpu": testlib.MINIMAL_VIABLE_ENGINE_CPU,
"database.sm.resources.requests.memory": testlib.MINIMAL_VIABLE_ENGINE_MEMORY,
"database.te.resources.requests.cpu": testlib.MINIMAL_VIABLE_ENGINE_CPU,
"database.te.resources.requests.memory": testlib.MINIMAL_VIABLE_ENGINE_MEMORY,
"database.lbConfig.prefilter": "not(label(zone DR))",
"database.lbConfig.default": "random(first(label(node ${NODE_NAME:-}) any))",
},
}
helmChartReleaseName, namespaceName := testlib.StartAdmin(t, options, 1, "")
admin0 := fmt.Sprintf("%s-nuodb-cluster0-0", helmChartReleaseName)
defer testlib.Teardown(testlib.TEARDOWN_DATABASE) // ensure resources allocated in called functions are released when this function exits
testlib.StartDatabase(t, namespaceName, admin0, options)
// Configure one manual policy and global default expression
// By default "admin.lbConfig.fullSync" is set to false.
// Hence we are not deleting manual load balancer configuration but adding and updating existing config.
k8s.RunKubectl(t, k8s.NewKubectlOptions("", "", namespaceName), "exec", admin0, "--",
"nuocmd", "set", "load-balancer", "--policy-name", "manual", "--lb-query", "random(any)")
k8s.RunKubectl(t, k8s.NewKubectlOptions("", "", namespaceName), "exec", admin0, "--",
"nuocmd", "set", "load-balancer-config", "--default", "random(first(label(node node1) any))", "--is-global")
// Wait for at least two triggered LB syncs and check expected configuration
testlib.AwaitNrLoadBalancerPolicies(t, namespaceName, admin0, 7)
// Add manual configurations to the options so that they can be asserted
options.SetValues["admin.lbConfig.default"] = "random(first(label(node node1) any))"
options.SetValues["admin.lbConfig.policies.manual"] = "random(any)"
verifyLoadBalancer(t, namespaceName, admin0, options.SetValues)
}
| [
"\"NUODB_LICENSE\""
] | [] | [
"NUODB_LICENSE"
] | [] | ["NUODB_LICENSE"] | go | 1 | 0 | |
server/src/main/java/edp/core/common/jdbc/JdbcDataSource.java | /*
* <<
* Davinci
* ==
* Copyright (C) 2016 - 2019 EDP
* ==
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* >>
*
*/
package edp.core.common.jdbc;
import com.alibaba.druid.pool.DruidDataSource;
import com.alibaba.druid.util.StringUtils;
import edp.core.consts.Consts;
import edp.core.enums.DataTypeEnum;
import edp.core.exception.SourceException;
import edp.core.model.JdbcSourceInfo;
import edp.core.utils.CollectionUtils;
import edp.core.utils.CustomDataSourceUtils;
import edp.core.utils.SourceUtils;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
import java.io.File;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import static edp.core.consts.Consts.JDBC_DATASOURCE_DEFAULT_VERSION;
@Slf4j
@Component
public class JdbcDataSource {
@Value("${source.max-active:8}")
@Getter
protected int maxActive;
@Value("${source.initial-size:0}")
@Getter
protected int initialSize;
@Value("${source.min-idle:1}")
@Getter
protected int minIdle;
@Value("${source.max-wait:60000}")
@Getter
protected long maxWait;
@Value("${source.time-between-eviction-runs-millis}")
@Getter
protected long timeBetweenEvictionRunsMillis;
@Value("${source.min-evictable-idle-time-millis}")
@Getter
protected long minEvictableIdleTimeMillis;
@Value("${source.max-evictable-idle-time-millis}")
@Getter
protected long maxEvictableIdleTimeMillis;
@Value("${source.time-between-connect-error-millis}")
@Getter
protected long timeBetweenConnectErrorMillis;
@Value("${source.test-while-idle}")
@Getter
protected boolean testWhileIdle;
@Value("${source.test-on-borrow}")
@Getter
protected boolean testOnBorrow;
@Value("${source.test-on-return}")
@Getter
protected boolean testOnReturn;
@Value("${source.break-after-acquire-failure:true}")
@Getter
protected boolean breakAfterAcquireFailure;
@Value("${source.connection-error-retry-attempts:1}")
@Getter
protected int connectionErrorRetryAttempts;
@Value("${source.keep-alive:false}")
@Getter
protected boolean keepAlive;
@Value("${source.validation-query-timeout:5}")
@Getter
protected int validationQueryTimeout;
@Value("${source.validation-query}")
@Getter
protected String validationQuery;
@Value("${source.filters}")
@Getter
protected String filters;
private static volatile Map<String, DruidDataSource> dataSourceMap = new ConcurrentHashMap<>();
private static volatile Map<String, Lock> dataSourceLockMap = new ConcurrentHashMap<>();
private static final Object lockLock = new Object();
private Lock getDataSourceLock(String key) {
if (dataSourceLockMap.containsKey(key)) {
return dataSourceLockMap.get(key);
}
synchronized (lockLock) {
if (dataSourceLockMap.containsKey(key)) {
return dataSourceLockMap.get(key);
}
Lock lock = new ReentrantLock();
dataSourceLockMap.put(key, lock);
return lock;
}
}
/**
* only for test
* @param jdbcSourceInfo
* @return
*/
public boolean isDataSourceExist(JdbcSourceInfo jdbcSourceInfo) {
return dataSourceMap.containsKey(getDataSourceKey(jdbcSourceInfo));
}
public void removeDatasource(JdbcSourceInfo jdbcSourceInfo) {
String key = getDataSourceKey(jdbcSourceInfo);
Lock lock = getDataSourceLock(key);
if (!lock.tryLock()) {
return;
}
try {
DruidDataSource druidDataSource = dataSourceMap.remove(key);
if (druidDataSource != null) {
druidDataSource.close();
}
dataSourceLockMap.remove(key);
}finally {
lock.unlock();
}
}
public DruidDataSource getDataSource(JdbcSourceInfo jdbcSourceInfo) throws SourceException {
String jdbcUrl = jdbcSourceInfo.getJdbcUrl();
String username = jdbcSourceInfo.getUsername();
String password = jdbcSourceInfo.getPassword();
String dbVersion = jdbcSourceInfo.getDbVersion();
boolean ext = jdbcSourceInfo.isExt();
String key = getDataSourceKey(jdbcSourceInfo);
DruidDataSource druidDataSource = dataSourceMap.get(key);
if (druidDataSource != null && !druidDataSource.isClosed()) {
return druidDataSource;
}
Lock lock = getDataSourceLock(key);
try {
if (!lock.tryLock(30L, TimeUnit.SECONDS)) {
druidDataSource = dataSourceMap.get(key);
if (druidDataSource != null && !druidDataSource.isClosed()) {
return druidDataSource;
}
throw new SourceException("Unable to get datasource for jdbcUrl: " + jdbcUrl);
}
}
catch (InterruptedException e) {
throw new SourceException("Unable to get datasource for jdbcUrl: " + jdbcUrl);
}
druidDataSource = dataSourceMap.get(key);
if (druidDataSource != null && !druidDataSource.isClosed()) {
lock.unlock();
return druidDataSource;
}
druidDataSource = new DruidDataSource();
try {
if (StringUtils.isEmpty(dbVersion) ||
!ext || JDBC_DATASOURCE_DEFAULT_VERSION.equals(dbVersion)) {
String className = SourceUtils.getDriverClassName(jdbcUrl, null);
try {
Class.forName(className);
} catch (ClassNotFoundException e) {
throw new SourceException("Unable to get driver instance for jdbcUrl: " + jdbcUrl);
}
druidDataSource.setDriverClassName(className);
} else {
druidDataSource.setDriverClassName(CustomDataSourceUtils.getInstance(jdbcUrl, dbVersion).getDriver());
String path = System.getenv("DAVINCI3_HOME") + File.separator + String.format(Consts.PATH_EXT_FORMATTER, jdbcSourceInfo.getDatabase(), dbVersion);
druidDataSource.setDriverClassLoader(ExtendedJdbcClassLoader.getExtJdbcClassLoader(path));
}
druidDataSource.setUrl(jdbcUrl);
druidDataSource.setUsername(username);
if (!jdbcUrl.toLowerCase().contains(DataTypeEnum.PRESTO.getFeature())) {
druidDataSource.setPassword(password);
}
druidDataSource.setInitialSize(initialSize);
druidDataSource.setMinIdle(minIdle);
druidDataSource.setMaxActive(maxActive);
druidDataSource.setMaxWait(maxWait);
druidDataSource.setTimeBetweenEvictionRunsMillis(timeBetweenEvictionRunsMillis);
druidDataSource.setMinEvictableIdleTimeMillis(minEvictableIdleTimeMillis);
druidDataSource.setMaxEvictableIdleTimeMillis(maxEvictableIdleTimeMillis);
druidDataSource.setTimeBetweenConnectErrorMillis(timeBetweenConnectErrorMillis);
druidDataSource.setTestWhileIdle(testWhileIdle);
druidDataSource.setTestOnBorrow(testOnBorrow);
druidDataSource.setTestOnReturn(testOnReturn);
druidDataSource.setConnectionErrorRetryAttempts(connectionErrorRetryAttempts);
druidDataSource.setBreakAfterAcquireFailure(breakAfterAcquireFailure);
druidDataSource.setKeepAlive(keepAlive);
druidDataSource.setValidationQueryTimeout(validationQueryTimeout);
druidDataSource.setValidationQuery(validationQuery);
druidDataSource.setRemoveAbandoned(true);
druidDataSource.setRemoveAbandonedTimeout(3600 + 5 * 60);
druidDataSource.setLogAbandoned(true);
// default validation query
String driverName = druidDataSource.getDriverClassName();
if (driverName.indexOf("sqlserver") != -1 || driverName.indexOf("mysql") != -1
|| driverName.indexOf("h2") != -1 || driverName.indexOf("moonbox") != -1) {
druidDataSource.setValidationQuery("select 1");
}
if (driverName.indexOf("oracle") != -1) {
druidDataSource.setValidationQuery("select 1 from dual");
}
if (driverName.indexOf("elasticsearch") != -1) {
druidDataSource.setValidationQuery(null);
}
if (!CollectionUtils.isEmpty(jdbcSourceInfo.getProperties())) {
Properties properties = new Properties();
jdbcSourceInfo.getProperties().forEach(dict -> properties.setProperty(dict.getKey(), dict.getValue()));
druidDataSource.setConnectProperties(properties);
}
try {
druidDataSource.setFilters(filters);
druidDataSource.init();
} catch (Exception e) {
log.error("Exception during pool initialization", e);
throw new SourceException(e.getMessage());
}
dataSourceMap.put(key, druidDataSource);
}finally {
lock.unlock();
}
return druidDataSource;
}
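// Illustrative usage sketch (how the JdbcSourceInfo instance is built is assumed, not shown here):
//   @Autowired private JdbcDataSource jdbcDataSource;
//   ...
//   DruidDataSource ds = jdbcDataSource.getDataSource(jdbcSourceInfo);
//   try (Connection connection = ds.getConnection()) {
//       // run statements against the configured source
//   }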
private String getDataSourceKey (JdbcSourceInfo jdbcSourceInfo) {
return SourceUtils.getKey(jdbcSourceInfo.getJdbcUrl(),
jdbcSourceInfo.getUsername(),
jdbcSourceInfo.getPassword(),
jdbcSourceInfo.getDbVersion(),
jdbcSourceInfo.isExt());
}
}
| [
"\"DAVINCI3_HOME\""
] | [] | [
"DAVINCI3_HOME"
] | [] | ["DAVINCI3_HOME"] | java | 1 | 0 | |
dct_backend/reviews.py | from fastapi import APIRouter, Response, status
from dotenv import load_dotenv
import os
from database import Database
from pydantic import BaseModel
load_dotenv()
db = Database(os.getenv('DB_USERNAME'), os.getenv('DB_PASSWORD'))
db.connect()
food_db = db.client["food"] # from food collection
review_collection = food_db["reviews"] # review --> sub collection
router = APIRouter(prefix="/reviews")
class Reviews(BaseModel):
"""
food_name - the name or id associated with a food item
rating - 1 to 5 stars
review - written review
email - unique ID for each user of the quack app
dry_run - when true, validate the request without persisting it
"""
food_name: str
rating: int
review: str
email: str
dry_run: bool = False
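# Illustrative request body for POST /reviews/review (all values are made up):
# {
#     "food_name": "veggie burrito",
#     "rating": 4,
#     "review": "Fresh and filling.",
#     "email": "[email protected]",
#     "dry_run": true
# }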
@router.post("/review", status_code=status.HTTP_201_CREATED)
async def save_review_in_db(resp: Response, review_rating: Reviews):
"""
if a user has already reviewed an item, return an error
else insert a review
*only saves reviews that are not tests
"""
if review_rating.food_name == "" or review_rating.food_name != review_rating.food_name.lower():
resp.status_code = status.HTTP_400_BAD_REQUEST
return {"err": "Inappropriate food name"}
if review_rating.email == "":
resp.status_code = status.HTTP_400_BAD_REQUEST
return {"err": "No email specified"}
# review_rating.food_name = review_rating.food_name.lower() # to lowercase
if review_collection.count_documents({"email": review_rating.email, "foodID": review_rating.food_name}, limit = 1) > 0: # each user can add one review per food item
resp.status_code = status.HTTP_400_BAD_REQUEST
return {"err": "User has already made a review. Try editing existing review"}
if not review_rating.dry_run:
review_collection.insert_one({"food_name": review_rating.food_name, "rating": review_rating.rating, "review": review_rating.review, "email": review_rating.email})
resp.status_code = status.HTTP_201_CREATED
return {"msg": "Review has been successfully created."}
@router.post("/edit-review", status_code=status.HTTP_201_CREATED)
async def edit_review_in_db(resp: Response, review_rating: Reviews, dry_run = False):
"""
if a review does not exist, return an error
else delete old review and insert new review
*only edits reviews that are not tests
"""
if(dry_run):
if review_rating.food_name != review_rating.food_name.lower():# check if food_name is in valid format!
return False # wrong food_name format
else:
return True
if not dry_run: # not a dry run, apply the edit
if review_rating.food_name == "":
resp.status_code = status.HTTP_400_BAD_REQUEST
return {"err": "Empty foodID"}
if review_rating.email == "":
resp.status_code = status.HTTP_400_BAD_REQUEST
return {"err": "No email specified"}
review_rating.food_name = review_rating.food_name.lower() # to lowercase
if review_collection.count_documents({"email": review_rating.username,"foodID": review_rating.food_name}, limit = 1) == 0: # review does not exist for specified food item
resp.status_code = status.HTTP_400_BAD_REQUEST
return {"err": "User's review does not exist"}
review_collection.delete_one({"email": review_rating.email, "foodID": review_rating.food_name}) # delete old review
review_collection.insert_one({"foodID": review_rating.food_name, "rating": review_rating.rating, "review": review_rating.review, "email": review_rating.email}) # replacement
resp.status_code = status.HTTP_201_CREATED
return {"msg": "Review has been successfully edited."}
@router.get("/get-reviews")
async def get_reviews(resp: Response, review_rating: Reviews):
"""
returns all reviews for food item that matches 'foodID'
"""
review_rating.food_name = review_rating.food_name.lower() # to lowercase
return list(review_collection.find({"foodID": review_rating.food_name}, {"_id": 0}))
| [] | [] | [
"DB_USERNAME",
"DB_PASSWORD"
] | [] | ["DB_USERNAME", "DB_PASSWORD"] | python | 2 | 0 | |
Crawler/crawler.py | import abc
import argparse
import requests
import logging
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
logger = logging.getLogger('crawler')
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)4s -'
' [%(filename)s:%(lineno)5s -'
'%(funcName)10s() ] - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
class BaseWriter(abc.ABC):
@abc.abstractmethod
def save(self):
pass
class BaseCrawler(abc.ABC):
@abc.abstractmethod
def start(self):
pass
class WriterLocalFile(BaseWriter):
"""
writer object we can use to write a result
for example to the file on a local machine
or to the AWS, GAE or Microsoft Asure
or send by a network to another microservice.
"""
def save(self, filename, data):
with open(filename, 'w') as f:
try:
f.write(data)
except IOError:
logger.error('Failed to save data, IOError occurred.',
exc_info=True)
except Exception as e:
logger.error('An unexpected error occurred,'
' failed to write data on disk.',
exc_info=True)
class CrawlerSnap(BaseCrawler):
writer = None
url = None
filename = None
def __init__(self, url=None, filename=None, writer=None):
self.url = url
self.filename = filename
self.writer = writer()
def download(self, timeout=6):
logger.info("* Selenium webdrivir initialization...")
browser = webdriver.PhantomJS()
browser.get(self.url)
try:
element_present = EC.presence_of_element_located(
(By.CLASS_NAME, 'styles__roles-table-term___1HCOC'))
WebDriverWait(browser, timeout).until(element_present)
except TimeoutException:
logger.warn("Timed out waiting for page to load.")
html = browser.page_source
logger.info("* A web page was loaded...")
return html
def start(self):
data = self.download()
logger.info("* Writing data...")
if (self.writer):
self.writer.save(self.filename, data)
else:
raise UnboundLocalError('No writer class supplied to Crawler!')
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Crawler downloads and saves data from web pages.')
parser.add_argument('url', metavar='url', type=str,
help='URL of the web page that'
' will be downloaded and saved.')
parser.add_argument('filename', metavar='filename', type=str,
help='name of the file where the HTML page'
' will be saved.')
args = parser.parse_args()
url = args.url
filename = args.filename
logger.info("* Creating crawler...")
crawler = CrawlerSnap(url, filename, writer=WriterLocalFile)
logger.info("* Crawler started...")
crawler.start()
| [] | [] | [] | [] | [] | python | null | null | null |
python/catkin/builder.py | # Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import copy
import io
import multiprocessing
import os
import platform
import re
import stat
import subprocess
import sys
try:
from catkin_pkg.cmake import configure_file, get_metapackage_cmake_template_path
from catkin_pkg.packages import find_packages
from catkin_pkg.topological_order import topological_order_packages
except ImportError as e:
sys.exit(
'ImportError: "from catkin_pkg.topological_order import '
'topological_order" failed: %s\nMake sure that you have installed '
'"catkin_pkg", it is up to date and on the PYTHONPATH.' % e
)
from catkin.cmake import get_cmake_path
from catkin.terminal_color import ansi, disable_ANSI_colors, fmt, sanitize
def split_arguments(args, splitter_name, default=None):
if splitter_name not in args:
return args, default
index = args.index(splitter_name)
return args[0:index], args[index + 1:]
def extract_cmake_and_make_arguments(args):
args, cmake_args, make_args, _ = _extract_cmake_and_make_arguments(args, extract_catkin_make=False)
return args, cmake_args, make_args
def extract_cmake_and_make_and_catkin_make_arguments(args):
return _extract_cmake_and_make_arguments(args, extract_catkin_make=True)
def _extract_cmake_and_make_arguments(args, extract_catkin_make):
cmake_args = []
make_args = []
catkin_make_args = []
arg_types = {
'--cmake-args': cmake_args,
'--make-args': make_args
}
if extract_catkin_make:
arg_types['--catkin-make-args'] = catkin_make_args
arg_indexes = {}
for k in arg_types.keys():
if k in args:
arg_indexes[args.index(k)] = k
for index in reversed(sorted(arg_indexes.keys())):
arg_type = arg_indexes[index]
args, specific_args = split_arguments(args, arg_type)
arg_types[arg_type].extend(specific_args)
# classify -D* and -G* arguments as cmake specific arguments
implicit_cmake_args = [a for a in args if a.startswith('-D') or a.startswith('-G')]
args = [a for a in args if a not in implicit_cmake_args]
return args, implicit_cmake_args + cmake_args, make_args, catkin_make_args
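# Illustrative example (hypothetical command line):
#   _extract_cmake_and_make_arguments(
#       ['--source', 'src', '--cmake-args', '-DCMAKE_BUILD_TYPE=Release', '--make-args', '-j4'],
#       extract_catkin_make=True)
#   returns (['--source', 'src'], ['-DCMAKE_BUILD_TYPE=Release'], ['-j4'], [])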
def cprint(msg, end=None):
print(fmt(msg), end=end)
def colorize_line(line):
cline = sanitize(line)
cline = cline.replace(
'-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~',
'-- @{pf}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~@|'
)
if line.startswith('-- ~~'):
# -- ~~ -
cline = cline.replace('~~ ', '@{pf}~~ @|')
cline = cline.replace(' - ', ' - @!@{bf}')
cline = cline.replace('(', '@|(')
cline = cline.replace('(plain cmake)', '@|(@{rf}plain cmake@|)')
cline = cline.replace('(unknown)', '@|(@{yf}unknown@|)')
if line.startswith('-- +++'):
# -- +++ add_subdirectory(package)
cline = cline.replace('+++', '@!@{gf}+++@|')
cline = cline.replace('kin package: \'', 'kin package: \'@!@{bf}')
cline = cline.replace(')', '@|)')
cline = cline.replace('\'\n', '@|\'\n')
cline = cline.replace('cmake package: \'', 'cmake package: \'@!@{bf}')
cline = cline.replace('\'\n', '@|\'\n')
if line.startswith('-- ==>'):
cline = cline.replace('-- ==>', '-- @!@{bf}==>@|')
if line.lower().startswith('warning'):
# WARNING
cline = ansi('yf') + cline
if line.startswith('CMake Warning'):
# CMake Warning...
cline = cline.replace('CMake Warning', '@{yf}@!CMake Warning@|')
if line.startswith('ERROR:'):
# ERROR:
cline = cline.replace('ERROR:', '@!@{rf}ERROR:@|')
if line.startswith('CMake Error'):
# CMake Error...
cline = cline.replace('CMake Error', '@{rf}@!CMake Error@|')
if line.startswith('Call Stack (most recent call first):'):
# CMake Call Stack
cline = cline.replace('Call Stack (most recent call first):',
'@{cf}@_Call Stack (most recent call first):@|')
return fmt(cline)
def print_command_banner(cmd, cwd, color):
if color:
# Prepare for printing
cmd_str = sanitize(' '.join(cmd))
cwd_str = sanitize(cwd)
# Print command notice
cprint('@{bf}####')
cprint('@{bf}#### Running command: @!"%s"@|@{bf} in @!"%s"' % (cmd_str, cwd_str))
cprint('@{bf}####')
else:
print('####')
print('#### Running command: "%s" in "%s"' % (' '.join(cmd), cwd))
print('####')
def run_command_colorized(cmd, cwd, quiet=False, add_env=None):
run_command(cmd, cwd, quiet=quiet, colorize=True, add_env=add_env)
def run_command(cmd, cwd, quiet=False, colorize=False, add_env=None):
capture = (quiet or colorize)
stdout_pipe = subprocess.PIPE if capture else None
stderr_pipe = subprocess.STDOUT if capture else None
env = None
if add_env:
env = copy.copy(os.environ)
env.update(add_env)
try:
proc = subprocess.Popen(
cmd, cwd=cwd, shell=False,
stdout=stdout_pipe, stderr=stderr_pipe,
env=env
)
except OSError as e:
raise OSError("Failed command '%s': %s" % (cmd, e))
out = io.StringIO() if quiet else sys.stdout
if capture:
while True:
line = unicode(proc.stdout.readline().decode('utf8', 'replace'))
if proc.returncode is not None or not line:
break
try:
line = colorize_line(line) if colorize else line
except Exception as e:
import traceback
traceback.print_exc()
print('<catkin_make> color formatting problem: ' + str(e),
file=sys.stderr)
out.write(line)
proc.wait()
if proc.returncode:
if quiet:
print(out.getvalue())
raise subprocess.CalledProcessError(proc.returncode, ' '.join(cmd))
return out.getvalue() if quiet else ''
blue_arrow = '@!@{bf}==>@|@!'
def _check_build_dir(name, workspace, buildspace):
package_build_dir = os.path.join(buildspace, name)
if not os.path.exists(package_build_dir):
cprint(
blue_arrow + ' Creating build directory: \'' +
os.path.relpath(package_build_dir, workspace) + '\'@|'
)
os.mkdir(package_build_dir)
return package_build_dir
def isolation_print_command(cmd, path=None, add_env=None):
cprint(
blue_arrow + " " + sanitize(cmd) + "@|" +
(" @!@{kf}in@| '@!" + sanitize(path) + "@|'" if path else '') +
(" @!@{kf}with@| '@!" + ' '.join(['%s=%s' % (k, v) for k, v in add_env.items()]) + "@|'" if add_env else '')
)
def get_python_install_dir():
# this function returns the same value as the CMake variable PYTHON_INSTALL_DIR from catkin/cmake/python.cmake
python_install_dir = 'lib'
if os.name != 'nt':
python_version_xdoty = str(sys.version_info[0]) + '.' + str(sys.version_info[1])
python_install_dir = os.path.join(python_install_dir, 'python' + python_version_xdoty)
python_use_debian_layout = os.path.exists('/etc/debian_version')
python_packages_dir = 'dist-packages' if python_use_debian_layout else 'site-packages'
python_install_dir = os.path.join(python_install_dir, python_packages_dir)
return python_install_dir
def handle_make_arguments(input_make_args, force_single_threaded_when_running_tests=False):
make_args = list(input_make_args)
if force_single_threaded_when_running_tests:
# force single threaded execution when running test since rostest does not support multiple parallel runs
run_tests = [a for a in make_args if a.startswith('run_tests')]
if run_tests:
print('Forcing "-j1" for running unit tests.')
make_args.append('-j1')
# If no -j/--jobs/-l/--load-average flags are in make_args
if not extract_jobs_flags(' '.join(make_args)):
# If -j/--jobs/-l/--load-average are in MAKEFLAGS
if 'MAKEFLAGS' in os.environ and extract_jobs_flags(os.environ['MAKEFLAGS']):
# Do not extend make arguments, let MAKEFLAGS set things
pass
else:
# Else extend the make_arguments to include some jobs flags
# If ROS_PARALLEL_JOBS is set use those flags
if 'ROS_PARALLEL_JOBS' in os.environ:
# ROS_PARALLEL_JOBS is a set of make variables, not just a number
ros_parallel_jobs = os.environ['ROS_PARALLEL_JOBS']
make_args.extend(ros_parallel_jobs.split())
else:
# Else Use the number of CPU cores
try:
jobs = multiprocessing.cpu_count()
make_args.append('-j{0}'.format(jobs))
make_args.append('-l{0}'.format(jobs))
except NotImplementedError:
# If the number of cores cannot be determined, do not extend args
pass
return make_args
def extract_jobs_flags(mflags):
regex = r'(?:^|\s)(-?(?:j|l)(?:\s*[0-9]+|\s|$))' + \
r'|' + \
r'(?:^|\s)((?:--)?(?:jobs|load-average)(?:(?:=|\s+)[0-9]+|(?:\s|$)))'
matches = re.findall(regex, mflags) or []
matches = [m[0] or m[1] for m in matches]
return ' '.join([m.strip() for m in matches]) if matches else None
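# Illustrative examples:
#   extract_jobs_flags('-j4 -l4')     -> '-j4 -l4'
#   extract_jobs_flags('--jobs=2')    -> '--jobs=2'
#   extract_jobs_flags('-k --silent') -> None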
def build_catkin_package(
path, package,
workspace, buildspace, develspace, installspace,
install, force_cmake, quiet, last_env, cmake_args, make_args,
destdir=None
):
cprint(
"Processing @{cf}catkin@| package: '@!@{bf}" +
package.name + "@|'"
)
# Make the build dir
build_dir = _check_build_dir(package.name, workspace, buildspace)
# Check last_env
if last_env is not None:
cprint(
blue_arrow + " Building with env: " +
"'{0}'".format(last_env)
)
# Check for Makefile and maybe call cmake
makefile = os.path.join(build_dir, 'Makefile')
if not os.path.exists(makefile) or force_cmake:
package_dir = os.path.dirname(package.filename)
if not os.path.exists(os.path.join(package_dir, 'CMakeLists.txt')):
export_tags = [e.tagname for e in package.exports]
if 'metapackage' not in export_tags:
print(colorize_line('Error: Package "%s" does not have a CMakeLists.txt file' % package.name))
sys.exit('Can not build catkin package without CMakeLists.txt file')
# generate CMakeLists.txt for metapackages without one
print(colorize_line('Warning: metapackage "%s" should have a CMakeLists.txt file' % package.name))
cmake_code = configure_file(
get_metapackage_cmake_template_path(),
{'name': package.name, 'metapackage_arguments': 'DIRECTORY "%s"' % package_dir})
cmakelists_txt = os.path.join(build_dir, 'CMakeLists.txt')
with open(cmakelists_txt, 'w') as f:
f.write(cmake_code)
package_dir = build_dir
# Run cmake
cmake_cmd = [
'cmake',
package_dir,
'-DCATKIN_DEVEL_PREFIX=' + develspace,
'-DCMAKE_INSTALL_PREFIX=' + installspace
]
cmake_cmd.extend(cmake_args)
add_env = get_additional_environment(install, destdir, installspace)
isolation_print_command(' '.join(cmake_cmd), build_dir, add_env=add_env)
if last_env is not None:
cmake_cmd = [last_env] + cmake_cmd
try:
run_command_colorized(cmake_cmd, build_dir, quiet, add_env=add_env)
except subprocess.CalledProcessError as e:
if os.path.exists(makefile):
# remove Makefile to force CMake invocation next time
os.remove(makefile)
raise
else:
print('Makefile exists, skipping explicit cmake invocation...')
# Check to see if cmake needs to be run via make
make_check_cmake_cmd = ['make', 'cmake_check_build_system']
add_env = get_additional_environment(install, destdir, installspace)
isolation_print_command(' '.join(make_check_cmake_cmd), build_dir, add_env=add_env)
if last_env is not None:
make_check_cmake_cmd = [last_env] + make_check_cmake_cmd
run_command_colorized(
make_check_cmake_cmd, build_dir, quiet, add_env=add_env
)
# Run make
make_cmd = ['make']
make_cmd.extend(handle_make_arguments(make_args, force_single_threaded_when_running_tests=True))
isolation_print_command(' '.join(make_cmd), build_dir)
if last_env is not None:
make_cmd = [last_env] + make_cmd
run_command(make_cmd, build_dir, quiet)
# Make install
if install:
if has_make_target(build_dir, 'install'):
make_install_cmd = ['make', 'install']
isolation_print_command(' '.join(make_install_cmd), build_dir)
if last_env is not None:
make_install_cmd = [last_env] + make_install_cmd
run_command(make_install_cmd, build_dir, quiet)
else:
print(fmt('@{yf}Package has no "@{boldon}install@{boldoff}" target, skipping "make install" invocation...'))
def has_make_target(path, target):
output = run_command(['make', '-pn'], path, quiet=True)
lines = output.splitlines()
targets = [m.group(1) for m in [re.match(r'^([a-zA-Z0-9][a-zA-Z0-9_\.]*):', l) for l in lines] if m]
return target in targets
def get_additional_environment(install, destdir, installspace):
add_env = {}
if install and destdir:
add_env['_CATKIN_SETUP_DIR'] = os.path.join(destdir, installspace[1:])
return add_env
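# Example of the helper above (hypothetical paths): with install=True,
# destdir='/tmp/destdir' and installspace='/opt/ws/install_isolated' it yields
#   {'_CATKIN_SETUP_DIR': '/tmp/destdir/opt/ws/install_isolated'}
# and an empty dict whenever install is False or destdir is not set.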
def build_cmake_package(
path, package,
workspace, buildspace, develspace, installspace,
install, force_cmake, quiet, last_env, cmake_args, make_args,
destdir=None
):
# Notify the user that we are processing a plain cmake package
cprint(
"Processing @{cf}plain cmake@| package: '@!@{bf}" + package.name +
"@|'"
)
# Make the build dir
build_dir = _check_build_dir(package.name, workspace, buildspace)
# Check last_env
if last_env is not None:
cprint(blue_arrow + " Building with env: " +
"'{0}'".format(last_env))
# Check for Makefile and maybe call cmake
makefile = os.path.join(build_dir, 'Makefile')
install_target = installspace if install else develspace
if not os.path.exists(makefile) or force_cmake:
# Call cmake
cmake_cmd = [
'cmake',
os.path.dirname(package.filename),
'-DCMAKE_INSTALL_PREFIX=' + install_target
]
cmake_cmd.extend(cmake_args)
isolation_print_command(' '.join(cmake_cmd), build_dir)
if last_env is not None:
cmake_cmd = [last_env] + cmake_cmd
run_command_colorized(cmake_cmd, build_dir, quiet)
else:
print('Makefile exists, skipping explicit cmake invocation...')
# Check to see if cmake needs to be run via make
make_check_cmake_cmd = ['make', 'cmake_check_build_system']
isolation_print_command(' '.join(make_check_cmake_cmd), build_dir)
if last_env is not None:
make_check_cmake_cmd = [last_env] + make_check_cmake_cmd
run_command_colorized(
make_check_cmake_cmd, build_dir, quiet
)
# Run make
make_cmd = ['make']
make_cmd.extend(handle_make_arguments(make_args))
isolation_print_command(' '.join(make_cmd), build_dir)
if last_env is not None:
make_cmd = [last_env] + make_cmd
run_command(make_cmd, build_dir, quiet)
# Make install
make_install_cmd = ['make', 'install']
isolation_print_command(' '.join(make_install_cmd), build_dir)
if last_env is not None:
make_install_cmd = [last_env] + make_install_cmd
run_command(make_install_cmd, build_dir, quiet)
# If we are installing, and an env.sh exists, don't overwrite it
if install and os.path.exists(os.path.join(installspace, 'env.sh')):
return
cprint(blue_arrow + " Generating an env.sh")
# Generate env.sh for chaining to catkin packages
# except if using --merge which implies that new_env_path equals last_env
new_env_path = os.path.join(install_target, 'env.sh')
new_env_path = prefix_destdir(new_env_path, destdir)
if new_env_path != last_env:
variables = {
'SETUP_DIR': install_target,
'SETUP_FILENAME': 'setup'
}
with open(new_env_path, 'w') as f:
f.write("""\
#!/usr/bin/env sh
# generated from catkin.builder module
if [ $# -eq 0 ] ; then
/bin/echo "Usage: env.sh COMMANDS"
/bin/echo "Calling env.sh without arguments is not supported anymore. Instead spawn a subshell and source a setup file manually."
exit 1
fi
# source {SETUP_FILENAME}.sh from same directory as this file
. "$(cd "`dirname "$0"`" && pwd)/{SETUP_FILENAME}.sh"
exec "$@"
""".format(**variables))
os.chmod(new_env_path, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)
# Generate setup.sh for chaining to catkin packages
# except if using --merge which implies that new_setup_path equals last_setup_env
new_setup_path = os.path.join(install_target, 'setup.sh')
new_setup_path = prefix_destdir(new_setup_path, destdir)
last_setup_env = os.path.join(os.path.dirname(last_env), 'setup.sh') if last_env is not None else None
if new_setup_path != last_setup_env:
subs = {}
subs['cmake_prefix_path'] = install_target + ":"
subs['ld_path'] = os.path.join(install_target, 'lib') + ":"
pythonpath = os.path.join(install_target, get_python_install_dir())
subs['pythonpath'] = pythonpath + ':'
subs['pkgcfg_path'] = os.path.join(install_target, 'lib', 'pkgconfig')
subs['pkgcfg_path'] += ":"
subs['path'] = os.path.join(install_target, 'bin') + ":"
if not os.path.exists(os.path.dirname(new_setup_path)):
os.mkdir(os.path.dirname(new_setup_path))
with open(new_setup_path, 'w') as file_handle:
file_handle.write("""\
#!/usr/bin/env sh
# generated from catkin.builder module
""")
if last_env is not None:
file_handle.write('. %s\n\n' % last_setup_env)
file_handle.write("""\
# detect if running on Darwin platform
_UNAME=`uname -s`
IS_DARWIN=0
if [ "$_UNAME" = "Darwin" ]; then
IS_DARWIN=1
fi
# Prepend to the environment
export CMAKE_PREFIX_PATH="{cmake_prefix_path}$CMAKE_PREFIX_PATH"
if [ $IS_DARWIN -eq 0 ]; then
export LD_LIBRARY_PATH="{ld_path}$LD_LIBRARY_PATH"
else
export DYLD_LIBRARY_PATH="{ld_path}$DYLD_LIBRARY_PATH"
fi
export PATH="{path}$PATH"
export PKG_CONFIG_PATH="{pkgcfg_path}$PKG_CONFIG_PATH"
export PYTHONPATH="{pythonpath}$PYTHONPATH"
""".format(**subs))
def build_package(
path, package,
workspace, buildspace, develspace, installspace,
install, force_cmake, quiet, last_env, cmake_args, make_args, catkin_make_args,
destdir=None,
number=None, of=None
):
if platform.system() in ['Linux', 'Darwin']:
status_msg = '{package_name} [{number} of {total}]'.format(package_name=package.name, number=number, total=of)
sys.stdout.write("\x1b]2;" + status_msg + "\x07")
cprint('@!@{gf}==>@| ', end='')
new_last_env = get_new_env(package, develspace, installspace, install, last_env, destdir)
build_type = _get_build_type(package)
if build_type == 'catkin':
build_catkin_package(
path, package,
workspace, buildspace, develspace, installspace,
install, force_cmake, quiet, last_env, cmake_args, make_args + catkin_make_args,
destdir=destdir
)
if not os.path.exists(new_last_env):
raise RuntimeError(
"No env.sh file generated at: '" + new_last_env +
"'\n This sometimes occurs when a non-catkin package is "
"interpreted as a catkin package.\n This can also occur "
"when the cmake cache is stale, try --force-cmake."
)
elif build_type == 'cmake':
build_cmake_package(
path, package,
workspace, buildspace, develspace, installspace,
install, force_cmake, quiet, last_env, cmake_args, make_args,
destdir=destdir
)
else:
sys.exit('Can not build package with unknown build_type')
if number is not None and of is not None:
msg = ' [@{gf}@!' + str(number) + '@| of @!@{gf}' + str(of) + '@|]'
else:
msg = ''
cprint('@{gf}<==@| Finished processing package' + msg + ': \'@{bf}@!' +
package.name + '@|\'')
return new_last_env
def get_new_env(package, develspace, installspace, install, last_env, destdir=None):
new_env = None
build_type = _get_build_type(package)
if build_type in ['catkin', 'cmake']:
new_env = os.path.join(
installspace if install else develspace,
'env.sh'
)
new_env = prefix_destdir(new_env, destdir)
return new_env
def prefix_destdir(path, destdir=None):
if destdir is not None:
path = os.path.join(destdir, path[1:])
return path
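# Illustrative calls of prefix_destdir (hypothetical paths):
#   prefix_destdir('/opt/ws/install_isolated', '/tmp/destdir') -> '/tmp/destdir/opt/ws/install_isolated'
#   prefix_destdir('/opt/ws/install_isolated')                 -> '/opt/ws/install_isolated'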
def _get_build_type(package):
build_type = 'catkin'
if 'build_type' in [e.tagname for e in package.exports]:
build_type = [e.content for e in package.exports if e.tagname == 'build_type'][0]
return build_type
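# A package.xml export section like the following (hypothetical snippet) makes
# _get_build_type() return 'cmake' instead of the default 'catkin':
#   <export>
#     <build_type>cmake</build_type>
#   </export>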
def _print_build_error(package, e):
e_msg = 'KeyboardInterrupt' if isinstance(e, KeyboardInterrupt) else str(e)
cprint('@{rf}@!<==@| Failed to process package \'@!@{bf}' + package.name + '@|\': \n ' + e_msg)
def build_workspace_isolated(
workspace='.',
sourcespace=None,
buildspace=None,
develspace=None,
installspace=None,
merge=False,
install=False,
force_cmake=False,
colorize=True,
build_packages=None,
quiet=False,
cmake_args=None,
make_args=None,
catkin_make_args=None,
continue_from_pkg=False,
destdir=None
):
'''
Runs ``cmake``, ``make`` and optionally ``make install`` for all
catkin packages in sourcespace_dir. It creates several folders
in the current working directory. For non-catkin packages it runs
``cmake``, ``make`` and ``make install`` for each, installing it to
the devel space or install space if the ``install`` option is specified.
:param workspace: path to the current workspace, ``str``
:param sourcespace: workspace folder containing catkin packages, ``str``
:param buildspace: path to build space location, ``str``
:param develspace: path to devel space location, ``str``
:param installspace: path to install space (CMAKE_INSTALL_PREFIX), ``str``
:param merge: if True, build each catkin package into the same
devel space (not affecting plain cmake packages), ``bool``
:param install: if True, install all packages to the install space,
``bool``
:param force_cmake: (optional), if True calls cmake explicitly for each
package, ``bool``
:param colorize: if True, colorize cmake output and other messages,
``bool``
:param build_packages: specific packages to build (all parent packages
in the topological order must have been built before), ``str``
:param quiet: if True, hides some build output, ``bool``
:param cmake_args: additional arguments for cmake, ``[str]``
:param make_args: additional arguments for make, ``[str]``
:param catkin_make_args: additional arguments for make but only for catkin
packages, ``[str]``
:param continue_from_pkg: indicates whether the build should continue from the
first package in ``build_packages`` that is reached in topological order,
building it and every package after it, ``bool``
:param destdir: define DESTDIR for the cmake invocation, ``str``
'''
if not colorize:
disable_ANSI_colors()
# Check workspace existence
if not os.path.exists(workspace):
sys.exit("Workspace path '{0}' does not exist.".format(workspace))
workspace = os.path.abspath(workspace)
# Check source space existence
if sourcespace is None:
ws_sourcespace = os.path.join(workspace, 'src')
if not os.path.exists(ws_sourcespace):
sys.exit("Could not find source space: {0}".format(sourcespace))
sourcespace = ws_sourcespace
sourcespace = os.path.abspath(sourcespace)
print('Base path: ' + str(workspace))
print('Source space: ' + str(sourcespace))
# Check build space
if buildspace is None:
buildspace = os.path.join(workspace, 'build_isolated')
buildspace = os.path.abspath(buildspace)
if not os.path.exists(buildspace):
os.mkdir(buildspace)
print('Build space: ' + str(buildspace))
# Check devel space
if develspace is None:
develspace = os.path.join(workspace, 'devel_isolated')
develspace = os.path.abspath(develspace)
print('Devel space: ' + str(develspace))
# Check install space
if installspace is None:
installspace = os.path.join(workspace, 'install_isolated')
installspace = os.path.abspath(installspace)
print('Install space: ' + str(installspace))
if cmake_args:
print("Additional CMake Arguments: " + " ".join(cmake_args))
else:
cmake_args = []
if make_args:
print("Additional make Arguments: " + " ".join(make_args))
else:
make_args = []
if catkin_make_args:
print("Additional make Arguments for catkin packages: " + " ".join(catkin_make_args))
else:
catkin_make_args = []
# Find packages
packages = find_packages(sourcespace, exclude_subspaces=True)
if not packages:
print(fmt("@{yf}No packages found in source space: %s@|" % sourcespace))
# verify that the specified packages exist in the workspace
if build_packages:
packages_by_name = {p.name: path for path, p in packages.items()}
unknown_packages = [p for p in build_packages if p not in packages_by_name]
if unknown_packages:
sys.exit('Packages not found in the workspace: %s' % ', '.join(unknown_packages))
# Report topological ordering
ordered_packages = topological_order_packages(packages)
unknown_build_types = []
msg = []
msg.append('@{pf}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' + ('~' * len(str(len(ordered_packages)))))
msg.append('@{pf}~~@| traversing %d packages in topological order:' % len(ordered_packages))
for path, package in ordered_packages:
export_tags = [e.tagname for e in package.exports]
if 'build_type' in export_tags:
build_type_tag = [e.content for e in package.exports if e.tagname == 'build_type'][0]
else:
build_type_tag = 'catkin'
if build_type_tag == 'catkin':
msg.append('@{pf}~~@| - @!@{bf}' + package.name + '@|')
elif build_type_tag == 'cmake':
msg.append(
'@{pf}~~@| - @!@{bf}' + package.name + '@|' +
' (@!@{cf}plain cmake@|)'
)
else:
msg.append(
'@{pf}~~@| - @!@{bf}' + package.name + '@|' +
' (@{rf}unknown@|)'
)
unknown_build_types.append(package)
msg.append('@{pf}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' + ('~' * len(str(len(ordered_packages)))))
for index in range(len(msg)):
msg[index] = fmt(msg[index])
print('\n'.join(msg))
# Error if there are packages with unknown build_types
if unknown_build_types:
print(colorize_line('Error: Packages with unknown build types exist'))
sys.exit('Can not build workspace with packages of unknown build_type')
# Check to see if the workspace has changed
if not force_cmake:
force_cmake, install_toggled = cmake_input_changed(
packages,
buildspace,
install=install,
cmake_args=cmake_args,
filename='catkin_make_isolated'
)
if force_cmake:
print('The packages or cmake arguments have changed, forcing cmake invocation')
elif install_toggled:
print('The install argument has been toggled, forcing cmake invocation on plain cmake package')
# Build packages
pkg_develspace = None
last_env = None
for index, path_package in enumerate(ordered_packages):
path, package = path_package
if merge:
pkg_develspace = develspace
else:
pkg_develspace = os.path.join(develspace, package.name)
if not build_packages or package.name in build_packages:
if continue_from_pkg and build_packages and package.name in build_packages:
build_packages = None
try:
export_tags = [e.tagname for e in package.exports]
is_cmake_package = 'cmake' in [e.content for e in package.exports if e.tagname == 'build_type']
last_env = build_package(
path, package,
workspace, buildspace, pkg_develspace, installspace,
install, force_cmake or (install_toggled and is_cmake_package),
quiet, last_env, cmake_args, make_args, catkin_make_args,
destdir=destdir,
number=index + 1, of=len(ordered_packages)
)
except subprocess.CalledProcessError as e:
_print_build_error(package, e)
# Let users know how to reproduce
# First add the cd to the build folder of the package
cmd = 'cd ' + os.path.join(buildspace, package.name) + ' && '
# Then reproduce the command called
cmd += ' '.join(e.cmd) if isinstance(e.cmd, list) else e.cmd
print(fmt("\n@{rf}Reproduce this error by running:"))
print(fmt("@{gf}@!==> @|") + cmd + "\n")
sys.exit('Command failed, exiting.')
except Exception as e:
print("Unhandled exception of type '{0}':".format(type(e).__name__))
import traceback
traceback.print_exc()
_print_build_error(package, e)
sys.exit('Command failed, exiting.')
else:
cprint("Skipping package: '@!@{bf}" + package.name + "@|'")
last_env = get_new_env(package, pkg_develspace, installspace, install, last_env, destdir)
# Provide a top level devel space environment setup script
if not os.path.exists(develspace):
os.makedirs(develspace)
if not build_packages:
generated_env_sh = os.path.join(develspace, 'env.sh')
generated_setup_util_py = os.path.join(develspace, '_setup_util.py')
if not merge and pkg_develspace:
# generate env.sh and setup.sh|bash|zsh which relay to last devel space
with open(generated_env_sh, 'w') as f:
f.write("""\
#!/usr/bin/env sh
# generated from catkin.builder module
{0} "$@"
""".format(os.path.join(pkg_develspace, 'env.sh')))
os.chmod(generated_env_sh, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)
for shell in ['sh', 'bash', 'zsh']:
with open(os.path.join(develspace, 'setup.%s' % shell), 'w') as f:
f.write("""\
#!/usr/bin/env {1}
# generated from catkin.builder module
. "{0}/setup.{1}"
""".format(pkg_develspace, shell))
# remove _setup_util.py file which might have been generated for an empty devel space before
if os.path.exists(generated_setup_util_py):
os.remove(generated_setup_util_py)
elif not pkg_develspace:
# generate env.sh and setup.sh|bash|zsh for an empty devel space
if 'CMAKE_PREFIX_PATH' in os.environ:
variables = {
'CATKIN_GLOBAL_BIN_DESTINATION': 'bin',
'CATKIN_GLOBAL_LIB_DESTINATION': 'lib',
'CMAKE_PREFIX_PATH_AS_IS': ';'.join(os.environ['CMAKE_PREFIX_PATH'].split(os.pathsep)),
'PYTHON_INSTALL_DIR': get_python_install_dir(),
}
with open(generated_setup_util_py, 'w') as f:
f.write(configure_file(os.path.join(get_cmake_path(), 'templates', '_setup_util.py.in'), variables))
os.chmod(generated_setup_util_py, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)
else:
sys.exit("Unable to process CMAKE_PREFIX_PATH from environment. Cannot generate environment files.")
variables = {'SETUP_FILENAME': 'setup'}
with open(generated_env_sh, 'w') as f:
f.write(configure_file(os.path.join(get_cmake_path(), 'templates', 'env.sh.in'), variables))
os.chmod(generated_env_sh, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)
variables = {'SETUP_DIR': develspace}
for shell in ['sh', 'bash', 'zsh']:
with open(os.path.join(develspace, 'setup.%s' % shell), 'w') as f:
f.write(configure_file(os.path.join(get_cmake_path(), 'templates', 'setup.%s.in' % shell), variables))
def cmake_input_changed(packages, build_path, install=None, cmake_args=None, filename='catkin_make'):
# get current input
package_paths = os.pathsep.join(sorted(packages.keys()))
cmake_args = ' '.join(cmake_args) if cmake_args else ''
# file to store current input
changed = False
install_toggled = False
input_filename = os.path.join(build_path, '%s.cache' % filename)
if not os.path.exists(input_filename):
changed = True
else:
# compare with previously stored input
with open(input_filename, 'r') as f:
previous_package_paths = f.readline().rstrip()
previous_cmake_args = f.readline().rstrip()
previous_install = f.readline().rstrip() == str(True)
if package_paths != previous_package_paths:
changed = True
if cmake_args != previous_cmake_args:
changed = True
if install is not None and install != previous_install:
install_toggled = True
# store current input for next invocation
with open(input_filename, 'w') as f:
f.write('%s\n%s\n%s' % (package_paths, cmake_args, install))
return changed, install_toggled
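# The cache file written above holds three lines, for example (hypothetical values):
#   /ws/src/pkg_a:/ws/src/pkg_b
#   -DCMAKE_BUILD_TYPE=Release
#   False
# A later invocation re-reads them to decide whether cmake must be re-run.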
| [] | [] | [
"CMAKE_PREFIX_PATH",
"MAKEFLAGS",
"ROS_PARALLEL_JOBS"
] | [] | ["CMAKE_PREFIX_PATH", "MAKEFLAGS", "ROS_PARALLEL_JOBS"] | python | 3 | 0 | |
gvm/main_test.go | package main
import (
"fmt"
"os"
"path/filepath"
"reflect"
"testing"
)
func Test_unpackZip(t *testing.T) {
type args struct {
targetDir string
archiveFile string
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "dl",
args: args{
targetDir: filepath.Join(os.Getenv("HOME"), ".gvm2", "dl"),
archiveFile: filepath.Join(os.Getenv("HOME"), ".gvm2", "dl", "dl.zip"),
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := unpackZip(tt.args.targetDir, tt.args.archiveFile); (err != nil) != tt.wantErr {
t.Errorf("unpackZip() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func Test_dedupEnv(t *testing.T) {
type args struct {
env []string
}
tests := []struct {
name string
args args
want []string
}{
{
name: "Test_dedupEnv",
args: args{
[]string{"a=a", "b=b", "a=c"},
},
want: []string{"a=c", "b=b"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := dedupEnv(tt.args.env); !reflect.DeepEqual(got, tt.want) {
t.Errorf("dedupEnv() = %v, want %v", got, tt.want)
// Known quirk: dedupEnv() currently returns an extra leading empty entry,
// e.g. got [ a=c b=b] while [a=c b=b] was expected.
}
})
}
}
| [
"\"HOME\"",
"\"HOME\""
] | [] | [
"HOME"
] | [] | ["HOME"] | go | 1 | 0 | |
pkg/config/config.go | package config
import "os"
var (
instance *Config = &Config{
Line: &LineConfig{
ClientSecret: os.Getenv("TAISHO_LINE_SECRET"),
ClientAccessToken: os.Getenv("TAISHO_LINE_ACCESS_TOKEN"),
},
}
)
type Config struct {
Line *LineConfig
}
type LineConfig struct {
ClientSecret string
ClientAccessToken string
}
func Get() *Config {
return instance
}
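// Example usage of this package (hypothetical caller, not part of this file):
//
//	cfg := config.Get()
//	token := cfg.Line.ClientAccessToken
//	_ = token // hand the token to the LINE SDK client of your choice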
| [
"\"TAISHO_LINE_SECRET\"",
"\"TAISHO_LINE_ACCESS_TOKEN\""
] | [] | [
"TAISHO_LINE_SECRET",
"TAISHO_LINE_ACCESS_TOKEN"
] | [] | ["TAISHO_LINE_SECRET", "TAISHO_LINE_ACCESS_TOKEN"] | go | 2 | 0 | |
google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/services/customer_user_access_service/client.py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v7.enums.types import access_role
from google.ads.googleads.v7.resources.types import customer_user_access
from google.ads.googleads.v7.services.types import customer_user_access_service
from .transports.base import CustomerUserAccessServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import CustomerUserAccessServiceGrpcTransport
class CustomerUserAccessServiceClientMeta(type):
"""Metaclass for the CustomerUserAccessService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[CustomerUserAccessServiceTransport]]
_transport_registry['grpc'] = CustomerUserAccessServiceGrpcTransport
def get_transport_class(cls,
label: str = None,
) -> Type[CustomerUserAccessServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class CustomerUserAccessServiceClient(metaclass=CustomerUserAccessServiceClientMeta):
"""This service manages the permissions of a user on a given
customer.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
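# Illustrative conversions performed by the helper above (hypothetical endpoints):
#   "googleads.googleapis.com"      -> "googleads.mtls.googleapis.com"
#   "googleads.mtls.googleapis.com" -> unchanged (already an mTLS endpoint)
#   "localhost:8080"                -> unchanged (not a googleapis.com domain)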
DEFAULT_ENDPOINT = 'googleads.googleapis.com'
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CustomerUserAccessServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CustomerUserAccessServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> CustomerUserAccessServiceTransport:
"""Return the transport used by the client instance.
Returns:
CustomerUserAccessServiceTransport: The transport used by the client instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def customer_user_access_path(customer_id: str,user_id: str,) -> str:
"""Return a fully-qualified customer_user_access string."""
return "customers/{customer_id}/customerUserAccesses/{user_id}".format(customer_id=customer_id, user_id=user_id, )
@staticmethod
def parse_customer_user_access_path(path: str) -> Dict[str,str]:
"""Parse a customer_user_access path into its component segments."""
m = re.match(r"^customers/(?P<customer_id>.+?)/customerUserAccesses/(?P<user_id>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str, ) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str,str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str, ) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder, )
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str,str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str, ) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization, )
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str,str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str, ) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project, )
@staticmethod
def parse_common_project_path(path: str) -> Dict[str,str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str, ) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(project=project, location=location, )
@staticmethod
def parse_common_location_path(path: str) -> Dict[str,str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(self, *,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, CustomerUserAccessServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the customer user access service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.CustomerUserAccessServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, CustomerUserAccessServiceTransport):
# transport is a CustomerUserAccessServiceTransport instance.
if credentials:
raise ValueError('When providing a transport instance, '
'provide its credentials directly.')
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = CustomerUserAccessServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_customer_user_access(self,
request: Union[customer_user_access_service.GetCustomerUserAccessRequest, dict] = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> customer_user_access.CustomerUserAccess:
r"""Returns the CustomerUserAccess in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (Union[google.ads.googleads.v7.services.types.GetCustomerUserAccessRequest, dict]):
The request object. Request message for
[CustomerUserAccessService.GetCustomerUserAccess][google.ads.googleads.v7.services.CustomerUserAccessService.GetCustomerUserAccess].
resource_name (:class:`str`):
Required. Resource name of the
customer user access.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v7.resources.types.CustomerUserAccess:
Represents the permission of a single
user onto a single customer.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a customer_user_access_service.GetCustomerUserAccessRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, customer_user_access_service.GetCustomerUserAccessRequest):
request = customer_user_access_service.GetCustomerUserAccessRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_customer_user_access]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('resource_name', request.resource_name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def mutate_customer_user_access(self,
request: Union[customer_user_access_service.MutateCustomerUserAccessRequest, dict] = None,
*,
customer_id: str = None,
operation: customer_user_access_service.CustomerUserAccessOperation = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> customer_user_access_service.MutateCustomerUserAccessResponse:
r"""Updates, removes permission of a user on a given customer.
Operation statuses are returned.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `CustomerUserAccessError <>`__
`FieldMaskError <>`__ `HeaderError <>`__ `InternalError <>`__
`MutateError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (Union[google.ads.googleads.v7.services.types.MutateCustomerUserAccessRequest, dict]):
The request object. Mutate Request for
[CustomerUserAccessService.MutateCustomerUserAccess][google.ads.googleads.v7.services.CustomerUserAccessService.MutateCustomerUserAccess].
customer_id (:class:`str`):
Required. The ID of the customer
being modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operation (:class:`google.ads.googleads.v7.services.types.CustomerUserAccessOperation`):
Required. The operation to perform on
the customer
This corresponds to the ``operation`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v7.services.types.MutateCustomerUserAccessResponse:
Response message for customer user
access mutate.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([customer_id, operation]):
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a customer_user_access_service.MutateCustomerUserAccessRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, customer_user_access_service.MutateCustomerUserAccessRequest):
request = customer_user_access_service.MutateCustomerUserAccessRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operation is not None:
request.operation = operation
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.mutate_customer_user_access]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('customer_id', request.customer_id),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
__all__ = (
'CustomerUserAccessServiceClient',
)
| [] | [] | [
"GOOGLE_API_USE_MTLS_ENDPOINT",
"GOOGLE_API_USE_CLIENT_CERTIFICATE"
] | [] | ["GOOGLE_API_USE_MTLS_ENDPOINT", "GOOGLE_API_USE_CLIENT_CERTIFICATE"] | python | 2 | 0 | |
protons/scripts/run_simulation.py | import logging
import os
import signal
import toml
from typing import Dict
import sys
import netCDF4
from lxml import etree
from saltswap.wrappers import Salinator
from simtk import openmm as mm, unit
from tqdm import trange
from .. import app
from ..app import log, NCMCProtonDrive
from ..app.proposals import UniformSwapProposal
from ..app.driver import SAMSApproach
from .utilities import (
timeout_handler,
xml_to_topology,
deserialize_openmm_element,
deserialize_state_vector,
TimeOutError,
create_protons_checkpoint_file,
)
def run_main(jsonfile):
"""Main simulation loop."""
# TODO Validate yaml/json input with json schema?
settings = toml.load(open(jsonfile, "r"))
try:
format_vars: Dict[str, str] = settings["format_vars"]
except KeyError:
format_vars = dict()
# Retrieve runtime settings
run = settings["run"]
# Start timeout to enable clean exit on uncompleted runs
# Note, does not work on windows!
if os.name != "nt":
signal.signal(signal.SIGALRM, timeout_handler)
script_timeout = int(run["timeout_sec"])
signal.alarm(script_timeout)
# Input files
inp = settings["input"]
idir = inp["dir"].format(**format_vars)
input_checkpoint_file = os.path.abspath(
os.path.join(idir, inp["checkpoint"].format(**format_vars))
)
# Load checkpoint file
with open(input_checkpoint_file, "r") as checkpoint:
checkpoint_tree = etree.fromstring(checkpoint.read())
checkpoint_date = checkpoint_tree.attrib["date"]
log.info(f"Reading checkpoint from '{checkpoint_date}'.")
topology_element = checkpoint_tree.xpath("TopologyFile")[0]
topology: app.Topology = xml_to_topology(topology_element)
# Quick fix for histidines in topology
# Openmm relabels them HIS, which leads to them not being detected as
# titratable. Renaming them fixes this.
for residue in topology.residues():
if residue.name == "HIS":
residue.name = "HIP"
# TODO doublecheck if ASH GLH need to be renamed
elif residue.name == "ASP":
residue.name = "ASH"
elif residue.name == "GLU":
residue.name = "GLH"
# Naming the output files
out = settings["output"]
odir = out["dir"].format(**format_vars)
obasename = out["basename"].format(**format_vars)
runid = format_vars["run_idx"]
if not os.path.isdir(odir):
os.makedirs(odir)
lastdir = os.getcwd()
os.chdir(odir)
# File for resuming simulation
output_checkpoint_file = f"{obasename}-checkpoint-{runid}.xml"
# System Configuration
system_element = checkpoint_tree.xpath("System")[0]
system: mm.System = deserialize_openmm_element(system_element)
# Deserialize the integrator
integrator_element = checkpoint_tree.xpath("Integrator")[0]
integrator: mm.CompoundIntegrator = deserialize_openmm_element(integrator_element)
perturbations_per_trial = int(run["perturbations_per_ncmc_trial"])
propagations_per_step = int(run["propagations_per_ncmc_step"])
# Deserialize the proton drive
drive_element = checkpoint_tree.xpath("NCMCProtonDrive")[0]
temperature = float(drive_element.get("temperature_kelvin")) * unit.kelvin
if "pressure_bar" in drive_element.attrib:
pressure = float(drive_element.get("pressure_bar")) * unit.bar
else:
pressure = None
driver = NCMCProtonDrive(
temperature,
topology,
system,
pressure=pressure,
perturbations_per_trial=perturbations_per_trial,
propagations_per_step=propagations_per_step,
)
driver.state_from_xml_tree(drive_element)
if driver.calibration_state is not None:
if driver.calibration_state.approach == SAMSApproach.ONESITE:
driver.define_pools({"calibration": driver.calibration_state.group_index})
try:
platform = mm.Platform.getPlatformByName("CUDA")
properties = {
"CudaPrecision": "mixed",
"DeterministicForces": "true",
"CudaDeviceIndex": os.environ["CUDA_VISIBLE_DEVICES"],
}
except Exception as e:
message = str(e)
if message == 'There is no registered Platform called "CUDA"':
log.error(message)
log.warn("Resorting to default OpenMM platform and properties.")
platform = None
properties = None
else:
raise
simulation = app.ConstantPHSimulation(
topology,
system,
integrator,
driver,
platform=platform,
platformProperties=properties,
)
# Set the simulation state
state_element = checkpoint_tree.xpath("State")[0]
state: mm.State = deserialize_openmm_element(state_element)
boxvec = state.getPeriodicBoxVectors()
pos = state.getPositions()
vel = state.getVelocities()
simulation.context.setPositions(pos)
simulation.context.setPeriodicBoxVectors(*boxvec)
simulation.context.setVelocities(vel)
# Check if the system has an associated salinator
saltswap_element = checkpoint_tree.xpath("Saltswap")
if saltswap_element:
# Deserialization workaround
saltswap_element = saltswap_element[0]
salt_concentration = (
float(saltswap_element.get("salt_concentration_molar")) * unit.molar
)
salinator = Salinator(
context=simulation.context,
system=system,
topology=topology,
ncmc_integrator=integrator.getIntegrator(1),
salt_concentration=salt_concentration,
pressure=pressure,
temperature=temperature,
)
swapper = salinator.swapper
deserialize_state_vector(saltswap_element, swapper)
# Assumes the parameters are already set and the ions are set if needed
# Don't set the charge rule
driver.swapper = swapper
driver.swap_proposal = UniformSwapProposal(cation_coefficient=0.5)
else:
salinator = None
# Add reporters
ncfile = netCDF4.Dataset(f"{obasename}-{runid}.nc", "w")
dcd_output_name = f"{obasename}-{runid}.dcd"
reporters = settings["reporters"]
if "metadata" in reporters:
simulation.update_reporters.append(app.MetadataReporter(ncfile))
if "coordinates" in reporters:
freq = int(reporters["coordinates"]["frequency"])
simulation.reporters.append(
app.DCDReporter(dcd_output_name, freq, enforcePeriodicBox=True)
)
if "titration" in reporters:
freq = int(reporters["titration"]["frequency"])
simulation.update_reporters.append(app.TitrationReporter(ncfile, freq))
if "sams" in reporters:
freq = int(reporters["sams"]["frequency"])
simulation.calibration_reporters.append(app.SAMSReporter(ncfile, freq))
if "ncmc" in reporters:
freq = int(reporters["ncmc"]["frequency"])
if "work_interval" in reporters["ncmc"]:
work_interval = int(reporters["ncmc"]["work_interval"])
else:
work_interval = 0
simulation.update_reporters.append(
app.NCMCReporter(ncfile, freq, work_interval)
)
total_iterations = int(run["total_update_attempts"])
md_steps_between_updates = int(run["md_steps_between_updates"])
# MAIN SIMULATION LOOP STARTS HERE
try:
for i in trange(total_iterations, desc="NCMC attempts"):
if i == 2:
log.info("Simulation seems to be working. Suppressing debugging info.")
log.setLevel(logging.INFO)
simulation.step(md_steps_between_updates)
# Perform a few COOH updates in between
driver.update("COOH", nattempts=3)
if driver.calibration_state is not None:
if driver.calibration_state.approach is SAMSApproach.ONESITE:
simulation.update(1, pool="calibration")
else:
simulation.update(1)
simulation.adapt()
else:
simulation.update(1)
except TimeOutError:
log.warn("Simulation ran out of time, saving current results.")
finally:
# export the context
create_protons_checkpoint_file(
output_checkpoint_file,
driver,
simulation.context,
simulation.system,
simulation.integrator,
topology_element.text,
salinator=salinator,
)
ncfile.close()
os.chdir(lastdir)
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Please provide a single json file as input.")
else:
# Provide the json file to main function
run_main(sys.argv[1])
| [] | [] | [
"CUDA_VISIBLE_DEVICES"
] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
content/test/gpu/measure_power_intel.py | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script runs power measurements for browsers using Intel Power Gadget.
This script only works on Windows/Mac with Intel CPU. Intel Power Gadget needs
to be installed on the machine before this script works. The software can be
downloaded from:
https://software.intel.com/en-us/articles/intel-power-gadget
Newer IPG versions might also require Visual C++ 2010 runtime to be installed
on Windows:
https://www.microsoft.com/en-us/download/details.aspx?id=14632
Install selenium via pip: `pip install selenium`
And finally install the web drivers for Chrome (and Edge if needed):
http://chromedriver.chromium.org/downloads
https://developer.microsoft.com/en-us/microsoft-edge/tools/webdriver/
Sample runs:
python measure_power_intel.py --browser=canary --duration=10 --delay=5
--verbose --url="https://www.youtube.com/watch?v=0XdS37Re1XQ"
--extra-browser-args="--no-sandbox --disable-features=UseSurfaceLayerForVideo"
It is recommended to test with optimized builds of Chromium e.g. these GN args:
is_debug = false
is_component_build = false
is_official_build = true # optimization similar to official builds
use_goma = true
enable_nacl = false
proprietary_codecs = true
ffmpeg_branding = "Chrome"
It might also help to disable unnecessary background services and to unplug the
power source some time before measuring. See "Computer setup" section here:
https://microsoftedge.github.io/videotest/2017-04/WebdriverMethodology.html
"""
import csv
import datetime
import logging
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
import time
try:
from selenium import webdriver
except ImportError as error:
logging.error(
"This script needs selenium and appropriate web drivers to be installed.")
raise
import gpu_tests.ipg_utils as ipg_utils
CHROME_STABLE_PATH_WIN = (
r"C:\Program Files (x86)\Google\Chrome\Application\chrome.exe")
CHROME_BETA_PATH_WIN = (
r"C:\Program Files (x86)\Google\Chrome Beta\Application\chrome.exe")
CHROME_DEV_PATH_WIN = (
r"C:\Program Files (x86)\Google\Chrome Dev\Application\chrome.exe")
# The following two paths are relative to the LOCALAPPDATA
CHROME_CANARY_PATH_WIN = r"Google\Chrome SxS\Application\chrome.exe"
CHROMIUM_PATH_WIN = r"Chromium\Application\chrome.exe"
CHROME_STABLE_PATH_MAC = (
"/Applications/Google Chrome.app/Contents/MacOS/Google Chrome")
CHROME_BETA_PATH_MAC = CHROME_STABLE_PATH_MAC
CHROME_DEV_PATH_MAC = CHROME_STABLE_PATH_MAC
CHROME_CANARY_PATH_MAC = (
"/Applications/Google Chrome Canary.app/Contents/MacOS/Google Chrome Canary"
)
SUPPORTED_BROWSERS = ['stable', 'beta', 'dev', 'canary', 'chromium', 'edge']
def LocateBrowserWin(options_browser):
if options_browser == 'edge':
return 'edge'
browser = None
if not options_browser or options_browser == 'stable':
browser = CHROME_STABLE_PATH_WIN
elif options_browser == 'beta':
browser = CHROME_BETA_PATH_WIN
elif options_browser == 'dev':
browser = CHROME_DEV_PATH_WIN
elif options_browser == 'canary':
browser = os.path.join(os.getenv('LOCALAPPDATA'), CHROME_CANARY_PATH_WIN)
elif options_browser == 'chromium':
browser = os.path.join(os.getenv('LOCALAPPDATA'), CHROMIUM_PATH_WIN)
elif options_browser.endswith(".exe"):
browser = options_browser
else:
logging.warning("Invalid value for --browser")
logging.warning(
"Supported values: %s, or a full path to a browser executable.",
", ".join(SUPPORTED_BROWSERS))
return None
if not os.path.exists(browser):
logging.warning("Can't locate browser at " + browser)
logging.warning("Please pass full path to the executable in --browser")
return None
return browser
def LocateBrowserMac(options_browser):
browser = None
if not options_browser or options_browser == 'stable':
browser = CHROME_STABLE_PATH_MAC
elif options_browser == 'beta':
browser = CHROME_BETA_PATH_MAC
elif options_browser == 'dev':
browser = CHROME_DEV_PATH_MAC
elif options_browser == 'canary':
browser = CHROME_CANARY_PATH_MAC
elif options_browser.endswith("Chromium"):
browser = options_browser
else:
logging.warning("Invalid value for --browser")
logging.warning(
"Supported values: %s, or a full path to a browser executable.",
", ".join(SUPPORTED_BROWSERS))
return None
if not os.path.exists(browser):
logging.warning("Can't locate browser at " + browser)
logging.warning("Please pass full path to the executable in --browser")
return None
return browser
def LocateBrowser(options_browser):
if sys.platform == 'win32':
return LocateBrowserWin(options_browser)
if sys.platform == 'darwin':
return LocateBrowserMac(options_browser)
logging.warning("This script only runs on Windows/Mac.")
return None
def CreateWebDriver(browser, user_data_dir, url, fullscreen,
extra_browser_args):
if browser == 'edge':
driver = webdriver.Edge()
else:
options = webdriver.ChromeOptions()
options.binary_location = browser
options.add_argument('--user-data-dir=%s' % user_data_dir)
options.add_argument('--no-first-run')
options.add_argument('--no-default-browser-check')
options.add_argument('--autoplay-policy=no-user-gesture-required')
options.add_argument('--start-maximized')
for arg in extra_browser_args:
options.add_argument(arg)
logging.debug(" ".join(options.arguments))
driver = webdriver.Chrome(options=options)
driver.implicitly_wait(30)
if url is not None:
driver.get(url)
if fullscreen:
try:
video_el = driver.find_element_by_tag_name('video')
actions = webdriver.ActionChains(driver)
actions.move_to_element(video_el)
actions.double_click(video_el)
actions.perform()
except:
logging.warning('Could not locate video element to make fullscreen')
return driver
def MeasurePowerOnce(browser, logfile, duration, delay, resolution, url,
fullscreen, extra_browser_args):
logging.debug("Logging into " + logfile)
user_data_dir = tempfile.mkdtemp()
driver = CreateWebDriver(browser, user_data_dir, url, fullscreen,
extra_browser_args)
ipg_utils.RunIPG(duration + delay, resolution, logfile)
driver.quit()
try:
shutil.rmtree(user_data_dir)
except Exception as err:
logging.warning("Failed to remove temporary folder: " + user_data_dir)
logging.warning("Please kill browser and remove it manually to avoid leak")
logging.debug(err)
results = ipg_utils.AnalyzeIPGLogFile(logfile, delay)
return results
def main(argv):
parser = optparse.OptionParser()
parser.add_option(
"--browser",
help=("select which browser to run. Options include: " +
", ".join(SUPPORTED_BROWSERS) +
", or a full path to a browser executable. " +
"By default, stable is selected."))
parser.add_option(
"--duration",
default=60,
type="int",
help="specify how many seconds Intel Power Gadget "
"measures. By default, 60 seconds is selected.")
parser.add_option(
"--delay",
default=10,
type="int",
help="specify how many seconds we skip in the data "
"Intel Power Gadget collects. This time is for starting "
"video play, switching to fullscreen mode, etc. "
"By default, 10 seconds is selected.")
parser.add_option(
"--resolution",
default=100,
type="int",
help="specify how often Intel Power Gadget samples "
"data in milliseconds. By default, 100 ms is selected.")
parser.add_option(
"--logdir",
help="specify where Intel Power Gadget stores its log."
"By default, it is the current path.")
parser.add_option(
"--logname",
help="specify the prefix for Intel Power Gadget log "
"filename. By default, it is PowerLog.")
parser.add_option(
"-v",
"--verbose",
action="store_true",
default=False,
help="print out debug information.")
parser.add_option(
"--repeat",
default=1,
type="int",
help="specify how many times to run the measurements.")
parser.add_option(
"--url", help="specify the webpage URL the browser launches with.")
parser.add_option(
"--extra-browser-args",
dest="extra_browser_args",
help="specify extra command line switches for the browser "
"that are separated by spaces (quoted).")
parser.add_option(
"--extra-browser-args-filename",
dest="extra_browser_args_filename",
metavar="FILE",
help="specify extra command line switches for the browser "
"in a text file that are separated by whitespace.")
parser.add_option(
"--fullscreen",
action="store_true",
default=False,
help="specify whether video should be made fullscreen.")
(options, _) = parser.parse_args(args=argv)
if options.verbose:
logging.basicConfig(level=logging.DEBUG)
browser = LocateBrowser(options.browser)
if not browser:
return
# TODO(zmo): Add code to disable a bunch of Windows services that might
# affect power consumption.
log_prefix = options.logname or 'PowerLog'
all_results = []
extra_browser_args = []
if options.extra_browser_args:
extra_browser_args = options.extra_browser_args.split()
if options.extra_browser_args_filename:
if not os.path.isfile(options.extra_browser_args_filename):
logging.error("Can't locate file at %s",
options.extra_browser_args_filename)
else:
with open(options.extra_browser_args_filename, 'r') as file:
extra_browser_args.extend(file.read().split())
file.close()
for run in range(1, options.repeat + 1):
logfile = ipg_utils.GenerateIPGLogFilename(log_prefix, options.logdir, run,
options.repeat, True)
print "Iteration #%d out of %d" % (run, options.repeat)
results = MeasurePowerOnce(browser, logfile, options.duration,
options.delay, options.resolution, options.url,
options.fullscreen, extra_browser_args)
print results
all_results.append(results)
now = datetime.datetime.now()
results_filename = '%s_%s_results.csv' % (log_prefix,
now.strftime('%Y%m%d%H%M%S'))
try:
with open(results_filename, 'wb') as results_csv:
labels = sorted(all_results[0].keys())
w = csv.DictWriter(results_csv, fieldnames=labels)
w.writeheader()
w.writerows(all_results)
except Exception as err:
logging.warning('Failed to write results file ' + results_filename)
logging.debug(err)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| [] | [] | [
"LOCALAPPDATA"
] | [] | ["LOCALAPPDATA"] | python | 1 | 0 | |
ted_adapter/test_adapter.py | # Copyright (c) 2018 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Module for running tests for MediaSDK open source
This module gets binary files of CI MediaSDK build
from share folder and tests them by 'ted'
"""
import sys
import argparse
import shutil
import subprocess
import os
import pathlib
import tarfile
import traceback
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
import adapter_conf
from common.mediasdk_directories import MediaSdkDirectories
from common.helper import TestReturnCodes, Product_type, Build_type, Build_event, rotate_dir
from smoke_test.config import LOG_PATH, LOG_NAME
class TedAdapter(object):
"""
Wrapper for 'ted'
"""
#TODO: add relevant path and delete it
test_driver_dir = pathlib.Path('/localdisk/bb/worker/infrastructure') #TODO: hardcoded path
test_results_dir = test_driver_dir / 'ted/results'
dispatcher_dir = adapter_conf.MEDIASDK_PATH / 'lib64'
tests_timeout = 300 # 5 minutes
def __init__(self, build_artifacts_dir, tests_artifacts_dir, tests_artifacts_url, root_dir):
"""
:param build_artifacts_dir: Path to build artifacts
:type build_artifacts_dir: pathlib.Path
:param tests_artifacts_dir: Path to tests artifacts
:type tests_artifacts_dir: pathlib.Path
:param tests_artifacts_url: URL to tests artifacts
:type tests_artifacts_url: String
:param root_dir: Path to workdir for unpacking build artifacts
:type root_dir: pathlib.Path
"""
self.build_artifacts_dir = build_artifacts_dir
self.tests_artifacts_dir = tests_artifacts_dir
self.tests_artifacts_url = tests_artifacts_url
self.root_dir = root_dir
self.env = os.environ.copy()
# Path to dispatcher lib should be in the libraries search path
self.env['LD_LIBRARY_PATH'] = self.dispatcher_dir
def _get_artifacts(self):
"""
Get the artifacts archive from the share
and extract it
:return: None
"""
pkg_name = 'install_pkg.tar.gz'
remote_pkg = self.build_artifacts_dir / pkg_name
#TODO: implement exceptions
# Clean workdir and re-create it
self._remove(str(self.root_dir))
self._mkdir(str(self.root_dir))
# Copy `install_pkg.tar.gz` to the workdir and untar it
self._copy(str(remote_pkg), str(self.root_dir))
self._untar(str(self.root_dir / pkg_name), str(self.root_dir))
# Remove old `/opt/intel/mediasdk` and copy fresh built artifacts
self._remove(str(adapter_conf.MEDIASDK_PATH), sudo=False)
self._copy(str(self.root_dir / 'opt' / 'intel' / 'mediasdk'), str(adapter_conf.MEDIASDK_PATH), sudo=False)
def run_test(self):
"""
'Ted' runner
:return: Count of failed cases
:rtype: Integer | Exception
"""
self._get_artifacts()
# Path to the mediasdk folder which will be tested
self.env['MFX_HOME'] = adapter_conf.MEDIASDK_PATH
# Path to the lib64 folder where the driver is located
self.env['LIBVA_DRIVERS_PATH'] = adapter_conf.DRIVER_PATH
process = subprocess.run('python3 ted/ted.py',
shell=True,
cwd=self.test_driver_dir,
env=self.env,
timeout=self.tests_timeout,
encoding='utf-8',
errors='backslashreplace')
return process.returncode
def run_fei_tests(self):
"""
'hevc_fei_smoke_test' runner
:return: SUCCESS = 0, ERROR_TEST_FAILED = 1, ERROR_ACCESS_DENIED = 2
:rtype: Integer | Exception
"""
print(f'Running hevc fei smoke tests...', flush=True)
process = subprocess.run(f'python3 ../smoke_test/hevc_fei_smoke_test.py',
shell=True,
env=self.env,
timeout=self.tests_timeout,
encoding='utf-8',
errors='backslashreplace')
return process.returncode
def copy_logs_to_share(self):
rotate_dir(self.tests_artifacts_dir)
print(f'Copy results to {self.tests_artifacts_dir}')
print(f'Artifacts are available by: {self.tests_artifacts_url}')
# Workaround for copying to samba share on Linux to avoid exceptions while setting Linux permissions.
_orig_copystat = shutil.copystat
shutil.copystat = lambda x, y, follow_symlinks=True: x
shutil.copytree(self.test_results_dir, self.tests_artifacts_dir, ignore=shutil.ignore_patterns('bin'))
shutil.copyfile(LOG_PATH, str(self.tests_artifacts_dir / LOG_NAME))
shutil.copystat = _orig_copystat
# Direct calls to the rm and cp commands may need to be run with `sudo`
# because CI build artifacts are copied to `/opt/intel/mediasdk`
# Note: the user should be a sudoer that is not asked for a password!
def _remove(self, directory: str, sudo=False):
return self._execute_command(f"rm -rf {directory}", sudo)
def _copy(self, target_directory: str, destination_directory: str, sudo=False):
return self._execute_command(f"cp -r {target_directory} {destination_directory}", sudo)
# TODO use extract_archive() from common.helper
def _untar(self, archive_path, destination_path):
with tarfile.open(archive_path, 'r:gz') as archive:
archive.extractall(path=destination_path)
def _mkdir(self, path):
return self._execute_command(f"mkdir -p {path}")
def _execute_command(self, command, sudo=False):
prefix = "sudo" if sudo else ""
process = subprocess.run(f"{prefix} {command}",
shell=True,
timeout=self.tests_timeout,
encoding='utf-8',
errors='backslashreplace')
return process.returncode
def _driver_exists():
return (adapter_conf.DRIVER_PATH / adapter_conf.DRIVER).exists()
def check_driver():
if not _driver_exists():
print(f"Driver was not found in this location: {adapter_conf.DRIVER_PATH}")
print(f"Install the driver and run ted again.")
exit(1)
def main():
"""
Tests runner
:return: None
"""
#Check existence of driver
check_driver()
parser = argparse.ArgumentParser(prog="test_adapter.py",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--version", action="version", version="%(prog)s 1.0")
parser.add_argument('-br', "--branch", metavar="String", required=True,
help="Branch of triggered repository")
parser.add_argument('-e', "--build-event", default='commit',
choices=[build_event.value for build_event in Build_event],
help='Event of commit')
parser.add_argument('-c', "--commit-id", metavar="String", required=True,
help="SHA of triggered commit")
parser.add_argument('-p', "--product-type", default='closed_linux',
choices=[product_type.value for product_type in Product_type],
help='Type of product')
parser.add_argument('-b', "--build-type", default='release',
choices=[build_type.value for build_type in Build_type],
help='Type of build')
parser.add_argument('-d', "--root-dir", metavar="PATH", required=True,
help="Path to worker directory")
args = parser.parse_args()
directories_layout = {
'branch': args.branch,
'build_event': args.build_event,
'commit_id': args.commit_id,
'product_type': args.product_type,
'build_type': args.build_type,
}
build_artifacts_dir = MediaSdkDirectories.get_build_dir(**directories_layout)
tests_artifacts_dir = MediaSdkDirectories.get_test_dir(**directories_layout)
tests_artifacts_url = MediaSdkDirectories.get_test_url(**directories_layout)
adapter = TedAdapter(build_artifacts_dir, tests_artifacts_dir, tests_artifacts_url, root_dir=pathlib.Path(args.root_dir))
try:
tests_return_code = adapter.run_test()
except Exception:
print("Exception occurred:\n", traceback.format_exc())
# TODO return json string
tests_return_code = TestReturnCodes.INFRASTRUCTURE_ERROR.value
try:
tests_return_code |= adapter.run_fei_tests()
except Exception:
print("Exception occurred:\n", traceback.format_exc())
# TODO return json string
tests_return_code |= TestReturnCodes.INFRASTRUCTURE_ERROR.value
try:
adapter.copy_logs_to_share()
except Exception:
print("Exception occurred while copying results:\n", traceback.format_exc())
tests_return_code |= TestReturnCodes.INFRASTRUCTURE_ERROR.value
exit(tests_return_code)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
lib/scionutil/initialization.go | // Copyright 2018 ETH Zurich
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package scionutil
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/scionproto/scion/go/lib/addr"
"github.com/scionproto/scion/go/lib/sciond"
"github.com/scionproto/scion/go/lib/snet"
)
const localhost = "localhost"
// InitSCION initializes the default SCION networking context with the provided SCION address
// and the default SCIOND/SCION dispatcher
func InitSCION(localAddr *snet.Addr) error {
err := snet.Init(localAddr.IA, GetSCIONDPath(&localAddr.IA), GetDefaultDispatcher())
if err != nil {
return err
}
return nil
}
// InitSCIONString initializes the default SCION networking context with provided SCION address in string format
// and the default SCIOND/SCION dispatcher
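//
// A minimal usage sketch (the address below is illustrative):
//
//	local, err := scionutil.InitSCIONString("1-ff00:0:110,[127.0.0.1]:0")
//	if err != nil {
//		// handle error
//	}
//	_ = local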
func InitSCIONString(localAddr string) (*snet.Addr, error) {
addr, err := snet.AddrFromString(localAddr)
if err != nil {
return nil, err
}
return addr, InitSCION(addr)
}
// GetSCIONDPath returns the path to the SCIOND socket.
func GetSCIONDPath(ia *addr.IA) string {
// Use default.sock if exists:
if _, err := os.Stat(sciond.DefaultSCIONDPath); err == nil {
return sciond.DefaultSCIONDPath
}
// otherwise, use socket with ia name:
return sciond.GetDefaultSCIONDPath(ia)
}
// GetDefaultDispatcher returns the path to the default SCION dispatcher
func GetDefaultDispatcher() string {
return "/run/shm/dispatcher/default.sock"
}
// GetLocalhost returns a local SCION address an application can bind to
func GetLocalhost() (*snet.Addr, error) {
str, err := GetLocalhostString()
if err != nil {
return nil, err
}
addr, err := snet.AddrFromString(str)
if err != nil {
return nil, err
}
return addr, nil
}
// GetLocalhostString returns a local SCION address an application can bind to
func GetLocalhostString() (string, error) {
var ia addr.IA
var l3 addr.HostAddr
var err error
// see if 'localhost' is defined in hostsfile
ia, l3, err = GetHostByName(localhost)
if err == nil {
return fmt.Sprintf("%s,[%s]", ia, l3), nil
}
// otherwise return ISD-AS and loopback IP
sc := os.Getenv("SC")
b, err := ioutil.ReadFile(filepath.Join(sc, "gen/ia"))
if err != nil {
return "", err
}
ia, err = addr.IAFromFileFmt(string(b), false)
if err != nil {
return "", err
}
return fmt.Sprintf("%s,[127.0.0.1]", ia), nil
}
| [
"\"SC\""
] | [] | [
"SC"
] | [] | ["SC"] | go | 1 | 0 | |
datasources/db_mariadb.go | package datasources
import (
"fmt"
"log"
"os"
"strconv"
"time"
"github.com/jinzhu/gorm"
// Import mysql driver
_ "github.com/go-sql-driver/mysql"
)
var (
// DBMain for main DB connection
DBMain *gorm.DB
// DBMainConnStr for main DB connection
DBMainConnStr string
)
// DateTimeFormat Long date time mysql format
const DateTimeFormat = "2006-01-02 15:04:05"
// DateFormat Date mysql format
const DateFormat = "2006-01-02"
// TimeFormat Time mysql format
const TimeFormat = "15:04:05"
// mariadbConfig for init connection
type mariadbConfig struct {
// Optional.
Username, Password string
// Host of the mariadb instance.
//
// If set, UnixSocket should be unset.
Host string
// Port of the mariadb instance.
//
// If set, UnixSocket should be unset.
Port int
// UnixSocket is the filepath to a unix socket.
//
// If set, Host and Port should be unset.
UnixSocket string
}
// mariadbDStoreString returns a connection string suitable for sql.Open.
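// For example (illustrative values), a TCP config yields:
//
//	user:pass@tcp([127.0.0.1]:3306)/mydb
//
// and a unix-socket config yields:
//
//	user:pass@unix(/var/run/mysqld/mysqld.sock)/mydb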
func (c mariadbConfig) mariadbDStoreString(databaseName string) string {
var cred string
// [username[:password]@]
if c.Username != "" {
cred = c.Username
if c.Password != "" {
cred = cred + ":" + c.Password
}
cred = cred + "@"
}
if c.UnixSocket != "" {
return fmt.Sprintf("%sunix(%s)/%s", cred, c.UnixSocket, databaseName)
}
return fmt.Sprintf("%stcp([%s]:%d)/%s", cred, c.Host, c.Port, databaseName)
}
// NewMariadbDB creates a new database connection backed by a given mariadb server.
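//
// A minimal usage sketch (values are illustrative):
//
//	conn, connStr, err := NewMariadbDB("mydb", "user", "pass", "127.0.0.1", "3306", "", 5, 10, true)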
func NewMariadbDB(dbname string, username string, password string, host string, portStr string, socket string, maxIdleConns int, maxOpenConns int, parseTime bool) (conn *gorm.DB, connStr string, err error) {
// Use system default database if empty
if len(dbname) == 0 {
dbname = os.Getenv("DB_NAME")
}
port, err := strconv.Atoi(portStr)
if err != nil {
return nil, connStr, fmt.Errorf("mariadb: port must be number string: %v", err)
}
dbConnOption := mariadbConfig{
Username: username,
Password: password,
Host: host,
Port: port,
UnixSocket: socket,
}
connStr = dbConnOption.mariadbDStoreString(dbname)
connStr = connStr + "?loc=Asia%2FBangkok"
connStr = connStr + "&charset=utf8mb4,utf8"
if parseTime {
connStr = connStr + "&parseTime=true"
}
// The connection string must not be empty
if len(connStr) == 0 {
return nil, connStr, fmt.Errorf("mariadb: connStr needed")
}
// Open connection to database
conn, err = gorm.Open("mysql", connStr)
if err != nil {
return nil, connStr, fmt.Errorf("mariadb: could not get a connection: %v", err)
}
// Set max open connection at time
if maxOpenConns > 0 {
conn.DB().SetMaxOpenConns(maxOpenConns)
} else {
// Default value follows the mariadb.js pool default
conn.DB().SetMaxOpenConns(10)
}
// Set max idle connection at time
if maxIdleConns > 0 {
conn.DB().SetMaxIdleConns(maxIdleConns)
} else {
// Default value follows the mariadb.js pool default
conn.DB().SetMaxIdleConns(5)
}
// Timeout for long-lived connections,
// following the mariadb.js pool default
conn.DB().SetConnMaxLifetime(1800 * time.Second)
return
}
// GetDBMainConn get db connection
func GetDBMainConn() (conn *gorm.DB, err error) {
checkPing := DBMain.DB().Ping()
if checkPing != nil {
log.Printf("MariaDB: %+v", checkPing)
DBMain, err = gorm.Open("mysql", DBMainConnStr)
if err != nil {
log.Printf("MariaDB: %+v", err)
return nil, fmt.Errorf("MariaDB: could not get a connection: %v", err)
}
}
return DBMain, err
}
| [
"\"DB_NAME\""
] | [] | [
"DB_NAME"
] | [] | ["DB_NAME"] | go | 1 | 0 | |
session/vote/main.go | package main
import (
"context"
"encoding/json"
"os"
"strings"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
"github.com/jonsabados/goauth/aws"
"github.com/rs/zerolog"
"github.com/jonsabados/pointypoints/api"
"github.com/jonsabados/pointypoints/cors"
"github.com/jonsabados/pointypoints/lambdautil"
"github.com/jonsabados/pointypoints/logging"
"github.com/jonsabados/pointypoints/profile"
"github.com/jonsabados/pointypoints/session"
)
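// NewHandler wires the supplied collaborators into the Lambda handler: it loads
// the session named in the path parameters, records the caller's vote (as
// facilitator or participant), notifies the other participants, and maps
// failures to the appropriate API responses.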
func NewHandler(prepareLogs logging.Preparer, corsHeaders cors.ResponseHeaderBuilder, loadSession session.Loader, recordVote session.VoteRecorder, notifyParticipants session.ChangeNotifier) func(ctx context.Context, request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
return func(ctx context.Context, request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
ctx = prepareLogs(ctx)
r := new(session.VoteRequest)
err := json.Unmarshal([]byte(request.Body), r)
if err != nil {
zerolog.Ctx(ctx).Warn().Err(err).Msg("error reading load request body")
return api.NewInternalServerError(ctx, corsHeaders(ctx, request.Headers)), nil
}
// If a request made it to the lambda without a session or connection path param, something has gone wrong upstream and a panic is OK
sessionID := request.PathParameters["session"]
sess, err := loadSession(ctx, sessionID)
if err != nil {
zerolog.Ctx(ctx).Error().Err(err).Msg("error reading session")
return api.NewPermissionDeniedResponse(ctx, corsHeaders(ctx, request.Headers)), nil
}
if sess == nil {
zerolog.Ctx(ctx).Warn().Str("sessionID", sessionID).Msg("session not found")
return api.NewPermissionDeniedResponse(ctx, corsHeaders(ctx, request.Headers)), nil
}
zerolog.Ctx(ctx).Debug().Interface("session", sess).Msg("loaded session")
userID := request.PathParameters["user"]
var user *session.User
userType := session.Participant
if sess.FacilitatorPoints && sess.Facilitator.UserID == userID {
userType = session.Facilitator
user = &sess.Facilitator
} else {
for i := 0; i < len(sess.Participants); i++ {
if sess.Participants[i].UserID == userID {
user = &sess.Participants[i]
break
}
}
}
if user == nil {
return api.NewPermissionDeniedResponse(ctx, corsHeaders(ctx, request.Headers)), nil
}
user.CurrentVote = &r.Vote
principal, err := aws.ExtractPrincipal(request)
if err != nil {
zerolog.Ctx(ctx).Warn().Err(err).Msg("error extracting principal")
return api.NewInternalServerError(ctx, corsHeaders(ctx, request.Headers)), nil
}
err = recordVote(ctx, principal, sessionID, *user, userType)
if err != nil {
zerolog.Ctx(ctx).Error().Err(err).Msg("error saving session")
return api.NewInternalServerError(ctx, corsHeaders(ctx, request.Headers)), nil
}
err = notifyParticipants(ctx, *sess)
if err != nil {
zerolog.Ctx(ctx).Error().Err(err).Msg("error notifying participants")
return api.NewInternalServerError(ctx, corsHeaders(ctx, request.Headers)), nil
}
return api.NewNoContentResponse(ctx, corsHeaders(ctx, request.Headers)), nil
}
}
func main() {
lambdautil.CoreStartup()
logPreparer := logging.NewPreparer()
sess := lambdautil.DefaultAWSConfig()
statsFactory := profile.NewStatsUpdateFactory(lambdautil.ProfileTable)
dynamo := lambdautil.NewDynamoClient(sess)
loader := session.NewLoader(dynamo, lambdautil.SessionTable)
voteRecorder := session.NewVoteRecorder(dynamo, lambdautil.SessionTable, lambdautil.SessionTimeout, statsFactory)
notifier := session.NewChangeNotifier(dynamo, lambdautil.SessionTable, lambdautil.NewProdMessageDispatcher())
allowedDomains := strings.Split(os.Getenv("ALLOWED_ORIGINS"), ",")
lambda.Start(NewHandler(logPreparer, cors.NewResponseHeaderBuilder(allowedDomains), loader, voteRecorder, notifier))
}
| [
"\"ALLOWED_ORIGINS\""
] | [] | [
"ALLOWED_ORIGINS"
] | [] | ["ALLOWED_ORIGINS"] | go | 1 | 0 | |
service/gmail_test.go | package service
import (
"bytes"
"encoding/json"
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"log"
"net/http"
"net/http/httptest"
"os"
)
var (
credentialJSON = os.Getenv("GMAIL_CREDENTIAL_JSON")
emailAddress = os.Getenv("GMAIL_EMAIL_ADDRESS")
)
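// These tests read GMAIL_CREDENTIAL_JSON (assumed to be a base64-encoded credential
// blob) and GMAIL_EMAIL_ADDRESS from the environment before exercising the handlers.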
var _ = Describe("AccessToken with invalid base64 CREDENTIAL_JSON", func() {
os.Setenv("CREDENTIAL_JSON", "mockJSON")
gmail := GmailArgument{}
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/authorization", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(AccessToken)
handler.ServeHTTP(recorder, request)
Describe("Authorization", func() {
Context("authorization", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("AccessToken with invalid args", func() {
os.Setenv("CREDENTIAL_JSON", credentialJSON)
gmail := []byte(`{"status":false}`)
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/authorization", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(AccessToken)
handler.ServeHTTP(recorder, request)
Describe("Authorization", func() {
Context("authorization", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("Refresh Token with invalid CREDENTIAL_JSON", func() {
os.Setenv("CREDENTIAL_JSON", credentialJSON)
gmail := []byte(`{"status":false}`)
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/refreshToken", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(RefreshToken)
handler.ServeHTTP(recorder, request)
Describe("Refresh Token", func() {
Context("Refresh Token", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("Refresh Token with invalid args", func() {
os.Setenv("CREDENTIAL_JSON", credentialJSON)
gmail := []byte(`{"status":false}`)
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/refreshToken", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(RefreshToken)
handler.ServeHTTP(recorder, request)
Describe("Refresh Token", func() {
Context("Refresh Token", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("Refresh Token with invalid token object", func() {
os.Setenv("CREDENTIAL_JSON", credentialJSON)
tok := Token{AccessToken: "mockAccessToken", TokenType: "mockTokenType", Expiry: "mockExpiry"}
gmail := GmailArgument{TokenObj: tok}
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/refreshToken", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(RefreshToken)
handler.ServeHTTP(recorder, request)
Describe("Refresh Token", func() {
Context("Refresh Token", func() {
It("Should result http.StatusOK", func() {
Expect(http.StatusOK).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("HealthCheck", func() {
gmail := GmailArgument{}
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/health", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(HealthCheck)
handler.ServeHTTP(recorder, request)
Describe("Health Check", func() {
Context("health check", func() {
It("Should result http.StatusOK", func() {
Expect(http.StatusOK).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("Authorization with invalid base64 CREDENTIAL_JSON", func() {
os.Setenv("CREDENTIAL_JSON", "mockJSON")
gmail := GmailArgument{}
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/authorization", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(Authorization)
handler.ServeHTTP(recorder, request)
Describe("Authorization", func() {
Context("authorization", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("Authorization with invalid args", func() {
os.Setenv("CREDENTIAL_JSON", credentialJSON)
gmail := []byte(`{"status":false}`)
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/authorization", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(Authorization)
handler.ServeHTTP(recorder, request)
Describe("Authorization", func() {
Context("authorization", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("Authorization with valid base64 CREDENTIAL_JSON", func() {
os.Setenv("CREDENTIAL_JSON", credentialJSON)
gmail := GmailArgument{}
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/authorization", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(Authorization)
handler.ServeHTTP(recorder, request)
Describe("Authorization", func() {
Context("authorization", func() {
It("Should result http.StatusOK", func() {
Expect(http.StatusOK).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("AccessToken with invalid base64 CREDENTIAL_JSON", func() {
os.Setenv("CREDENTIAL_JSON", "mockJSON")
gmail := GmailArgument{}
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/authorization", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(AccessToken)
handler.ServeHTTP(recorder, request)
Describe("Authorization", func() {
Context("authorization", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("AccessToken with invalid args", func() {
os.Setenv("CREDENTIAL_JSON", credentialJSON)
gmail := []byte(`{"status":false}`)
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/authorization", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(AccessToken)
handler.ServeHTTP(recorder, request)
Describe("Authorization", func() {
Context("authorization", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("AccessToken with invalid auth code", func() {
os.Setenv("CREDENTIAL_JSON", credentialJSON)
gmail := GmailArgument{AuthorizationCode: "mockAuthCode"}
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/accessToken", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(AccessToken)
handler.ServeHTTP(recorder, request)
Describe("Access Token", func() {
Context("access Token", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("Send Mail with invalid base64 CREDENTIAL_JSON", func() {
os.Setenv("CREDENTIAL_JSON", "mockJSON")
gmail := GmailArgument{}
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/sendMail", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(SendMail)
handler.ServeHTTP(recorder, request)
Describe("Access Token", func() {
Context("access Token", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("Access Token with invalid args", func() {
os.Setenv("CREDENTIAL_JSON", credentialJSON)
gmail := []byte(`{"status":false}`)
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/sendMail", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(SendMail)
handler.ServeHTTP(recorder, request)
Describe("Access Token", func() {
Context("access Token", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("Send mail with valid arg", func() {
os.Setenv("CREDENTIAL_JSON", credentialJSON)
toList := []string{emailAddress}
gmail := GmailArgument{AccessToken: "mockAccessToken", UserID: emailAddress, To: toList, Subject: "Test Subject", Body: "Mail body goes here"}
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/sendMail", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(SendMail)
handler.ServeHTTP(recorder, request)
Describe("Access Token", func() {
Context("access Token", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("Subscribe gmail account for new incoming message", func() {
os.Setenv("CREDENTIAL_JSON", credentialJSON)
sub := []byte(`{"status":false}`)
requestBody := new(bytes.Buffer)
err := json.NewEncoder(requestBody).Encode(sub)
if err != nil {
fmt.Println(" request err :", err)
}
req, err := http.NewRequest("POST", "/receive", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(ReceiveMail)
handler.ServeHTTP(recorder, req)
Describe("Subscribe", func() {
Context("Subscribe", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("Subscribe gmail account for new incoming message", func() {
os.Setenv("CREDENTIAL_JSON", credentialJSON)
data := RequestParam{UserID: emailAddress, AccessToken: "mockAccessToken"}
sub := Subscribe{Endpoint: "https://webhook.site/3cee781d-0a87-4966-bdec-9635436294e9",
ID: "1",
IsTesting: true,
Data: data,
}
requestBody := new(bytes.Buffer)
err := json.NewEncoder(requestBody).Encode(sub)
if err != nil {
fmt.Println(" request err :", err)
}
req, err := http.NewRequest("POST", "/receive", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(ReceiveMail)
handler.ServeHTTP(recorder, req)
Describe("Subscribe", func() {
Context("Subscribe", func() {
It("Should result http.StatusOK", func() {
Expect(http.StatusOK).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("Create label with invalid base64 CREDENTIAL_JSON", func() {
os.Setenv("CREDENTIAL_JSON", "mockJSON")
gmail := GmailArgument{}
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/createLabel", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(CreateLabel)
handler.ServeHTTP(recorder, request)
Describe("Create Label", func() {
Context("Create label", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("Create label with invalid args", func() {
os.Setenv("CREDENTIAL_JSON", credentialJSON)
gmail := []byte(`{"status":false}`)
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/createLabel", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(CreateLabel)
handler.ServeHTTP(recorder, request)
Describe("Create Label", func() {
Context("Create label", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("Create label with valid arg", func() {
os.Setenv("CREDENTIAL_JSON", credentialJSON)
gmail := GmailArgument{UserID: emailAddress}
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/createLabel", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(CreateLabel)
handler.ServeHTTP(recorder, request)
Describe("Create Label", func() {
Context("Create label", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("Delete label with invalid base64 CREDENTIAL_JSON", func() {
os.Setenv("CREDENTIAL_JSON", "mockJSON")
gmail := GmailArgument{}
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/deleteLabel", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(DeleteLabel)
handler.ServeHTTP(recorder, request)
Describe("Delete Label", func() {
Context("delete label", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("Delete label with invalid args", func() {
os.Setenv("CREDENTIAL_JSON", credentialJSON)
gmail := []byte(`{"status":false}`)
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/deleteLabel", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(DeleteLabel)
handler.ServeHTTP(recorder, request)
Describe("Delete Label", func() {
Context("delete label", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("Delete label with valid arg", func() {
os.Setenv("CREDENTIAL_JSON", credentialJSON)
gmail := GmailArgument{UserID: emailAddress, LabelID: "mockLableID"}
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/deleteLabel", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(DeleteLabel)
handler.ServeHTTP(recorder, request)
Describe("Delete Label", func() {
Context("Delete label", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("Patch label with invalid base64 CREDENTIAL_JSON", func() {
os.Setenv("CREDENTIAL_JSON", "mockJSON")
gmail := GmailArgument{}
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/patchLabel", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(PatchLabel)
handler.ServeHTTP(recorder, request)
Describe("Patch Label", func() {
Context("patch label", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("Patch label with invalid args", func() {
os.Setenv("CREDENTIAL_JSON", credentialJSON)
gmail := []byte(`{"status":false}`)
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/patchLabel", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(PatchLabel)
handler.ServeHTTP(recorder, request)
Describe("Patch Label", func() {
Context("patch label", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("Patch label with valid arg", func() {
os.Setenv("CREDENTIAL_JSON", credentialJSON)
gmail := GmailArgument{UserID: emailAddress}
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/patchLabel", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(PatchLabel)
handler.ServeHTTP(recorder, request)
Describe("Patch Label", func() {
Context("Patch label", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("List label with invalid base64 CREDENTIAL_JSON", func() {
os.Setenv("CREDENTIAL_JSON", "mockJSON")
gmail := GmailArgument{}
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/labelList", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(ListLabel)
handler.ServeHTTP(recorder, request)
Describe("List Label", func() {
Context("List label", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("List label with invalid args", func() {
os.Setenv("CREDENTIAL_JSON", credentialJSON)
gmail := []byte(`{"status":false}`)
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/labelList", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(ListLabel)
handler.ServeHTTP(recorder, request)
Describe("List Label", func() {
Context("list label", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("List label with valid arg", func() {
os.Setenv("CREDENTIAL_JSON", credentialJSON)
gmail := GmailArgument{UserID: emailAddress}
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/listLabel", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(ListLabel)
handler.ServeHTTP(recorder, request)
Describe("List Label", func() {
Context("List label", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("Create filter with invalid base64 CREDENTIAL_JSON", func() {
os.Setenv("CREDENTIAL_JSON", "mockJSON")
gmail := GmailFilter{}
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/createFilter", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(CreateFilter)
handler.ServeHTTP(recorder, request)
Describe("Create Filter", func() {
Context("Create Filter", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("Create Filter with invalid args", func() {
os.Setenv("CREDENTIAL_JSON", credentialJSON)
gmail := []byte(`{"status":false}`)
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/createFilter", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(CreateFilter)
handler.ServeHTTP(recorder, request)
Describe("Create Filter", func() {
Context("create filter", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("Create Filter with valid arg", func() {
os.Setenv("CREDENTIAL_JSON", credentialJSON)
gmail := GmailFilter{UserID: emailAddress, From: emailAddress}
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/createFilter", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(CreateFilter)
handler.ServeHTTP(recorder, request)
Describe("Create Filter", func() {
Context("create filter", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("Delete filter with invalid base64 CREDENTIAL_JSON", func() {
os.Setenv("CREDENTIAL_JSON", "mockJSON")
gmail := GmailFilter{}
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/deleteFilter", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(DeleteFilter)
handler.ServeHTTP(recorder, request)
Describe("Delete Filter", func() {
Context("delete filter", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("Delete Filter with invalid args", func() {
os.Setenv("CREDENTIAL_JSON", credentialJSON)
gmail := []byte(`{"status":false}`)
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/deleteFilter", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(DeleteFilter)
handler.ServeHTTP(recorder, request)
Describe("Delete Filter", func() {
Context("delete filter", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("Delete Filter with valid arg", func() {
os.Setenv("CREDENTIAL_JSON", credentialJSON)
gmail := GmailFilter{UserID: emailAddress, FilterID: "mockFliterID"}
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/deleteFilter", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(DeleteFilter)
handler.ServeHTTP(recorder, request)
Describe("Delete Filter", func() {
Context("delete filter", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("List filter with invalid base64 CREDENTIAL_JSON", func() {
os.Setenv("CREDENTIAL_JSON", "mockJSON")
gmail := GmailArgument{}
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/filterList", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(FilterList)
handler.ServeHTTP(recorder, request)
Describe("List Filter", func() {
Context("List filter", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("List filter with invalid args", func() {
os.Setenv("CREDENTIAL_JSON", credentialJSON)
gmail := []byte(`{"status":false}`)
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/filterList", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(FilterList)
handler.ServeHTTP(recorder, request)
Describe("List Filter", func() {
Context("List filter", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
var _ = Describe("List filter with valid arg", func() {
os.Setenv("CREDENTIAL_JSON", credentialJSON)
gmail := GmailArgument{UserID: emailAddress}
requestBody := new(bytes.Buffer)
jsonErr := json.NewEncoder(requestBody).Encode(gmail)
if jsonErr != nil {
log.Fatal(jsonErr)
}
request, err := http.NewRequest("POST", "/filterList", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(FilterList)
handler.ServeHTTP(recorder, request)
Describe("List Filter", func() {
Context("List filter", func() {
It("Should result http.StatusBadRequest", func() {
Expect(http.StatusBadRequest).To(Equal(recorder.Code))
})
})
})
})
| [
"\"GMAIL_CREDENTIAL_JSON\"",
"\"GMAIL_EMAIL_ADDRESS\""
] | [] | [
"GMAIL_EMAIL_ADDRESS",
"GMAIL_CREDENTIAL_JSON"
] | [] | ["GMAIL_EMAIL_ADDRESS", "GMAIL_CREDENTIAL_JSON"] | go | 2 | 0 | |
config/wsgi.py | """
WSGI config for promun project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'promun.config.settings.production')
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
test/unit/test_serving.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
from mock import patch, MagicMock
import numpy as np
import pytest
import os
from sklearn.base import BaseEstimator
from sagemaker_containers.beta.framework import (content_types, encoders, errors)
from sagemaker_sklearn_container import serving
from sagemaker_sklearn_container.serving import default_model_fn, import_module
@pytest.fixture(scope='module', name='np_array')
def fixture_np_array():
return np.ones((2, 2))
class FakeEstimator(BaseEstimator):
def __init__(self):
pass
@staticmethod
def predict(input):
return
def dummy_execution_parameters_fn():
return {'dummy': 'dummy'}
class DummyUserModule:
def __init__(self):
self.execution_parameters_fn = dummy_execution_parameters_fn
def model_fn(self, model_dir):
pass
@pytest.mark.parametrize(
'json_data, expected', [
('[42, 6, 9]', np.array([42, 6, 9])),
('[42.0, 6.0, 9.0]', np.array([42., 6., 9.])),
('["42", "6", "9"]', np.array(['42', '6', '9'], dtype=np.float32)),
(u'["42", "6", "9"]', np.array([u'42', u'6', u'9'], dtype=np.float32))])
def test_input_fn_json(json_data, expected):
actual = serving.default_input_fn(json_data, content_types.JSON)
np.testing.assert_equal(actual, expected)
@pytest.mark.parametrize(
'csv_data, expected', [
('42\n6\n9\n', np.array([42, 6, 9], dtype=np.float32)),
('42.0\n6.0\n9.0\n', np.array([42., 6., 9.], dtype=np.float32)),
('42\n6\n9\n', np.array([42, 6, 9], dtype=np.float32))])
def test_input_fn_csv(csv_data, expected):
deserialized_np_array = serving.default_input_fn(csv_data, content_types.CSV)
assert np.array_equal(expected, deserialized_np_array)
@pytest.mark.parametrize('np_array', ([42, 6, 9], [42., 6., 9.]))
def test_input_fn_npz(np_array):
input_data = encoders.array_to_npy(np_array)
deserialized_np_array = serving.default_input_fn(input_data, content_types.NPY)
assert np.array_equal(np_array, deserialized_np_array)
float_32_array = np.array(np_array, dtype=np.float32)
input_data = encoders.array_to_npy(float_32_array)
deserialized_np_array = serving.default_input_fn(input_data, content_types.NPY)
assert np.array_equal(float_32_array, deserialized_np_array)
float_64_array = np.array(np_array, dtype=np.float64)
input_data = encoders.array_to_npy(float_64_array)
deserialized_np_array = serving.default_input_fn(input_data, content_types.NPY)
assert np.array_equal(float_64_array, deserialized_np_array)
def test_input_fn_bad_content_type():
with pytest.raises(errors.UnsupportedFormatError):
serving.default_input_fn('', 'application/not_supported')
def test_default_model_fn():
with pytest.raises(NotImplementedError):
default_model_fn('model_dir')
def test_predict_fn(np_array):
mock_estimator = FakeEstimator()
with patch.object(mock_estimator, 'predict') as mock:
serving.default_predict_fn(np_array, mock_estimator)
mock.assert_called_once()
def test_output_fn_json(np_array):
response = serving.default_output_fn(np_array, content_types.JSON)
assert response.get_data(as_text=True) == encoders.array_to_json(np_array.tolist())
assert response.content_type == content_types.JSON
def test_output_fn_csv(np_array):
response = serving.default_output_fn(np_array, content_types.CSV)
assert response.get_data(as_text=True) == '1.0,1.0\n1.0,1.0\n'
assert content_types.CSV in response.content_type
def test_output_fn_npz(np_array):
response = serving.default_output_fn(np_array, content_types.NPY)
assert response.get_data() == encoders.array_to_npy(np_array)
assert response.content_type == content_types.NPY
def test_input_fn_bad_accept():
with pytest.raises(errors.UnsupportedFormatError):
serving.default_output_fn('', 'application/not_supported')
@patch('importlib.import_module')
def test_import_module_execution_parameters(importlib_module_mock):
importlib_module_mock.return_value = DummyUserModule()
_, execution_parameters_fn = import_module('dummy_module', 'dummy_dir')
assert execution_parameters_fn == dummy_execution_parameters_fn
@patch('sagemaker_sklearn_container.serving.server')
def test_serving_entrypoint_start_gunicorn(mock_server):
mock_server.start = MagicMock()
serving.serving_entrypoint()
mock_server.start.assert_called_once()
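# With SAGEMAKER_MULTI_MODEL set, the entrypoint is expected to start the
# multi-model server instead of the gunicorn-based single-model server.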
@patch.dict(os.environ, {'SAGEMAKER_MULTI_MODEL': 'True', })
@patch('sagemaker_sklearn_container.serving.start_model_server')
def test_serving_entrypoint_start_mms(mock_start_model_server):
serving.serving_entrypoint()
mock_start_model_server.assert_called_once()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
controller/scheduler/discoverd.go | package main
import (
"os"
"time"
discoverd "github.com/drycc/drycc/discoverd/client"
"github.com/drycc/drycc/pkg/shutdown"
"github.com/drycc/drycc/pkg/stream"
"github.com/inconshreveable/log15"
)
const serviceName = "controller-scheduler"
type Discoverd interface {
Register() bool
LeaderCh() chan bool
}
func newDiscoverdWrapper(l log15.Logger) *discoverdWrapper {
return &discoverdWrapper{
leader: make(chan bool),
logger: l,
}
}
type discoverdWrapper struct {
leader chan bool
logger log15.Logger
}
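// Register adds the controller-scheduler service, starts a heartbeat for this
// instance, subscribes to service leader events, and blocks until the first
// leader event arrives, returning whether this instance is the leader.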
func (d *discoverdWrapper) Register() bool {
log := d.logger.New("fn", "discoverd.Register")
var hb discoverd.Heartbeater
for {
var err error
log.Info("registering with service discovery")
hb, err = discoverd.AddServiceAndRegister(serviceName, ":"+os.Getenv("PORT"))
if err == nil {
break
}
log.Error("error registering with service discovery", "err", err)
time.Sleep(time.Second)
}
shutdown.BeforeExit(func() { hb.Close() })
selfAddr := hb.Addr()
log = log.New("self.addr", selfAddr)
service := discoverd.NewService(serviceName)
var leaders chan *discoverd.Instance
var stream stream.Stream
connect := func() (err error) {
log.Info("connecting service leader stream")
leaders = make(chan *discoverd.Instance)
stream, err = service.Leaders(leaders)
if err != nil {
log.Error("error connecting service leader stream", "err", err)
}
return
}
go func() {
for {
for {
if err := connect(); err == nil {
break
}
time.Sleep(100 * time.Millisecond)
}
for leader := range leaders {
if leader == nil {
// a nil leader indicates there are no instances for
// the service, ignore and wait for an actual leader
log.Warn("received nil leader event")
continue
}
log.Info("received leader event", "leader.addr", leader.Addr)
d.leader <- leader.Addr == selfAddr
}
log.Warn("service leader stream disconnected", "err", stream.Err())
}
}()
start := time.Now()
tick := time.Tick(30 * time.Second)
for {
select {
case isLeader := <-d.leader:
return isLeader
case <-tick:
log.Warn("still waiting for current service leader", "duration", time.Since(start))
}
}
}
func (d *discoverdWrapper) LeaderCh() chan bool {
return d.leader
}
| [
"\"PORT\""
] | [] | [
"PORT"
] | [] | ["PORT"] | go | 1 | 0 | |
pkg/state/state.go | package state
import (
"crypto/sha1"
"encoding/hex"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"github.com/roboll/helmfile/pkg/environment"
"github.com/roboll/helmfile/pkg/event"
"github.com/roboll/helmfile/pkg/helmexec"
"github.com/roboll/helmfile/pkg/remote"
"github.com/roboll/helmfile/pkg/tmpl"
"regexp"
"github.com/tatsushid/go-prettytable"
"go.uber.org/zap"
"gopkg.in/yaml.v2"
)
// HelmState structure for the helmfile
type HelmState struct {
basePath string
FilePath string
// DefaultValues is the set of default values to be overridden by environment values and command-line overrides
DefaultValues []interface{} `yaml:"values,omitempty"`
Environments map[string]EnvironmentSpec `yaml:"environments,omitempty"`
Bases []string `yaml:"bases,omitempty"`
HelmDefaults HelmSpec `yaml:"helmDefaults,omitempty"`
Helmfiles []SubHelmfileSpec `yaml:"helmfiles,omitempty"`
DeprecatedContext string `yaml:"context,omitempty"`
DeprecatedReleases []ReleaseSpec `yaml:"charts,omitempty"`
Namespace string `yaml:"namespace,omitempty"`
Repositories []RepositorySpec `yaml:"repositories,omitempty"`
Releases []ReleaseSpec `yaml:"releases,omitempty"`
Selectors []string `yaml:"-"`
Templates map[string]TemplateSpec `yaml:"templates"`
Env environment.Environment `yaml:"-"`
logger *zap.SugaredLogger
readFile func(string) ([]byte, error)
removeFile func(string) error
fileExists func(string) (bool, error)
glob func(string) ([]string, error)
tempDir func(string, string) (string, error)
runner helmexec.Runner
helm helmexec.Interface
}
// SubHelmfileSpec defines the subhelmfile path and options
type SubHelmfileSpec struct {
//path or glob pattern for the sub helmfiles
Path string `yaml:"path,omitempty"`
//chosen selectors for the sub helmfiles
Selectors []string `yaml:"selectors,omitempty"`
//do the sub helmfiles inherits from parent selectors
SelectorsInherited bool `yaml:"selectorsInherited,omitempty"`
Environment SubhelmfileEnvironmentSpec
}
type SubhelmfileEnvironmentSpec struct {
OverrideValues []interface{} `yaml:"values,omitempty"`
}
// HelmSpec to defines helmDefault values
type HelmSpec struct {
KubeContext string `yaml:"kubeContext,omitempty"`
TillerNamespace string `yaml:"tillerNamespace,omitempty"`
Tillerless bool `yaml:"tillerless"`
Args []string `yaml:"args,omitempty"`
Verify bool `yaml:"verify"`
// Devel, when set to true, use development versions, too. Equivalent to version '>0.0.0-0'
Devel bool `yaml:"devel"`
// Wait, if set to true, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment are in a ready state before marking the release as successful
Wait bool `yaml:"wait"`
// Timeout is the time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks, and waits on pod/pvc/svc/deployment readiness) (default 300)
Timeout int `yaml:"timeout"`
// RecreatePods, when set to true, instructs helmfile to perform a pod restart for the resource if applicable
RecreatePods bool `yaml:"recreatePods"`
// Force, when set to true, forces resource update through delete/recreate if needed
Force bool `yaml:"force"`
// Atomic, when set to true, restore previous state in case of a failed install/upgrade attempt
Atomic bool `yaml:"atomic"`
TLS bool `yaml:"tls"`
TLSCACert string `yaml:"tlsCACert,omitempty"`
TLSKey string `yaml:"tlsKey,omitempty"`
TLSCert string `yaml:"tlsCert,omitempty"`
}
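// Illustrative only: a minimal `helmDefaults` snippet corresponding to the yaml tags
// above. The values shown are arbitrary examples, not defaults shipped with helmfile.
//
//   helmDefaults:
//     kubeContext: my-cluster       # --kube-context
//     tillerNamespace: kube-system  # --tiller-namespace
//     wait: true                    # --wait
//     timeout: 600                  # --timeout 600 (seconds)
//     atomic: true                  # --atomic
//     args:
//       - --history-max=10          # extra args passed through to helm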
// RepositorySpec defines the values for a helm repo
type RepositorySpec struct {
Name string `yaml:"name,omitempty"`
URL string `yaml:"url,omitempty"`
CaFile string `yaml:"caFile,omitempty"`
CertFile string `yaml:"certFile,omitempty"`
KeyFile string `yaml:"keyFile,omitempty"`
Username string `yaml:"username,omitempty"`
Password string `yaml:"password,omitempty"`
}
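// Illustrative only: a `repositories` entry matching the yaml tags above; the URL
// and credentials below are placeholders, not values taken from any real setup.
//
//   repositories:
//     - name: stable
//       url: https://kubernetes-charts.storage.googleapis.com
//     - name: private-charts
//       url: https://charts.example.com
//       username: deployer
//       password: not-a-real-password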
// ReleaseSpec defines the structure of a helm release
type ReleaseSpec struct {
// Chart is the name of the chart being installed to create this release
Chart string `yaml:"chart,omitempty"`
Version string `yaml:"version,omitempty"`
Verify *bool `yaml:"verify,omitempty"`
// Devel, when set to true, use development versions, too. Equivalent to version '>0.0.0-0'
Devel *bool `yaml:"devel,omitempty"`
// Wait, if set to true, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment are in a ready state before marking the release as successful
Wait *bool `yaml:"wait,omitempty"`
// Timeout is the time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks, and waits on pod/pvc/svc/deployment readiness) (default 300)
Timeout *int `yaml:"timeout,omitempty"`
// RecreatePods, when set to true, instructs helmfile to perform a pods restart for the resource if applicable
RecreatePods *bool `yaml:"recreatePods,omitempty"`
// Force, when set to true, forces resource update through delete/recreate if needed
Force *bool `yaml:"force,omitempty"`
// Installed, when set to false, `delete --purge` the release
Installed *bool `yaml:"installed,omitempty"`
// Atomic, when set to true, restore previous state in case of a failed install/upgrade attempt
Atomic *bool `yaml:"atomic,omitempty"`
// MissingFileHandler is set to either "Error" or "Warn". "Error" instructs helmfile to fail when unable to find a values or secrets file. When "Warn", it prints the file and continues.
// The default value for MissingFileHandler is "Error".
MissingFileHandler *string `yaml:"missingFileHandler,omitempty"`
// Hooks is a list of extension points paired with operations that are executed at specific points in the lifecycle of releases defined in helmfile
Hooks []event.Hook `yaml:"hooks,omitempty"`
// Name is the name of this release
Name string `yaml:"name,omitempty"`
Namespace string `yaml:"namespace,omitempty"`
Labels map[string]string `yaml:"labels,omitempty"`
Values []interface{} `yaml:"values,omitempty"`
Secrets []string `yaml:"secrets,omitempty"`
SetValues []SetValue `yaml:"set,omitempty"`
ValuesTemplate []interface{} `yaml:"valuesTemplate,omitempty"`
SetValuesTemplate []SetValue `yaml:"setTemplate,omitempty"`
// The 'env' section is not really necessary any longer, as 'set' would now provide the same functionality
EnvValues []SetValue `yaml:"env,omitempty"`
ValuesPathPrefix string `yaml:"valuesPathPrefix,omitempty"`
TillerNamespace string `yaml:"tillerNamespace,omitempty"`
Tillerless *bool `yaml:"tillerless,omitempty"`
KubeContext string `yaml:"kubeContext,omitempty"`
TLS *bool `yaml:"tls,omitempty"`
TLSCACert string `yaml:"tlsCACert,omitempty"`
TLSKey string `yaml:"tlsKey,omitempty"`
TLSCert string `yaml:"tlsCert,omitempty"`
// These values are used in templating
TillerlessTemplate *string `yaml:"tillerlessTemplate,omitempty"`
VerifyTemplate *string `yaml:"verifyTemplate,omitempty"`
WaitTemplate *string `yaml:"waitTemplate,omitempty"`
InstalledTemplate *string `yaml:"installedTemplate,omitempty"`
// These settings require helm-x integration to work
Dependencies []Dependency `yaml:"dependencies,omitempty"`
JSONPatches []interface{} `yaml:"jsonPatches,omitempty"`
StrategicMergePatches []interface{} `yaml:"strategicMergePatches,omitempty"`
Adopt []string `yaml:"adopt,omitempty"`
// generatedValues are values that need to be cleaned up on exit
generatedValues []string
//version of the chart that has actually been installed, because the desired version may be fuzzy (~2.0.0)
installedVersion string
}
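// Illustrative only: a sketch of a single `releases` entry exercising a few of the
// fields above. The chart, file names and labels are made-up examples.
//
//   releases:
//     - name: my-api
//       namespace: backend
//       chart: stable/example-chart
//       version: ~1.2.0
//       labels:
//         tier: backend
//       values:
//         - values/my-api.yaml
//       secrets:
//         - secrets/my-api.yaml
//       set:
//         - name: image.tag
//           value: v1.2.3
//       installed: true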
// SetValue is a single key/value entry to set on a helm release
type SetValue struct {
Name string `yaml:"name,omitempty"`
Value string `yaml:"value,omitempty"`
File string `yaml:"file,omitempty"`
Values []string `yaml:"values,omitempty"`
}
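// Illustrative only: the three forms a `set` entry can take, corresponding to the
// Value, File and Values fields above (names and paths are hypothetical). See
// namespaceAndValuesFlags below for how each form is turned into helm flags:
//
//   set:
//     - name: image.tag
//       value: v1.2.3                  # --set image.tag=v1.2.3
//     - name: config
//       file: ./config.yaml            # --set-file config=./config.yaml
//     - name: hosts
//       values: [a.example.com, b.example.com]   # --set hosts={a.example.com,b.example.com}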
// AffectedReleases holds the lists of releases that were updated, deleted, or failed
type AffectedReleases struct {
Upgraded []*ReleaseSpec
Deleted []*ReleaseSpec
Failed []*ReleaseSpec
}
const DefaultEnv = "default"
const MissingFileHandlerError = "Error"
const MissingFileHandlerInfo = "Info"
const MissingFileHandlerWarn = "Warn"
const MissingFileHandlerDebug = "Debug"
func (st *HelmState) applyDefaultsTo(spec *ReleaseSpec) {
if st.Namespace != "" {
spec.Namespace = st.Namespace
}
}
type RepoUpdater interface {
AddRepo(name, repository, cafile, certfile, keyfile, username, password string) error
UpdateRepo() error
}
// SyncRepos adds the configured helm repositories and updates them
func (st *HelmState) SyncRepos(helm RepoUpdater) []error {
errs := []error{}
for _, repo := range st.Repositories {
if err := helm.AddRepo(repo.Name, repo.URL, repo.CaFile, repo.CertFile, repo.KeyFile, repo.Username, repo.Password); err != nil {
errs = append(errs, err)
}
}
if len(errs) != 0 {
return errs
}
if err := helm.UpdateRepo(); err != nil {
return []error{err}
}
return nil
}
type syncResult struct {
errors []*ReleaseError
}
type syncPrepareResult struct {
release *ReleaseSpec
flags []string
errors []*ReleaseError
}
// prepareSyncReleases builds, per release, the flags and values files needed to run helm upgrade
func (st *HelmState) prepareSyncReleases(helm helmexec.Interface, additionalValues []string, concurrency int, opt ...SyncOpt) ([]syncPrepareResult, []error) {
opts := &SyncOpts{}
for _, o := range opt {
o.Apply(opts)
}
releases := []*ReleaseSpec{}
for i := range st.Releases {
releases = append(releases, &st.Releases[i])
}
numReleases := len(releases)
jobs := make(chan *ReleaseSpec, numReleases)
results := make(chan syncPrepareResult, numReleases)
res := []syncPrepareResult{}
errs := []error{}
mut := sync.Mutex{}
st.scatterGather(
concurrency,
numReleases,
func() {
for i := 0; i < numReleases; i++ {
jobs <- releases[i]
}
close(jobs)
},
func(workerIndex int) {
for release := range jobs {
st.applyDefaultsTo(release)
// If `installed: false`, the only potential operation on this release would be uninstalling.
// We skip generating values files in that case, because for an uninstall with `helm delete`, we don't need them.
// The values files are for `helm upgrade -f values.yaml` calls that happen when the release has `installed: true`.
// This logic addresses:
// - https://github.com/roboll/helmfile/issues/519
// - https://github.com/roboll/helmfile/issues/616
if !release.Desired() {
results <- syncPrepareResult{release: release, flags: []string{}, errors: []*ReleaseError{}}
continue
}
// TODO We need a long-term fix for this :)
// See https://github.com/roboll/helmfile/issues/737
mut.Lock()
flags, flagsErr := st.flagsForUpgrade(helm, release, workerIndex)
mut.Unlock()
if flagsErr != nil {
results <- syncPrepareResult{errors: []*ReleaseError{newReleaseError(release, flagsErr)}}
continue
}
errs := []*ReleaseError{}
for _, value := range additionalValues {
valfile, err := filepath.Abs(value)
if err != nil {
errs = append(errs, newReleaseError(release, err))
}
ok, err := st.fileExists(valfile)
if err != nil {
errs = append(errs, newReleaseError(release, err))
} else if !ok {
errs = append(errs, newReleaseError(release, fmt.Errorf("file does not exist: %s", valfile)))
}
flags = append(flags, "--values", valfile)
}
if opts.Set != nil {
for _, s := range opts.Set {
flags = append(flags, "--set", s)
}
}
if len(errs) > 0 {
results <- syncPrepareResult{errors: errs}
continue
}
results <- syncPrepareResult{release: release, flags: flags, errors: []*ReleaseError{}}
}
},
func() {
for i := 0; i < numReleases; {
select {
case r := <-results:
for _, e := range r.errors {
errs = append(errs, e)
}
res = append(res, r)
i++
}
}
},
)
return res, errs
}
func (st *HelmState) isReleaseInstalled(context helmexec.HelmContext, helm helmexec.Interface, release ReleaseSpec) (bool, error) {
out, err := st.listReleases(context, helm, &release)
if err != nil {
return false, err
} else if out != "" {
return true, nil
}
return false, nil
}
func (st *HelmState) DetectReleasesToBeDeleted(helm helmexec.Interface) ([]*ReleaseSpec, error) {
detected := []*ReleaseSpec{}
for i := range st.Releases {
release := st.Releases[i]
if !release.Desired() {
installed, err := st.isReleaseInstalled(st.createHelmContext(&release, 0), helm, release)
if err != nil {
return nil, err
} else if installed {
// Otherwise the release would be messed up (https://github.com/roboll/helmfile/issues/554)
r := release
detected = append(detected, &r)
}
}
}
return detected, nil
}
type SyncOpts struct {
Set []string
}
type SyncOpt interface{ Apply(*SyncOpts) }
func (o *SyncOpts) Apply(opts *SyncOpts) {
*opts = *o
}
// SyncReleases wrapper for executing helm upgrade on the releases
func (st *HelmState) SyncReleases(affectedReleases *AffectedReleases, helm helmexec.Interface, additionalValues []string, workerLimit int, opt ...SyncOpt) []error {
opts := &SyncOpts{}
for _, o := range opt {
o.Apply(opts)
}
preps, prepErrs := st.prepareSyncReleases(helm, additionalValues, workerLimit, opts)
if len(prepErrs) > 0 {
return prepErrs
}
errs := []error{}
jobQueue := make(chan *syncPrepareResult, len(preps))
results := make(chan syncResult, len(preps))
st.scatterGather(
workerLimit,
len(preps),
func() {
for i := 0; i < len(preps); i++ {
jobQueue <- &preps[i]
}
close(jobQueue)
},
func(workerIndex int) {
for prep := range jobQueue {
release := prep.release
flags := prep.flags
chart := normalizeChart(st.basePath, release.Chart)
var relErr *ReleaseError
context := st.createHelmContext(release, workerIndex)
if _, err := st.triggerPresyncEvent(release, "sync"); err != nil {
relErr = newReleaseError(release, err)
} else if !release.Desired() {
installed, err := st.isReleaseInstalled(context, helm, *release)
if err != nil {
relErr = newReleaseError(release, err)
} else if installed {
var args []string
if isHelm3() {
args = []string{}
} else {
args = []string{"--purge"}
}
deletionFlags := st.appendConnectionFlags(args, release)
if err := helm.DeleteRelease(context, release.Name, deletionFlags...); err != nil {
affectedReleases.Failed = append(affectedReleases.Failed, release)
relErr = newReleaseError(release, err)
} else {
affectedReleases.Deleted = append(affectedReleases.Deleted, release)
}
}
} else if err := helm.SyncRelease(context, release.Name, chart, flags...); err != nil {
affectedReleases.Failed = append(affectedReleases.Failed, release)
relErr = newReleaseError(release, err)
} else {
affectedReleases.Upgraded = append(affectedReleases.Upgraded, release)
installedVersion, err := st.getDeployedVersion(context, helm, release)
if err != nil { //the error is not critical here, so just log it
st.logger.Debugf("getting deployed release version failed:%v", err)
} else {
release.installedVersion = installedVersion
}
}
if relErr == nil {
results <- syncResult{}
} else {
results <- syncResult{errors: []*ReleaseError{relErr}}
}
if _, err := st.triggerPostsyncEvent(release, "sync"); err != nil {
st.logger.Warnf("warn: %v\n", err)
}
if _, err := st.triggerCleanupEvent(release, "sync"); err != nil {
st.logger.Warnf("warn: %v\n", err)
}
}
},
func() {
for i := 0; i < len(preps); {
select {
case res := <-results:
if len(res.errors) > 0 {
for _, e := range res.errors {
errs = append(errs, e)
}
}
}
i++
}
},
)
if len(errs) > 0 {
return errs
}
return nil
}
func (st *HelmState) listReleases(context helmexec.HelmContext, helm helmexec.Interface, release *ReleaseSpec) (string, error) {
flags := st.connectionFlags(release)
if isHelm3() && release.Namespace != "" {
flags = append(flags, "--namespace", release.Namespace)
}
return helm.List(context, "^"+release.Name+"$", flags...)
}
func (st *HelmState) getDeployedVersion(context helmexec.HelmContext, helm helmexec.Interface, release *ReleaseSpec) (string, error) {
//retrieve the version
if out, err := st.listReleases(context, helm, release); err == nil {
chartName := filepath.Base(release.Chart)
//the regexp without escapes : .*\s.*\s.*\s.*\schartName-(.*?)\s
pat := regexp.MustCompile(".*\\s.*\\s.*\\s.*\\s" + chartName + "-(.*?)\\s")
versions := pat.FindStringSubmatch(out)
if len(versions) > 0 {
return versions[1], nil
} else {
//fails to find the version
return "failed to get version", errors.New("Failed to get the version for:" + chartName)
}
} else {
return "failed to get version", err
}
}
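// Illustrative only: given a (hypothetical) `helm list` output line such as
//
//   my-api   3   Mon Jan  1 00:00:00 2019   DEPLOYED   example-chart-1.2.3   default
//
// the pattern above captures "1.2.3" as the installed version of the
// "example-chart" chart.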
// downloadCharts will download and untar charts for Lint and Template
func (st *HelmState) downloadCharts(helm helmexec.Interface, dir string, concurrency int, helmfileCommand string) (map[string]string, []error) {
temp := make(map[string]string, len(st.Releases))
type downloadResults struct {
releaseName string
chartPath string
}
errs := []error{}
jobQueue := make(chan *ReleaseSpec, len(st.Releases))
results := make(chan *downloadResults, len(st.Releases))
st.scatterGather(
concurrency,
len(st.Releases),
func() {
for i := 0; i < len(st.Releases); i++ {
jobQueue <- &st.Releases[i]
}
close(jobQueue)
},
func(_ int) {
for release := range jobQueue {
chartPath := ""
if pathExists(normalizeChart(st.basePath, release.Chart)) {
chartPath = normalizeChart(st.basePath, release.Chart)
} else {
fetchFlags := []string{}
if release.Version != "" {
chartPath = path.Join(dir, release.Name, release.Version, release.Chart)
fetchFlags = append(fetchFlags, "--version", release.Version)
} else {
chartPath = path.Join(dir, release.Name, "latest", release.Chart)
}
if st.isDevelopment(release) {
fetchFlags = append(fetchFlags, "--devel")
}
// only fetch chart if it is not already fetched
if _, err := os.Stat(chartPath); os.IsNotExist(err) {
fetchFlags = append(fetchFlags, "--untar", "--untardir", chartPath)
if err := helm.Fetch(release.Chart, fetchFlags...); err != nil {
errs = append(errs, err)
}
}
// Set chartPath to be the path containing Chart.yaml, if found
fullChartPath, err := findChartDirectory(chartPath)
if err == nil {
chartPath = filepath.Dir(fullChartPath)
}
}
results <- &downloadResults{release.Name, chartPath}
}
},
func() {
for i := 0; i < len(st.Releases); i++ {
downloadRes := <-results
temp[downloadRes.releaseName] = downloadRes.chartPath
}
},
)
if len(errs) > 0 {
return nil, errs
}
return temp, nil
}
type TemplateOpts struct {
Set []string
}
type TemplateOpt interface{ Apply(*TemplateOpts) }
func (o *TemplateOpts) Apply(opts *TemplateOpts) {
*opts = *o
}
// TemplateReleases wrapper for executing helm template on the releases
func (st *HelmState) TemplateReleases(helm helmexec.Interface, outputDir string, additionalValues []string, args []string, workerLimit int, opt ...TemplateOpt) []error {
opts := &TemplateOpts{}
for _, o := range opt {
o.Apply(opts)
}
// Reset the extra args if already set, not to break `helm fetch` by adding the args intended for `template`
helm.SetExtraArgs()
errs := []error{}
// Create tmp directory and bail immediately if it fails
dir, err := ioutil.TempDir("", "")
if err != nil {
errs = append(errs, err)
return errs
}
defer os.RemoveAll(dir)
temp, errs := st.downloadCharts(helm, dir, workerLimit, "template")
if errs != nil {
return errs
}
if len(args) > 0 {
helm.SetExtraArgs(args...)
}
for i := range st.Releases {
release := st.Releases[i]
if !release.Desired() {
continue
}
st.applyDefaultsTo(&release)
flags, err := st.flagsForTemplate(helm, &release, 0)
if err != nil {
errs = append(errs, err)
}
for _, value := range additionalValues {
valfile, err := filepath.Abs(value)
if err != nil {
errs = append(errs, err)
}
if _, err := os.Stat(valfile); os.IsNotExist(err) {
errs = append(errs, err)
}
flags = append(flags, "--values", valfile)
}
if opts.Set != nil {
for _, s := range opts.Set {
flags = append(flags, "--set", s)
}
}
if len(outputDir) > 0 {
releaseOutputDir, err := st.GenerateOutputDir(outputDir, release)
if err != nil {
errs = append(errs, err)
}
flags = append(flags, "--output-dir", releaseOutputDir)
st.logger.Debugf("Generating templates to : %s\n", releaseOutputDir)
os.Mkdir(releaseOutputDir, 0755)
}
if len(errs) == 0 {
if err := helm.TemplateRelease(release.Name, temp[release.Name], flags...); err != nil {
errs = append(errs, err)
}
}
if _, err := st.triggerCleanupEvent(&release, "template"); err != nil {
st.logger.Warnf("warn: %v\n", err)
}
}
if len(errs) != 0 {
return errs
}
return nil
}
type LintOpts struct {
Set []string
}
type LintOpt interface{ Apply(*LintOpts) }
func (o *LintOpts) Apply(opts *LintOpts) {
*opts = *o
}
// LintReleases wrapper for executing helm lint on the releases
func (st *HelmState) LintReleases(helm helmexec.Interface, additionalValues []string, args []string, workerLimit int, opt ...LintOpt) []error {
opts := &LintOpts{}
for _, o := range opt {
o.Apply(opts)
}
// Reset the extra args if already set, not to break `helm fetch` by adding the args intended for `lint`
helm.SetExtraArgs()
errs := []error{}
// Create tmp directory and bail immediately if it fails
dir, err := ioutil.TempDir("", "")
if err != nil {
errs = append(errs, err)
return errs
}
defer os.RemoveAll(dir)
temp, errs := st.downloadCharts(helm, dir, workerLimit, "lint")
if errs != nil {
return errs
}
if len(args) > 0 {
helm.SetExtraArgs(args...)
}
for i := range st.Releases {
release := st.Releases[i]
if !release.Desired() {
continue
}
flags, err := st.flagsForLint(helm, &release, 0)
if err != nil {
errs = append(errs, err)
}
for _, value := range additionalValues {
valfile, err := filepath.Abs(value)
if err != nil {
errs = append(errs, err)
}
if _, err := os.Stat(valfile); os.IsNotExist(err) {
errs = append(errs, err)
}
flags = append(flags, "--values", valfile)
}
if opts.Set != nil {
for _, s := range opts.Set {
flags = append(flags, "--set", s)
}
}
if len(errs) == 0 {
if err := helm.Lint(release.Name, temp[release.Name], flags...); err != nil {
errs = append(errs, err)
}
}
if _, err := st.triggerCleanupEvent(&release, "lint"); err != nil {
st.logger.Warnf("warn: %v\n", err)
}
}
if len(errs) != 0 {
return errs
}
return nil
}
type diffResult struct {
err *ReleaseError
}
type diffPrepareResult struct {
release *ReleaseSpec
flags []string
errors []*ReleaseError
}
func (st *HelmState) prepareDiffReleases(helm helmexec.Interface, additionalValues []string, concurrency int, detailedExitCode, suppressSecrets bool, opt ...DiffOpt) ([]diffPrepareResult, []error) {
opts := &DiffOpts{}
for _, o := range opt {
o.Apply(opts)
}
releases := []*ReleaseSpec{}
for i := range st.Releases {
if !st.Releases[i].Desired() {
continue
}
releases = append(releases, &st.Releases[i])
}
numReleases := len(releases)
jobs := make(chan *ReleaseSpec, numReleases)
results := make(chan diffPrepareResult, numReleases)
rs := []diffPrepareResult{}
errs := []error{}
mut := sync.Mutex{}
st.scatterGather(
concurrency,
numReleases,
func() {
for i := 0; i < numReleases; i++ {
jobs <- releases[i]
}
close(jobs)
},
func(workerIndex int) {
for release := range jobs {
errs := []error{}
st.applyDefaultsTo(release)
// TODO We need a long-term fix for this :)
// See https://github.com/roboll/helmfile/issues/737
mut.Lock()
flags, err := st.flagsForDiff(helm, release, workerIndex)
mut.Unlock()
if err != nil {
errs = append(errs, err)
}
for _, value := range additionalValues {
valfile, err := filepath.Abs(value)
if err != nil {
errs = append(errs, err)
}
if _, err := os.Stat(valfile); os.IsNotExist(err) {
errs = append(errs, err)
}
flags = append(flags, "--values", valfile)
}
if detailedExitCode {
flags = append(flags, "--detailed-exitcode")
}
if suppressSecrets {
flags = append(flags, "--suppress-secrets")
}
if opts.NoColor {
flags = append(flags, "--no-color")
}
if opts.Context > 0 {
flags = append(flags, "--context", fmt.Sprintf("%d", opts.Context))
}
if opts.Set != nil {
for _, s := range opts.Set {
flags = append(flags, "--set", s)
}
}
if len(errs) > 0 {
rsErrs := make([]*ReleaseError, len(errs))
for i, e := range errs {
rsErrs[i] = newReleaseError(release, e)
}
results <- diffPrepareResult{errors: rsErrs}
} else {
results <- diffPrepareResult{release: release, flags: flags, errors: []*ReleaseError{}}
}
}
},
func() {
for i := 0; i < numReleases; i++ {
res := <-results
if res.errors != nil && len(res.errors) > 0 {
for _, e := range res.errors {
errs = append(errs, e)
}
} else if res.release != nil {
rs = append(rs, res)
}
}
},
)
return rs, errs
}
func (st *HelmState) createHelmContext(spec *ReleaseSpec, workerIndex int) helmexec.HelmContext {
namespace := st.HelmDefaults.TillerNamespace
if spec.TillerNamespace != "" {
namespace = spec.TillerNamespace
}
tillerless := st.HelmDefaults.Tillerless
if spec.Tillerless != nil {
tillerless = *spec.Tillerless
}
return helmexec.HelmContext{
Tillerless: tillerless,
TillerNamespace: namespace,
WorkerIndex: workerIndex,
}
}
type DiffOpts struct {
Context int
NoColor bool
Set []string
}
func (o *DiffOpts) Apply(opts *DiffOpts) {
*opts = *o
}
type DiffOpt interface{ Apply(*DiffOpts) }
// DiffReleases wrapper for executing helm diff on the releases
// It returns releases that had any changes
func (st *HelmState) DiffReleases(helm helmexec.Interface, additionalValues []string, workerLimit int, detailedExitCode, suppressSecrets bool, triggerCleanupEvents bool, opt ...DiffOpt) ([]*ReleaseSpec, []error) {
opts := &DiffOpts{}
for _, o := range opt {
o.Apply(opts)
}
preps, prepErrs := st.prepareDiffReleases(helm, additionalValues, workerLimit, detailedExitCode, suppressSecrets, opts)
if len(prepErrs) > 0 {
return []*ReleaseSpec{}, prepErrs
}
jobQueue := make(chan *diffPrepareResult, len(preps))
results := make(chan diffResult, len(preps))
rs := []*ReleaseSpec{}
errs := []error{}
st.scatterGather(
workerLimit,
len(preps),
func() {
for i := 0; i < len(preps); i++ {
jobQueue <- &preps[i]
}
close(jobQueue)
},
func(workerIndex int) {
for prep := range jobQueue {
flags := prep.flags
release := prep.release
if err := helm.DiffRelease(st.createHelmContext(release, workerIndex), release.Name, normalizeChart(st.basePath, release.Chart), flags...); err != nil {
switch e := err.(type) {
case helmexec.ExitError:
// Propagate any non-zero exit status from the external command like `helm` that failed under the hood
results <- diffResult{&ReleaseError{release, err, e.ExitStatus()}}
default:
results <- diffResult{&ReleaseError{release, err, 0}}
}
} else {
// diff succeeded, found no changes
results <- diffResult{}
}
if triggerCleanupEvents {
if _, err := st.triggerCleanupEvent(prep.release, "diff"); err != nil {
st.logger.Warnf("warn: %v\n", err)
}
}
}
},
func() {
for i := 0; i < len(preps); i++ {
res := <-results
if res.err != nil {
errs = append(errs, res.err)
if res.err.Code == 2 {
rs = append(rs, res.err.ReleaseSpec)
}
}
}
},
)
return rs, errs
}
func (st *HelmState) ReleaseStatuses(helm helmexec.Interface, workerLimit int) []error {
return st.scatterGatherReleases(helm, workerLimit, func(release ReleaseSpec, workerIndex int) error {
if !release.Desired() {
return nil
}
flags := []string{}
flags = st.appendConnectionFlags(flags, &release)
return helm.ReleaseStatus(st.createHelmContext(&release, workerIndex), release.Name, flags...)
})
}
// DeleteReleases wrapper for executing helm delete on the releases
func (st *HelmState) DeleteReleases(affectedReleases *AffectedReleases, helm helmexec.Interface, concurrency int, purge bool) []error {
return st.scatterGatherReleases(helm, concurrency, func(release ReleaseSpec, workerIndex int) error {
if !release.Desired() {
return nil
}
flags := []string{}
if purge && !isHelm3() {
flags = append(flags, "--purge")
}
flags = st.appendConnectionFlags(flags, &release)
if isHelm3() && release.Namespace != "" {
flags = append(flags, "--namespace", release.Namespace)
}
context := st.createHelmContext(&release, workerIndex)
installed, err := st.isReleaseInstalled(context, helm, release)
if err != nil {
return err
}
if installed {
if err := helm.DeleteRelease(context, release.Name, flags...); err != nil {
affectedReleases.Failed = append(affectedReleases.Failed, &release)
return err
} else {
affectedReleases.Deleted = append(affectedReleases.Deleted, &release)
return nil
}
}
return nil
})
}
// TestReleases wrapper for executing helm test on the releases
func (st *HelmState) TestReleases(helm helmexec.Interface, cleanup bool, timeout int, concurrency int) []error {
return st.scatterGatherReleases(helm, concurrency, func(release ReleaseSpec, workerIndex int) error {
if !release.Desired() {
return nil
}
flags := []string{}
if cleanup {
flags = append(flags, "--cleanup")
}
duration := strconv.Itoa(timeout)
if isHelm3() {
duration += "s"
}
flags = append(flags, "--timeout", duration)
flags = st.appendConnectionFlags(flags, &release)
return helm.TestRelease(st.createHelmContext(&release, workerIndex), release.Name, flags...)
})
}
func isHelm3() bool {
return os.Getenv("HELMFILE_HELM3") != ""
}
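// Illustrative only: helm 3 mode is toggled purely by the environment variable
// checked above, e.g. (shell sketch, file name hypothetical):
//
//   HELMFILE_HELM3=1 helmfile -f helmfile.yaml apply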
// Clean will remove any generated values and secrets files
func (st *HelmState) Clean() []error {
errs := []error{}
for _, release := range st.Releases {
for _, value := range release.generatedValues {
err := st.removeFile(value)
if err != nil {
errs = append(errs, err)
}
}
}
if len(errs) != 0 {
return errs
}
return nil
}
// FilterReleases allows for the execution of helm commands against a subset of the releases in the helmfile.
func (st *HelmState) FilterReleases() error {
var filteredReleases []ReleaseSpec
releaseSet := map[string][]ReleaseSpec{}
filters := []ReleaseFilter{}
for _, label := range st.Selectors {
f, err := ParseLabels(label)
if err != nil {
return err
}
filters = append(filters, f)
}
for _, r := range st.Releases {
if r.Labels == nil {
r.Labels = map[string]string{}
}
// Let the release name, namespace, and chart be used as a tag
r.Labels["name"] = r.Name
r.Labels["namespace"] = r.Namespace
// Strip off just the last portion of the chart name: stable/newrelic would give newrelic
chartSplit := strings.Split(r.Chart, "/")
r.Labels["chart"] = chartSplit[len(chartSplit)-1]
for _, f := range filters {
if r.Labels == nil {
r.Labels = map[string]string{}
}
if f.Match(r) {
releaseSet[r.Name] = append(releaseSet[r.Name], r)
continue
}
}
}
for _, r := range releaseSet {
filteredReleases = append(filteredReleases, r...)
}
st.Releases = filteredReleases
numFound := len(filteredReleases)
st.logger.Debugf("%d release(s) matching %s found in %s\n", numFound, strings.Join(st.Selectors, ","), st.FilePath)
return nil
}
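// Illustrative only: st.Selectors typically comes from `--selector`/`-l` on the
// command line. A (hypothetical) invocation such as
//
//   helmfile -l tier=backend,name=my-api sync
//
// keeps only releases whose implicit labels (name, namespace, chart) and explicit
// `labels:` entries match every key=value pair of at least one selector.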
func (st *HelmState) PrepareReleases(helm helmexec.Interface, helmfileCommand string) []error {
errs := []error{}
for i := range st.Releases {
release := st.Releases[i]
if _, err := st.triggerPrepareEvent(&release, helmfileCommand); err != nil {
errs = append(errs, newReleaseError(&release, err))
continue
}
}
if len(errs) != 0 {
return errs
}
updated, err := st.ResolveDeps()
if err != nil {
return []error{err}
}
*st = *updated
return nil
}
func (st *HelmState) triggerPrepareEvent(r *ReleaseSpec, helmfileCommand string) (bool, error) {
return st.triggerReleaseEvent("prepare", r, helmfileCommand)
}
func (st *HelmState) triggerCleanupEvent(r *ReleaseSpec, helmfileCommand string) (bool, error) {
return st.triggerReleaseEvent("cleanup", r, helmfileCommand)
}
func (st *HelmState) triggerPresyncEvent(r *ReleaseSpec, helmfileCommand string) (bool, error) {
return st.triggerReleaseEvent("presync", r, helmfileCommand)
}
func (st *HelmState) triggerPostsyncEvent(r *ReleaseSpec, helmfileCommand string) (bool, error) {
return st.triggerReleaseEvent("postsync", r, helmfileCommand)
}
func (st *HelmState) triggerReleaseEvent(evt string, r *ReleaseSpec, helmfileCmd string) (bool, error) {
bus := &event.Bus{
Hooks: r.Hooks,
StateFilePath: st.FilePath,
BasePath: st.basePath,
Namespace: st.Namespace,
Env: st.Env,
Logger: st.logger,
ReadFile: st.readFile,
}
data := map[string]interface{}{
"Release": r,
"HelmfileCommand": helmfileCmd,
}
return bus.Trigger(evt, data)
}
// ResolveDeps returns a copy of this helmfile state with the concrete chart version numbers filled in for remote chart dependencies
func (st *HelmState) ResolveDeps() (*HelmState, error) {
return st.mergeLockedDependencies()
}
// UpdateDeps wrapper for updating dependencies on the releases
func (st *HelmState) UpdateDeps(helm helmexec.Interface) []error {
var errs []error
for _, release := range st.Releases {
if isLocalChart(release.Chart) {
if err := helm.UpdateDeps(normalizeChart(st.basePath, release.Chart)); err != nil {
errs = append(errs, err)
}
}
}
if len(errs) == 0 {
tempDir := st.tempDir
if tempDir == nil {
tempDir = ioutil.TempDir
}
_, err := st.updateDependenciesInTempDir(helm, tempDir)
if err != nil {
errs = append(errs, fmt.Errorf("unable to update deps: %v", err))
}
}
if len(errs) != 0 {
return errs
}
return nil
}
// BuildDeps wrapper for building dependencies on the releases
func (st *HelmState) BuildDeps(helm helmexec.Interface) []error {
errs := []error{}
for _, release := range st.Releases {
if isLocalChart(release.Chart) {
if err := helm.BuildDeps(release.Name, normalizeChart(st.basePath, release.Chart)); err != nil {
errs = append(errs, err)
}
}
}
if len(errs) != 0 {
return errs
}
return nil
}
func pathExists(chart string) bool {
_, err := os.Stat(chart)
return err == nil
}
func chartNameWithoutRepository(chart string) string {
chartSplit := strings.Split(chart, "/")
return chartSplit[len(chartSplit)-1]
}
// find "Chart.yaml"
func findChartDirectory(topLevelDir string) (string, error) {
var files []string
filepath.Walk(topLevelDir, func(path string, f os.FileInfo, err error) error {
if err != nil {
return fmt.Errorf("error walking through %s: %v", path, err)
}
if !f.IsDir() {
r, err := regexp.MatchString("Chart.yaml", f.Name())
if err == nil && r {
files = append(files, path)
}
}
return nil
})
// Sort to get the shortest path
sort.Strings(files)
if len(files) > 0 {
first := files[0]
return first, nil
}
return topLevelDir, errors.New("No Chart.yaml found")
}
// appendConnectionFlags appends all the helm command-line flags related to the K8s API and Tiller connection, including the kubecontext
func (st *HelmState) appendConnectionFlags(flags []string, release *ReleaseSpec) []string {
adds := st.connectionFlags(release)
for _, a := range adds {
flags = append(flags, a)
}
return flags
}
func (st *HelmState) connectionFlags(release *ReleaseSpec) []string {
flags := []string{}
tillerless := st.HelmDefaults.Tillerless
if release.Tillerless != nil {
tillerless = *release.Tillerless
}
if !tillerless {
if release.TillerNamespace != "" {
flags = append(flags, "--tiller-namespace", release.TillerNamespace)
} else if st.HelmDefaults.TillerNamespace != "" {
flags = append(flags, "--tiller-namespace", st.HelmDefaults.TillerNamespace)
}
if release.TLS != nil && *release.TLS || release.TLS == nil && st.HelmDefaults.TLS {
flags = append(flags, "--tls")
}
if release.TLSKey != "" {
flags = append(flags, "--tls-key", release.TLSKey)
} else if st.HelmDefaults.TLSKey != "" {
flags = append(flags, "--tls-key", st.HelmDefaults.TLSKey)
}
if release.TLSCert != "" {
flags = append(flags, "--tls-cert", release.TLSCert)
} else if st.HelmDefaults.TLSCert != "" {
flags = append(flags, "--tls-cert", st.HelmDefaults.TLSCert)
}
if release.TLSCACert != "" {
flags = append(flags, "--tls-ca-cert", release.TLSCACert)
} else if st.HelmDefaults.TLSCACert != "" {
flags = append(flags, "--tls-ca-cert", st.HelmDefaults.TLSCACert)
}
if release.KubeContext != "" {
flags = append(flags, "--kube-context", release.KubeContext)
} else if st.HelmDefaults.KubeContext != "" {
flags = append(flags, "--kube-context", st.HelmDefaults.KubeContext)
}
}
return flags
}
func (st *HelmState) flagsForUpgrade(helm helmexec.Interface, release *ReleaseSpec, workerIndex int) ([]string, error) {
flags := []string{}
if release.Version != "" {
flags = append(flags, "--version", release.Version)
}
if st.isDevelopment(release) {
flags = append(flags, "--devel")
}
if release.Verify != nil && *release.Verify || release.Verify == nil && st.HelmDefaults.Verify {
flags = append(flags, "--verify")
}
if release.Wait != nil && *release.Wait || release.Wait == nil && st.HelmDefaults.Wait {
flags = append(flags, "--wait")
}
timeout := st.HelmDefaults.Timeout
if release.Timeout != nil {
timeout = *release.Timeout
}
if timeout != 0 {
duration := strconv.Itoa(timeout)
if isHelm3() {
duration += "s"
}
flags = append(flags, "--timeout", duration)
}
if release.Force != nil && *release.Force || release.Force == nil && st.HelmDefaults.Force {
flags = append(flags, "--force")
}
if release.RecreatePods != nil && *release.RecreatePods || release.RecreatePods == nil && st.HelmDefaults.RecreatePods {
flags = append(flags, "--recreate-pods")
}
if release.Atomic != nil && *release.Atomic || release.Atomic == nil && st.HelmDefaults.Atomic {
flags = append(flags, "--atomic")
}
flags = st.appendConnectionFlags(flags, release)
var err error
flags, err = st.appendHelmXFlags(flags, release)
if err != nil {
return nil, err
}
common, err := st.namespaceAndValuesFlags(helm, release, workerIndex)
if err != nil {
return nil, err
}
return append(flags, common...), nil
}
func (st *HelmState) flagsForTemplate(helm helmexec.Interface, release *ReleaseSpec, workerIndex int) ([]string, error) {
flags := []string{}
var err error
flags, err = st.appendHelmXFlags(flags, release)
if err != nil {
return nil, err
}
common, err := st.namespaceAndValuesFlags(helm, release, workerIndex)
if err != nil {
return nil, err
}
return append(flags, common...), nil
}
func (st *HelmState) flagsForDiff(helm helmexec.Interface, release *ReleaseSpec, workerIndex int) ([]string, error) {
flags := []string{}
if release.Version != "" {
flags = append(flags, "--version", release.Version)
}
if st.isDevelopment(release) {
flags = append(flags, "--devel")
}
flags = st.appendConnectionFlags(flags, release)
var err error
flags, err = st.appendHelmXFlags(flags, release)
if err != nil {
return nil, err
}
common, err := st.namespaceAndValuesFlags(helm, release, workerIndex)
if err != nil {
return nil, err
}
return append(flags, common...), nil
}
func (st *HelmState) isDevelopment(release *ReleaseSpec) bool {
result := st.HelmDefaults.Devel
if release.Devel != nil {
result = *release.Devel
}
return result
}
func (st *HelmState) flagsForLint(helm helmexec.Interface, release *ReleaseSpec, workerIndex int) ([]string, error) {
flags, err := st.namespaceAndValuesFlags(helm, release, workerIndex)
if err != nil {
return nil, err
}
flags, err = st.appendHelmXFlags(flags, release)
if err != nil {
return nil, err
}
return flags, nil
}
func (st *HelmState) RenderValuesFileToBytes(path string) ([]byte, error) {
r := tmpl.NewFileRenderer(st.readFile, filepath.Dir(path), st.valuesFileTemplateData())
return r.RenderToBytes(path)
}
func (st *HelmState) storage() *Storage {
return &Storage{
FilePath: st.FilePath,
basePath: st.basePath,
glob: st.glob,
logger: st.logger,
}
}
func (st *HelmState) ExpandedHelmfiles() ([]SubHelmfileSpec, error) {
helmfiles := []SubHelmfileSpec{}
for _, hf := range st.Helmfiles {
if remote.IsRemote(hf.Path) {
helmfiles = append(helmfiles, hf)
continue
}
matches, err := st.storage().ExpandPaths(hf.Path)
if err != nil {
return nil, err
}
if len(matches) == 0 {
continue
}
for _, match := range matches {
newHelmfile := hf
newHelmfile.Path = match
helmfiles = append(helmfiles, newHelmfile)
}
}
return helmfiles, nil
}
func (st *HelmState) generateTemporaryValuesFiles(values []interface{}, missingFileHandler *string) ([]string, error) {
generatedFiles := []string{}
for _, value := range values {
switch typedValue := value.(type) {
case string:
paths, skip, err := st.storage().resolveFile(missingFileHandler, "values", typedValue)
if err != nil {
return nil, err
}
if skip {
continue
}
if len(paths) > 1 {
return nil, fmt.Errorf("glob patterns in release values and secrets is not supported yet. please submit a feature request if necessary")
}
path := paths[0]
yamlBytes, err := st.RenderValuesFileToBytes(path)
if err != nil {
return nil, fmt.Errorf("failed to render values files \"%s\": %v", typedValue, err)
}
valfile, err := ioutil.TempFile("", "values")
if err != nil {
return nil, err
}
defer valfile.Close()
if _, err := valfile.Write(yamlBytes); err != nil {
return nil, fmt.Errorf("failed to write %s: %v", valfile.Name(), err)
}
st.logger.Debugf("successfully generated the value file at %s. produced:\n%s", path, string(yamlBytes))
generatedFiles = append(generatedFiles, valfile.Name())
case map[interface{}]interface{}:
valfile, err := ioutil.TempFile("", "values")
if err != nil {
return nil, err
}
defer valfile.Close()
encoder := yaml.NewEncoder(valfile)
defer encoder.Close()
if err := encoder.Encode(typedValue); err != nil {
return nil, err
}
generatedFiles = append(generatedFiles, valfile.Name())
default:
return nil, fmt.Errorf("unexpected type of value: value=%v, type=%T", typedValue, typedValue)
}
}
return generatedFiles, nil
}
func (st *HelmState) namespaceAndValuesFlags(helm helmexec.Interface, release *ReleaseSpec, workerIndex int) ([]string, error) {
flags := []string{}
if release.Namespace != "" {
flags = append(flags, "--namespace", release.Namespace)
}
values := []interface{}{}
for _, v := range release.Values {
switch typedValue := v.(type) {
case string:
path := st.storage().normalizePath(release.ValuesPathPrefix + typedValue)
values = append(values, path)
default:
values = append(values, v)
}
}
generatedFiles, err := st.generateTemporaryValuesFiles(values, release.MissingFileHandler)
if err != nil {
return nil, err
}
for _, f := range generatedFiles {
flags = append(flags, "--values", f)
}
release.generatedValues = append(release.generatedValues, generatedFiles...)
for _, value := range release.Secrets {
paths, skip, err := st.storage().resolveFile(release.MissingFileHandler, "secrets", release.ValuesPathPrefix+value)
if err != nil {
return nil, err
}
if skip {
continue
}
if len(paths) > 1 {
return nil, fmt.Errorf("glob patterns in release secret file is not supported yet. please submit a feature request if necessary")
}
path := paths[0]
decryptFlags := st.appendConnectionFlags([]string{}, release)
valfile, err := helm.DecryptSecret(st.createHelmContext(release, workerIndex), path, decryptFlags...)
if err != nil {
return nil, err
}
release.generatedValues = append(release.generatedValues, valfile)
flags = append(flags, "--values", valfile)
}
if len(release.SetValues) > 0 {
for _, set := range release.SetValues {
if set.Value != "" {
flags = append(flags, "--set", fmt.Sprintf("%s=%s", escape(set.Name), escape(set.Value)))
} else if set.File != "" {
flags = append(flags, "--set-file", fmt.Sprintf("%s=%s", escape(set.Name), st.storage().normalizePath(set.File)))
} else if len(set.Values) > 0 {
items := make([]string, len(set.Values))
for i, raw := range set.Values {
items[i] = escape(raw)
}
v := strings.Join(items, ",")
flags = append(flags, "--set", fmt.Sprintf("%s={%s}", escape(set.Name), v))
}
}
}
/***********
* START 'env' section for backwards compatibility
***********/
// The 'env' section is not really necessary any longer, as 'set' would now provide the same functionality
if len(release.EnvValues) > 0 {
val := []string{}
envValErrs := []string{}
for _, set := range release.EnvValues {
value, isSet := os.LookupEnv(set.Value)
if isSet {
val = append(val, fmt.Sprintf("%s=%s", escape(set.Name), escape(value)))
} else {
errMsg := fmt.Sprintf("\t%s", set.Value)
envValErrs = append(envValErrs, errMsg)
}
}
if len(envValErrs) != 0 {
joinedEnvVals := strings.Join(envValErrs, "\n")
errMsg := fmt.Sprintf("Environment Variables not found. Please make sure they are set and try again:\n%s", joinedEnvVals)
return nil, errors.New(errMsg)
}
flags = append(flags, "--set", strings.Join(val, ","))
}
/**************
* END 'env' section for backwards compatibility
**************/
return flags, nil
}
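// Illustrative only: the legacy `env` section handled above turns environment
// variables into --set flags. Assuming IMAGE_TAG=v1.2.3 is exported, an entry like
//
//   env:
//     - name: image.tag
//       value: IMAGE_TAG
//
// becomes `--set image.tag=v1.2.3`; the `set:` section is the preferred way to
// express the same thing today.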
// DisplayAffectedReleases logs the upgraded, deleted, and failed releases
func (ar *AffectedReleases) DisplayAffectedReleases(logger *zap.SugaredLogger) {
if ar.Upgraded != nil {
logger.Info("\nList of updated releases :")
tbl, _ := prettytable.NewTable(prettytable.Column{Header: "RELEASE"},
prettytable.Column{Header: "CHART", MinWidth: 6},
prettytable.Column{Header: "VERSION", AlignRight: true},
)
tbl.Separator = " "
for _, release := range ar.Upgraded {
tbl.AddRow(release.Name, release.Chart, release.installedVersion)
}
tbl.Print()
}
if ar.Deleted != nil {
logger.Info("\nList of deleted releases :")
logger.Info("RELEASE")
for _, release := range ar.Deleted {
logger.Info(release.Name)
}
}
if ar.Failed != nil {
logger.Info("\nList of releases in error :")
logger.Info("RELEASE")
for _, release := range ar.Failed {
logger.Info(release.Name)
}
}
}
func escape(value string) string {
intermediate := strings.Replace(value, "{", "\\{", -1)
intermediate = strings.Replace(intermediate, "}", "\\}", -1)
return strings.Replace(intermediate, ",", "\\,", -1)
}
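// Illustrative examples of escape (inputs are arbitrary):
//
//   escape("a,b{c}")  // -> a\,b\{c\}
//   escape("plain")   // -> plain (unchanged)
//
// so that commas and braces survive helm's `--set key=value` parsing.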
//UnmarshalYAML will unmarshal the helmfile yaml section and fill the SubHelmfileSpec structure
//this is required to keep allowing a plain string scalar for defining a sub-helmfile
func (hf *SubHelmfileSpec) UnmarshalYAML(unmarshal func(interface{}) error) error {
var tmp interface{}
if err := unmarshal(&tmp); err != nil {
return err
}
switch i := tmp.(type) {
case string: // single path definition without sub items, legacy sub helmfile definition
hf.Path = i
case map[interface{}]interface{}: // helmfile path with sub section
var subHelmfileSpecTmp struct {
Path string `yaml:"path"`
Selectors []string `yaml:"selectors"`
SelectorsInherited bool `yaml:"selectorsInherited"`
Environment SubhelmfileEnvironmentSpec `yaml:",inline"`
}
if err := unmarshal(&subHelmfileSpecTmp); err != nil {
return err
}
hf.Path = subHelmfileSpecTmp.Path
hf.Selectors = subHelmfileSpecTmp.Selectors
hf.SelectorsInherited = subHelmfileSpecTmp.SelectorsInherited
hf.Environment = subHelmfileSpecTmp.Environment
}
//since we cannot make sure the "selectors" entry is read after the "path", we must check that we don't have
//a SubHelmfileSpec with only selectors and no path
if hf.Selectors != nil && hf.Path == "" {
return fmt.Errorf("found 'selectors' definition without path: %v", hf.Selectors)
}
//also reject SelectorsInherited set to true combined with explicit selectors
if hf.SelectorsInherited && len(hf.Selectors) > 0 {
return fmt.Errorf("You cannot use 'SelectorsInherited: true' along with and explicit selector for path: %v", hf.Path)
}
return nil
}
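// Illustrative only: the two YAML shapes accepted above for a `helmfiles` entry
// (paths and selectors are placeholders):
//
//   helmfiles:
//     - path/to/legacy-subhelmfile.yaml     # plain string scalar (legacy form)
//     - path: path/to/subhelmfile.yaml      # map form with sub options
//       selectors:
//         - tier=backend
//       values:
//         - override-values.yaml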
func (st *HelmState) GenerateOutputDir(outputDir string, release ReleaseSpec) (string, error) {
// get absolute path of state file to generate a hash
// use this hash to write helm output in a specific directory by state file and release name
// ie. in a directory named stateFileName-stateFileHash-releaseName
stateAbsPath, err := filepath.Abs(st.FilePath)
if err != nil {
return stateAbsPath, err
}
hasher := sha1.New()
io.WriteString(hasher, stateAbsPath)
var stateFileExtension = filepath.Ext(st.FilePath)
var stateFileName = st.FilePath[0 : len(st.FilePath)-len(stateFileExtension)]
var sb strings.Builder
sb.WriteString(stateFileName)
sb.WriteString("-")
sb.WriteString(hex.EncodeToString(hasher.Sum(nil))[:8])
sb.WriteString("-")
sb.WriteString(release.Name)
return path.Join(outputDir, sb.String()), nil
}
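// Illustrative only: for a state file "helmfile.yaml" and a release named "my-api",
// the generated directory looks roughly like
//
//   <outputDir>/helmfile-1a2b3c4d-my-api
//
// where "1a2b3c4d" stands in for the first 8 hex characters of the SHA-1 of the
// state file's absolute path (the real value depends on that path).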
func (st *HelmState) ToYaml() (string, error) {
if result, err := yaml.Marshal(st); err != nil {
return "", err
} else {
return string(result), nil
}
}
| [ "\"HELMFILE_HELM3\"" ] | [] | [ "HELMFILE_HELM3" ] | [] | ["HELMFILE_HELM3"] | go | 1 | 0 |
kuryr_kubernetes/cni/main.py | # Copyright (c) 2016 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import signal
import six
import sys
import os_vif
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from kuryr_kubernetes.cni import api as cni_api
from kuryr_kubernetes.cni import utils
from kuryr_kubernetes import config
from kuryr_kubernetes import constants as k_const
from kuryr_kubernetes import objects as k_objects
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
_CNI_TIMEOUT = 180
def run():
if six.PY3:
d = jsonutils.load(sys.stdin.buffer)
else:
d = jsonutils.load(sys.stdin)
cni_conf = utils.CNIConfig(d)
args = (['--config-file', cni_conf.kuryr_conf] if 'kuryr_conf' in d
else [])
try:
if cni_conf.debug:
args.append('-d')
except AttributeError:
pass
config.init(args)
config.setup_logging()
# Initialize o.vo registry.
k_objects.register_locally_defined_vifs()
os_vif.initialize()
runner = cni_api.CNIDaemonizedRunner()
def _timeout(signum, frame):
runner._write_dict(sys.stdout, {
'msg': 'timeout',
'code': k_const.CNI_TIMEOUT_CODE,
})
LOG.debug('timed out')
sys.exit(1)
signal.signal(signal.SIGALRM, _timeout)
signal.alarm(_CNI_TIMEOUT)
status = runner.run(os.environ, cni_conf, sys.stdout)
LOG.debug("Exiting with status %s", status)
if status:
sys.exit(status)
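# Illustrative only: a minimal CNI config as read from stdin above. Only
# "kuryr_conf" and "debug" are consumed directly by this module; the remaining
# keys are standard CNI boilerplate and the values are placeholders.
#
#   {
#     "cniVersion": "0.3.1",
#     "name": "kuryr",
#     "type": "kuryr-cni",
#     "kuryr_conf": "/etc/kuryr/kuryr.conf",
#     "debug": true
#   }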
| [] | [] | [] | [] | [] | python | 0 | 0 | |
plugin/core/types.py | from .collections import DottedDict
from .file_watcher import FileWatcherEventType
from .logging import debug, set_debug_logging
from .protocol import TextDocumentSyncKindNone
from .typing import Any, Optional, List, Dict, Generator, Callable, Iterable, Union, Set, Tuple, TypedDict, TypeVar
from .typing import cast
from .url import filename_to_uri
from .url import uri_to_filename
from threading import RLock
from wcmatch.glob import BRACE
from wcmatch.glob import globmatch
from wcmatch.glob import GLOBSTAR
import contextlib
import os
import socket
import sublime
import time
import urllib.parse
TCP_CONNECT_TIMEOUT = 5 # seconds
FEATURES_TIMEOUT = 300 # milliseconds
PANEL_FILE_REGEX = r"^(?!\s+\d+:\d+)(.*)(:)$"
PANEL_LINE_REGEX = r"^\s+(\d+):(\d+)"
FileWatcherConfig = TypedDict("FileWatcherConfig", {
"pattern": Optional[str],
"events": Optional[List[FileWatcherEventType]],
"ignores": Optional[List[str]],
}, total=False)
def basescope2languageid(base_scope: str) -> str:
# This is the connection between Language IDs and ST selectors.
base_scope_map = sublime.load_settings("language-ids.sublime-settings")
result = base_scope_map.get(base_scope, base_scope.split(".")[-1])
return result if isinstance(result, str) else ""
@contextlib.contextmanager
def runtime(token: str) -> Generator[None, None, None]:
t = time.time()
yield
debug(token, "running time:", int((time.time() - t) * 1000000), "μs")
T = TypeVar("T")
def diff(old: Iterable[T], new: Iterable[T]) -> Tuple[Set[T], Set[T]]:
"""
Return a tuple of (added, removed) items
"""
old_set = old if isinstance(old, set) else set(old)
new_set = new if isinstance(new, set) else set(new)
added = new_set - old_set
removed = old_set - new_set
return added, removed
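# Illustrative only (hypothetical values): diff({"a", "b"}, {"b", "c"}) returns
# ({"c"}, {"a"}) -- "c" was added and "a" was removed.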
def debounced(f: Callable[[], Any], timeout_ms: int = 0, condition: Callable[[], bool] = lambda: True,
async_thread: bool = False) -> None:
"""
Possibly run a function at a later point in time, either on the async thread or on the main thread.
:param f: The function to possibly run. Its return type is discarded.
:param timeout_ms: The time in milliseconds after which to possibly run the function
:param condition: The condition that must evaluate to True in order to run the function
:param async_thread: If true, run the function on the async worker thread, otherwise run the function on the
main thread
"""
def run() -> None:
if condition():
f()
runner = sublime.set_timeout_async if async_thread else sublime.set_timeout
runner(run, timeout_ms)
class SettingsRegistration:
__slots__ = ("_settings",)
def __init__(self, settings: sublime.Settings, on_change: Callable[[], None]) -> None:
self._settings = settings
settings.add_on_change("LSP", on_change)
def __del__(self) -> None:
self._settings.clear_on_change("LSP")
class Debouncer:
def __init__(self) -> None:
self._current_id = -1
self._next_id = 0
self._current_id_lock = RLock()
def debounce(self, f: Callable[[], None], timeout_ms: int = 0, condition: Callable[[], bool] = lambda: True,
async_thread: bool = False) -> None:
"""
Possibly run a function at a later point in time, either on the async thread or on the main thread.
:param f: The function to possibly run
:param timeout_ms: The time in milliseconds after which to possibly run the function
:param condition: The condition that must evaluate to True in order to run the function
:param async_thread: If true, run the function on the async worker thread, otherwise run
the function on the main thread
"""
def run(debounce_id: int) -> None:
with self._current_id_lock:
if debounce_id != self._current_id:
return
if condition():
f()
runner = sublime.set_timeout_async if async_thread else sublime.set_timeout
with self._current_id_lock:
current_id = self._current_id = self._next_id
self._next_id += 1
runner(lambda: run(current_id), timeout_ms)
def cancel_pending(self) -> None:
with self._current_id_lock:
self._current_id = -1
def read_dict_setting(settings_obj: sublime.Settings, key: str, default: dict) -> dict:
val = settings_obj.get(key)
return val if isinstance(val, dict) else default
def read_list_setting(settings_obj: sublime.Settings, key: str, default: list) -> list:
val = settings_obj.get(key)
return val if isinstance(val, list) else default
class Settings:
# This is only for mypy
diagnostics_additional_delay_auto_complete_ms = None # type: int
diagnostics_delay_ms = None # type: int
diagnostics_gutter_marker = None # type: str
diagnostics_panel_include_severity_level = None # type: int
disabled_capabilities = None # type: List[str]
document_highlight_style = None # type: str
inhibit_snippet_completions = None # type: bool
inhibit_word_completions = None # type: bool
log_debug = None # type: bool
log_max_size = None # type: int
log_server = None # type: List[str]
lsp_code_actions_on_save = None # type: Dict[str, bool]
lsp_format_on_save = None # type: bool
on_save_task_timeout_ms = None # type: int
only_show_lsp_completions = None # type: bool
popup_max_characters_height = None # type: int
popup_max_characters_width = None # type: int
show_code_actions = None # type: str
show_code_lens = None # type: str
show_code_actions_in_hover = None # type: bool
show_diagnostics_count_in_view_status = None # type: bool
show_diagnostics_highlights = None # type: bool
show_diagnostics_in_view_status = None # type: bool
show_diagnostics_panel_on_save = None # type: int
show_diagnostics_severity_level = None # type: int
show_references_in_quick_panel = None # type: bool
show_symbol_action_links = None # type: bool
show_view_status = None # type: bool
def __init__(self, s: sublime.Settings) -> None:
self.update(s)
def update(self, s: sublime.Settings) -> None:
def r(name: str, default: Union[bool, int, str, list, dict]) -> None:
val = s.get(name)
setattr(self, name, val if isinstance(val, default.__class__) else default)
r("diagnostics_additional_delay_auto_complete_ms", 0)
r("diagnostics_delay_ms", 0)
r("diagnostics_gutter_marker", "dot")
r("diagnostics_panel_include_severity_level", 4)
r("disabled_capabilities", [])
r("document_highlight_style", "underline")
r("log_debug", False)
r("log_max_size", 8 * 1024)
r("lsp_code_actions_on_save", {})
r("lsp_format_on_save", False)
r("on_save_task_timeout_ms", 2000)
r("only_show_lsp_completions", False)
r("popup_max_characters_height", 1000)
r("popup_max_characters_width", 120)
r("show_code_actions", "annotation")
r("show_code_lens", "annotation")
r("show_code_actions_in_hover", True)
r("show_diagnostics_count_in_view_status", False)
r("show_diagnostics_in_view_status", True)
r("show_diagnostics_highlights", True)
r("show_diagnostics_panel_on_save", 2)
r("show_diagnostics_severity_level", 2)
r("show_references_in_quick_panel", False)
r("show_symbol_action_links", False)
r("show_view_status", True)
# Backwards-compatible with the bool setting
log_server = s.get("log_server")
if isinstance(log_server, bool):
self.log_server = ["panel"] if log_server else []
elif isinstance(log_server, list):
self.log_server = log_server
else:
self.log_server = []
# Backwards-compatible with the bool setting
auto_show_diagnostics_panel = s.get("auto_show_diagnostics_panel")
if isinstance(auto_show_diagnostics_panel, bool):
if not auto_show_diagnostics_panel:
self.show_diagnostics_panel_on_save = 0
elif isinstance(auto_show_diagnostics_panel, str):
if auto_show_diagnostics_panel == "never":
self.show_diagnostics_panel_on_save = 0
# Backwards-compatible with "only_show_lsp_completions"
only_show_lsp_completions = s.get("only_show_lsp_completions")
if isinstance(only_show_lsp_completions, bool):
self.inhibit_snippet_completions = only_show_lsp_completions
self.inhibit_word_completions = only_show_lsp_completions
else:
r("inhibit_snippet_completions", False)
r("inhibit_word_completions", True)
# Backwards-compatible with "diagnostics_highlight_style"
diagnostics_highlight_style = s.get("diagnostics_highlight_style")
if isinstance(diagnostics_highlight_style, str):
if not diagnostics_highlight_style:
self.show_diagnostics_highlights = False
# Backwards-compatible with "code_action_on_save_timeout_ms"
code_action_on_save_timeout_ms = s.get("code_action_on_save_timeout_ms")
if isinstance(code_action_on_save_timeout_ms, int):
self.on_save_task_timeout_ms = code_action_on_save_timeout_ms
set_debug_logging(self.log_debug)
def document_highlight_style_region_flags(self) -> Tuple[int, int]:
if self.document_highlight_style == "fill":
return sublime.DRAW_NO_OUTLINE, sublime.DRAW_NO_OUTLINE
elif self.document_highlight_style == "stippled":
return sublime.DRAW_NO_FILL, sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE | sublime.DRAW_STIPPLED_UNDERLINE # noqa: E501
else:
return sublime.DRAW_NO_FILL, sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE | sublime.DRAW_SOLID_UNDERLINE
class ClientStates:
STARTING = 0
READY = 1
STOPPING = 2
class DocumentFilter:
"""
A document filter denotes a document through properties like language, scheme or pattern. An example is a filter
that applies to TypeScript files on disk. Another example is a filter that applies to JSON files with name
package.json:
{ "language": "typescript", scheme: "file" }
{ "language": "json", "pattern": "**/package.json" }
Sublime Text doesn't understand what a language ID is, so we have to maintain a global translation map from language
IDs to selectors. Sublime Text also has no support for patterns. We use the wcmatch library for this.
"""
__slots__ = ("language", "scheme", "pattern")
def __init__(
self,
language: Optional[str] = None,
scheme: Optional[str] = None,
pattern: Optional[str] = None
) -> None:
self.scheme = scheme
self.pattern = pattern
self.language = language
def __call__(self, view: sublime.View) -> bool:
"""Does this filter match the view? An empty filter matches any view."""
if self.language:
syntax = view.syntax()
if not syntax or basescope2languageid(syntax.scope) != self.language:
return False
if self.scheme:
uri = view.settings().get("lsp_uri")
if isinstance(uri, str) and urllib.parse.urlparse(uri).scheme != self.scheme:
return False
if self.pattern:
if not globmatch(view.file_name() or "", self.pattern, flags=GLOBSTAR | BRACE):
return False
return True
class DocumentSelector:
"""
A DocumentSelector is a list of DocumentFilters. A view matches a DocumentSelector if and only if any one of its
filters matches against the view.
"""
__slots__ = ("filters",)
def __init__(self, document_selector: List[Dict[str, Any]]) -> None:
self.filters = [DocumentFilter(**document_filter) for document_filter in document_selector]
def __bool__(self) -> bool:
return bool(self.filters)
def matches(self, view: sublime.View) -> bool:
"""Does this selector match the view? A selector with no filters matches all views."""
return any(f(view) for f in self.filters) if self.filters else True
# method -> (capability dotted path, optional registration dotted path)
# these are the EXCEPTIONS. The general rule is: method foo/bar --> (barProvider, barProvider.id)
_METHOD_TO_CAPABILITY_EXCEPTIONS = {
'workspace/symbol': ('workspaceSymbolProvider', None),
'workspace/didChangeWorkspaceFolders': ('workspace.workspaceFolders',
'workspace.workspaceFolders.changeNotifications'),
'textDocument/didOpen': ('textDocumentSync.didOpen', None),
'textDocument/didClose': ('textDocumentSync.didClose', None),
'textDocument/didChange': ('textDocumentSync.change', None),
'textDocument/didSave': ('textDocumentSync.save', None),
'textDocument/willSave': ('textDocumentSync.willSave', None),
'textDocument/willSaveWaitUntil': ('textDocumentSync.willSaveWaitUntil', None),
'textDocument/formatting': ('documentFormattingProvider', None),
'textDocument/documentColor': ('colorProvider', None)
} # type: Dict[str, Tuple[str, Optional[str]]]
def method_to_capability(method: str) -> Tuple[str, str]:
"""
Given a method, returns the corresponding capability path, and the associated path to stash the registration key.
Examples:
textDocument/definition --> (definitionProvider, definitionProvider.id)
textDocument/references --> (referencesProvider, referencesProvider.id)
textDocument/didOpen --> (textDocumentSync.didOpen, textDocumentSync.didOpen.id)
"""
capability_path, registration_path = _METHOD_TO_CAPABILITY_EXCEPTIONS.get(method, (None, None))
if capability_path is None:
capability_path = method.split('/')[1] + "Provider"
if registration_path is None:
# This path happens to coincide with the StaticRegistrationOptions' id, which is on purpose. As a consequence,
# if a server made a "registration" via the initialize response, it can call client/unregisterCapability at
# a later date, and the capability will pop from the capabilities dict.
registration_path = capability_path + ".id"
return capability_path, registration_path
def normalize_text_sync(textsync: Union[None, int, Dict[str, Any]]) -> Dict[str, Any]:
"""
Brings legacy text sync capabilities to the most modern format
"""
result = {} # type: Dict[str, Any]
if isinstance(textsync, int):
change = {"syncKind": textsync} # type: Optional[Dict[str, Any]]
result["textDocumentSync"] = {"didOpen": {}, "save": {}, "didClose": {}, "change": change}
elif isinstance(textsync, dict):
new = {}
change = textsync.get("change")
if isinstance(change, int):
new["change"] = {"syncKind": change}
elif isinstance(change, dict):
new["change"] = change
def maybe_assign_bool_or_dict(key: str) -> None:
assert isinstance(textsync, dict)
value = textsync.get(key)
if isinstance(value, bool) and value:
new[key] = {}
elif isinstance(value, dict):
new[key] = value
open_close = textsync.get("openClose")
if isinstance(open_close, bool):
if open_close:
new["didOpen"] = {}
new["didClose"] = {}
else:
maybe_assign_bool_or_dict("didOpen")
maybe_assign_bool_or_dict("didClose")
maybe_assign_bool_or_dict("willSave")
maybe_assign_bool_or_dict("willSaveWaitUntil")
maybe_assign_bool_or_dict("save")
result["textDocumentSync"] = new
return result
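# Illustrative sketch (added for clarity, not part of the original module): a legacy integer
# capability such as 2 (TextDocumentSyncKind.Incremental) is normalized by the function above
# into {"textDocumentSync": {"didOpen": {}, "save": {}, "didClose": {}, "change": {"syncKind": 2}}},
# so the rest of the module only has to deal with the modern, nested shape.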
class Capabilities(DottedDict):
"""
Maintains static and dynamic capabilities
Static capabilities come from a response to the initialize request (from Client -> Server).
Dynamic capabilities can be registered at any moment with client/registerCapability and client/unregisterCapability
(from Server -> Client).
"""
def register(
self,
registration_id: str,
capability_path: str,
registration_path: str,
options: Dict[str, Any]
) -> None:
stored_registration_id = self.get(registration_path)
if isinstance(stored_registration_id, str):
msg = "{} is already registered at {} with ID {}, overwriting"
debug(msg.format(capability_path, registration_path, stored_registration_id))
self.set(capability_path, options)
self.set(registration_path, registration_id)
def unregister(
self,
registration_id: str,
capability_path: str,
registration_path: str
) -> Optional[Dict[str, Any]]:
stored_registration_id = self.get(registration_path)
if not isinstance(stored_registration_id, str):
debug("stored registration ID at", registration_path, "is not a string")
return None
elif stored_registration_id != registration_id:
msg = "stored registration ID ({}) is not the same as the provided registration ID ({})"
debug(msg.format(stored_registration_id, registration_id))
return None
else:
discarded = self.get(capability_path)
self.remove(capability_path)
self.remove(registration_path)
return discarded
def assign(self, d: Dict[str, Any]) -> None:
textsync = normalize_text_sync(d.pop("textDocumentSync", None))
super().assign(d)
if textsync:
self.update(textsync)
def should_notify_did_open(self) -> bool:
return "textDocumentSync.didOpen" in self
def text_sync_kind(self) -> int:
value = self.get("textDocumentSync.change.syncKind")
return value if isinstance(value, int) else TextDocumentSyncKindNone
def should_notify_did_change_workspace_folders(self) -> bool:
return "workspace.workspaceFolders.changeNotifications" in self
def should_notify_will_save(self) -> bool:
return "textDocumentSync.willSave" in self
def should_notify_did_save(self) -> Tuple[bool, bool]:
save = self.get("textDocumentSync.save")
if isinstance(save, bool):
return save, False
elif isinstance(save, dict):
return True, bool(save.get("includeText"))
else:
return False, False
def should_notify_did_close(self) -> bool:
return "textDocumentSync.didClose" in self
def _translate_path(path: str, source: str, destination: str) -> Tuple[str, bool]:
# TODO: Case-insensitive file systems. Maybe this problem needs a much larger refactor. Even Sublime Text doesn't
# handle case-insensitive file systems correctly. There are a few other places where case-sensitivity matters, for
# example when looking up the correct view for diagnostics, and when finding a view for goto-def.
if path.startswith(source) and len(path) > len(source) and path[len(source)] in ("/", "\\"):
return path.replace(source, destination, 1), True
return path, False
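# Illustrative sketch (added for clarity, not part of the original module): with
# source="/home/me/project" and destination="/srv/project", the call
# _translate_path("/home/me/project/main.py", source, destination) returns
# ("/srv/project/main.py", True), while a path outside the source prefix comes back
# unchanged together with False.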
class PathMap:
__slots__ = ("_local", "_remote")
def __init__(self, local: str, remote: str) -> None:
self._local = local
self._remote = remote
@classmethod
def parse(cls, json: Any) -> "Optional[List[PathMap]]":
if not isinstance(json, list):
return None
result = [] # type: List[PathMap]
for path_map in json:
if not isinstance(path_map, dict):
debug('path map entry is not an object')
continue
local = path_map.get("local")
if not isinstance(local, str):
debug('missing "local" key for path map entry')
continue
remote = path_map.get("remote")
if not isinstance(remote, str):
debug('missing "remote" key for path map entry')
continue
result.append(PathMap(local, remote))
return result
def __eq__(self, other: Any) -> bool:
if not isinstance(other, PathMap):
return False
return self._local == other._local and self._remote == other._remote
def map_from_local_to_remote(self, uri: str) -> Tuple[str, bool]:
return _translate_path(uri, self._local, self._remote)
def map_from_remote_to_local(self, uri: str) -> Tuple[str, bool]:
return _translate_path(uri, self._remote, self._local)
class TransportConfig:
__slots__ = ("name", "command", "tcp_port", "env", "listener_socket")
def __init__(
self,
name: str,
command: List[str],
tcp_port: Optional[int],
env: Dict[str, str],
listener_socket: Optional[socket.socket]
) -> None:
if not command and not tcp_port:
raise ValueError('neither "command" nor "tcp_port" is provided; cannot start a language server')
self.name = name
self.command = command
self.tcp_port = tcp_port
self.env = env
self.listener_socket = listener_socket
class ClientConfig:
def __init__(self,
name: str,
selector: str,
priority_selector: Optional[str] = None,
schemes: Optional[List[str]] = None,
command: Optional[List[str]] = None,
binary_args: Optional[List[str]] = None, # DEPRECATED
tcp_port: Optional[int] = None,
auto_complete_selector: Optional[str] = None,
enabled: bool = True,
init_options: DottedDict = DottedDict(),
settings: DottedDict = DottedDict(),
env: Dict[str, str] = {},
experimental_capabilities: Optional[Dict[str, Any]] = None,
disabled_capabilities: DottedDict = DottedDict(),
file_watcher: FileWatcherConfig = {},
path_maps: Optional[List[PathMap]] = None) -> None:
self.name = name
self.selector = selector
self.priority_selector = priority_selector if priority_selector else self.selector
if isinstance(schemes, list):
self.schemes = schemes # type: List[str]
else:
self.schemes = ["file"]
if isinstance(command, list):
self.command = command
else:
assert isinstance(binary_args, list)
self.command = binary_args
self.tcp_port = tcp_port
self.auto_complete_selector = auto_complete_selector
self.enabled = enabled
self.init_options = init_options
self.settings = settings
self.env = env
self.experimental_capabilities = experimental_capabilities
self.disabled_capabilities = disabled_capabilities
self.file_watcher = file_watcher
self.path_maps = path_maps
self.status_key = "lsp_{}".format(self.name)
@classmethod
def from_sublime_settings(cls, name: str, s: sublime.Settings, file: str) -> "ClientConfig":
base = sublime.decode_value(sublime.load_resource(file))
settings = DottedDict(base.get("settings", {})) # defined by the plugin author
settings.update(read_dict_setting(s, "settings", {})) # overrides from the user
init_options = DottedDict(base.get("initializationOptions", {}))
init_options.update(read_dict_setting(s, "initializationOptions", {}))
disabled_capabilities = s.get("disabled_capabilities")
file_watcher = cast(FileWatcherConfig, read_dict_setting(s, "file_watcher", {}))
if isinstance(disabled_capabilities, dict):
disabled_capabilities = DottedDict(disabled_capabilities)
else:
disabled_capabilities = DottedDict()
return ClientConfig(
name=name,
selector=_read_selector(s),
priority_selector=_read_priority_selector(s),
schemes=s.get("schemes"),
command=read_list_setting(s, "command", []),
tcp_port=s.get("tcp_port"),
auto_complete_selector=s.get("auto_complete_selector"),
# Default to True, because an LSP plugin is enabled iff it is enabled as a Sublime package.
enabled=bool(s.get("enabled", True)),
init_options=init_options,
settings=settings,
env=read_dict_setting(s, "env", {}),
experimental_capabilities=s.get("experimental_capabilities"),
disabled_capabilities=disabled_capabilities,
file_watcher=file_watcher,
path_maps=PathMap.parse(s.get("path_maps"))
)
@classmethod
def from_dict(cls, name: str, d: Dict[str, Any]) -> "ClientConfig":
disabled_capabilities = d.get("disabled_capabilities")
if isinstance(disabled_capabilities, dict):
disabled_capabilities = DottedDict(disabled_capabilities)
else:
disabled_capabilities = DottedDict()
schemes = d.get("schemes")
if not isinstance(schemes, list):
schemes = ["file"]
return ClientConfig(
name=name,
selector=_read_selector(d),
priority_selector=_read_priority_selector(d),
schemes=schemes,
command=d.get("command", []),
tcp_port=d.get("tcp_port"),
auto_complete_selector=d.get("auto_complete_selector"),
enabled=d.get("enabled", False),
init_options=DottedDict(d.get("initializationOptions")),
settings=DottedDict(d.get("settings")),
env=d.get("env", dict()),
experimental_capabilities=d.get("experimental_capabilities"),
disabled_capabilities=disabled_capabilities,
file_watcher=d.get("file_watcher", dict()),
path_maps=PathMap.parse(d.get("path_maps"))
)
@classmethod
def from_config(cls, src_config: "ClientConfig", override: Dict[str, Any]) -> "ClientConfig":
path_map_override = PathMap.parse(override.get("path_maps"))
disabled_capabilities = override.get("disabled_capabilities")
if isinstance(disabled_capabilities, dict):
disabled_capabilities = DottedDict(disabled_capabilities)
else:
disabled_capabilities = src_config.disabled_capabilities
return ClientConfig(
name=src_config.name,
selector=_read_selector(override) or src_config.selector,
priority_selector=_read_priority_selector(override) or src_config.priority_selector,
schemes=override.get("schemes", src_config.schemes),
command=override.get("command", src_config.command),
tcp_port=override.get("tcp_port", src_config.tcp_port),
auto_complete_selector=override.get("auto_complete_selector", src_config.auto_complete_selector),
enabled=override.get("enabled", src_config.enabled),
init_options=DottedDict.from_base_and_override(
src_config.init_options, override.get("initializationOptions")),
settings=DottedDict.from_base_and_override(src_config.settings, override.get("settings")),
env=override.get("env", src_config.env),
experimental_capabilities=override.get(
"experimental_capabilities", src_config.experimental_capabilities),
disabled_capabilities=disabled_capabilities,
file_watcher=override.get("file_watcher", src_config.file_watcher),
path_maps=path_map_override if path_map_override else src_config.path_maps
)
def resolve_transport_config(self, variables: Dict[str, str]) -> TransportConfig:
tcp_port = None # type: Optional[int]
listener_socket = None # type: Optional[socket.socket]
if self.tcp_port is not None:
# < 0 means we're hosting a TCP server
if self.tcp_port < 0:
# -1 means pick any free port
if self.tcp_port < -1:
tcp_port = -self.tcp_port
# Create a listener socket for incoming connections
listener_socket = _start_tcp_listener(tcp_port)
tcp_port = int(listener_socket.getsockname()[1])
else:
tcp_port = _find_free_port() if self.tcp_port == 0 else self.tcp_port
if tcp_port is not None:
variables["port"] = str(tcp_port)
command = sublime.expand_variables(self.command, variables)
command = [os.path.expanduser(arg) for arg in command]
if tcp_port is not None:
# DEPRECATED -- replace {port} with $port or ${port} in your client config
command = [a.replace('{port}', str(tcp_port)) for a in command]
env = os.environ.copy()
for key, value in self.env.items():
if key == 'PATH':
env[key] = sublime.expand_variables(value, variables) + os.path.pathsep + env[key]
else:
env[key] = sublime.expand_variables(value, variables)
return TransportConfig(self.name, command, tcp_port, env, listener_socket)
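# Illustrative summary (added for clarity, not part of the original module) of how
# self.tcp_port is interpreted above:
#   None  -> no TCP involved (communicate over stdio)
#   > 0   -> use that fixed port (substituted into the command as $port)
#   == 0  -> pick any free port and substitute it into the command
#   == -1 -> host a TCP listener ourselves on any free port
#   < -1  -> host a TCP listener ourselves on abs(tcp_port)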
def set_view_status(self, view: sublime.View, message: str) -> None:
if sublime.load_settings("LSP.sublime-settings").get("show_view_status"):
status = "{}: {}".format(self.name, message) if message else self.name
view.set_status(self.status_key, status)
def erase_view_status(self, view: sublime.View) -> None:
view.erase_status(self.status_key)
def match_view(self, view: sublime.View, scheme: str) -> bool:
syntax = view.syntax()
if not syntax:
return False
# Every part of an x.y.z scope seems to contribute 8.
# An empty selector results in a score of 1.
# A non-matching non-empty selector results in a score of 0.
# We want to match at least one part of an x.y.z, and we don't want to match on empty selectors.
return scheme in self.schemes and sublime.score_selector(syntax.scope, self.selector) >= 8
def map_client_path_to_server_uri(self, path: str) -> str:
if self.path_maps:
for path_map in self.path_maps:
path, mapped = path_map.map_from_local_to_remote(path)
if mapped:
break
return filename_to_uri(path)
def map_server_uri_to_client_path(self, uri: str) -> str:
path = uri_to_filename(uri)
if self.path_maps:
for path_map in self.path_maps:
path, mapped = path_map.map_from_remote_to_local(path)
if mapped:
break
return path
def is_disabled_capability(self, capability_path: str) -> bool:
for value in self.disabled_capabilities.walk(capability_path):
if isinstance(value, bool):
return value
elif isinstance(value, dict):
if value:
# If it's not empty we'll continue the walk
continue
else:
# This might be a leaf node
return True
return False
def filter_out_disabled_capabilities(self, capability_path: str, options: Dict[str, Any]) -> Dict[str, Any]:
result = {} # type: Dict[str, Any]
for k, v in options.items():
if not self.is_disabled_capability("{}.{}".format(capability_path, k)):
result[k] = v
return result
def __repr__(self) -> str:
items = [] # type: List[str]
for k, v in self.__dict__.items():
if not k.startswith("_"):
items.append("{}={}".format(k, repr(v)))
return "{}({})".format(self.__class__.__name__, ", ".join(items))
def __eq__(self, other: Any) -> bool:
if not isinstance(other, ClientConfig):
return False
for k, v in self.__dict__.items():
if not k.startswith("_") and v != getattr(other, k):
return False
return True
def syntax2scope(syntax_path: str) -> Optional[str]:
syntax = sublime.syntax_from_path(syntax_path)
return syntax.scope if syntax else None
def view2scope(view: sublime.View) -> str:
try:
return view.scope_name(0).split()[0]
except IndexError:
return ''
def _read_selector(config: Union[sublime.Settings, Dict[str, Any]]) -> str:
# Best case scenario
selector = config.get("selector")
if isinstance(selector, str):
return selector
# Otherwise, look for "languages": [...]
languages = config.get("languages")
if isinstance(languages, list):
selectors = []
for language in languages:
# First priority is document_selector,
document_selector = language.get("document_selector")
if isinstance(document_selector, str):
selectors.append(document_selector)
continue
# After that, syntaxes have priority.
syntaxes = language.get("syntaxes")
if isinstance(syntaxes, list):
for path in syntaxes:
syntax = sublime.syntax_from_path(path)
if syntax:
selectors.append(syntax.scope)
continue
# No syntaxes and no document_selector... then there must exist a languageId.
language_id = language.get("languageId")
if isinstance(language_id, str):
selectors.append("source.{}".format(language_id))
return "|".join(map("({})".format, selectors))
# Otherwise, look for "document_selector"
document_selector = config.get("document_selector")
if isinstance(document_selector, str):
return document_selector
# Otherwise, look for "syntaxes": [...]
syntaxes = config.get("syntaxes")
if isinstance(syntaxes, list):
selectors = []
for path in syntaxes:
syntax = sublime.syntax_from_path(path)
if syntax:
selectors.append(syntax.scope)
return "|".join(selectors)
# No syntaxes and no document_selector... then there must exist a languageId.
language_id = config.get("languageId")
if language_id:
return "source.{}".format(language_id)
return ""
def _read_priority_selector(config: Union[sublime.Settings, Dict[str, Any]]) -> str:
# Best case scenario
selector = config.get("priority_selector")
if isinstance(selector, str):
return selector
# Otherwise, look for "languages": [...]
languages = config.get("languages")
if isinstance(languages, list):
selectors = []
for language in languages:
# First priority is feature_selector.
feature_selector = language.get("feature_selector")
if isinstance(feature_selector, str):
selectors.append(feature_selector)
continue
# After that, scopes have priority.
scopes = language.get("scopes")
if isinstance(scopes, list):
selectors.extend(scopes)
continue
# No scopes and no feature_selector. So there must be a languageId
language_id = language.get("languageId")
if isinstance(language_id, str):
selectors.append("source.{}".format(language_id))
return "|".join(map("({})".format, selectors))
# Otherwise, look for "feature_selector"
feature_selector = config.get("feature_selector")
if isinstance(feature_selector, str):
return feature_selector
# Otherwise, look for "scopes": [...]
scopes = config.get("scopes")
if isinstance(scopes, list):
return "|".join(map("({})".format, scopes))
# No scopes and no feature_selector... then there must exist a languageId
language_id = config.get("languageId")
if language_id:
return "source.{}".format(language_id)
return ""
def _find_free_port() -> int:
with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(('', 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
def _start_tcp_listener(tcp_port: Optional[int]) -> socket.socket:
sock = socket.socket()
sock.bind(('localhost', tcp_port or 0))
sock.settimeout(TCP_CONNECT_TIMEOUT)
sock.listen(1)
return sock
| [] | [] | [] | [] | [] | python | 0 | 0 | |
login.py | import base64
import hashlib
import json
import os
import pathlib
import requests
import secrets
import threading
import urllib
import webbrowser
from time import sleep
from werkzeug.serving import make_server
import dotenv
from flask import Flask, request
# Load Settings from dotenv
env_path = pathlib.Path('.') / '.env'
dotenv.load_dotenv(dotenv_path=env_path)
# Set Identity Provider Settings
auth_listener_host = os.getenv('AUTH_LISTENER_HOST')
auth_listener_port = os.getenv('AUTH_LISTENER_PORT')
auth_client_id = os.getenv('AUTH_CLIENT_ID')
auth_tenant = os.getenv('AUTH_TENANT')
# auth_clients_url = os.getenv('AUTH_CLIENTS_URL')
auth_authorize_url = os.getenv('AUTH_AUTHORIZE_URL')
auth_token_url = os.getenv('AUTH_TOKEN_URL')
auth_audience_url = os.getenv('AUTH_AUDIENCE_URL')
auth_scopes = os.getenv('AUTH_SCOPES')
# Setup Auth Listener
app = Flask(__name__)
@app.route("/callback")
def callback():
"""
The callback is invoked after a completed login attempt (successful or otherwise).
It sets global variables with the auth code or error messages, then sets the
polling flag received_callback.
:return:
"""
global received_callback, code, error_message, received_state
error_message = None
code = None
if 'error' in request.args:
error_message = request.args['error'] + ': ' + request.args['error_description']
else:
code = request.args['code']
received_state = request.args['state']
received_callback = True
return "Please return to your application now."
class ServerThread(threading.Thread):
"""
The Flask server is done this way to allow shutting down after a single request has been received.
"""
def __init__(self, app):
threading.Thread.__init__(self)
self.srv = make_server(auth_listener_host, int(auth_listener_port), app)  # the port read from the environment is a string
self.ctx = app.app_context()
self.ctx.push()
def run(self):
print('starting server')
self.srv.serve_forever()
def shutdown(self):
self.srv.shutdown()
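# Note (added for clarity): ServerThread runs Flask's development server on a background
# thread so the main thread below can open the browser, poll the received_callback flag,
# and call shutdown() once the /callback route has fired.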
def auth_url_encode(byte_data):
"""
Safe encoding handles + and /, and also replaces = with nothing
:param byte_data:
:return:
"""
return base64.urlsafe_b64encode(byte_data).decode('utf-8').replace('=', '')
def generate_challenge(a_verifier):
return auth_url_encode(hashlib.sha256(a_verifier.encode()).digest())
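# Note (added for clarity): generate_challenge implements the PKCE "S256" transform from
# RFC 7636 -- BASE64URL(SHA256(code_verifier)) with the '=' padding stripped -- so the
# identity provider can verify that the token request comes from the same client that
# started the authorization request.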
# Setup auth variables
verifier = auth_url_encode(secrets.token_bytes(32))
challenge = generate_challenge(verifier)
state = auth_url_encode(secrets.token_bytes(32))
redirect_uri = f"http://{auth_listener_host}:{auth_listener_port}/callback"
# We generate a nonce (state) that is used to protect against attackers invoking the callback
# base_url = 'https://%s.auth0.com/authorize?' % tenant
base_url = f"{auth_authorize_url}?"
url_parameters = {
# 'audience': auth_audience_url,
'scope': auth_scopes,
'response_type': 'code',
'redirect_uri': redirect_uri,
'client_id': auth_client_id,
'code_challenge': challenge.replace('=', ''),
'code_challenge_method': 'S256',
'state': state
}
url = base_url + urllib.parse.urlencode(url_parameters)
# Open the browser window to the login url
# Start the server
# Poll til the callback has been invoked
received_callback = False
webbrowser.open_new(url)
server = ServerThread(app)
server.start()
while not received_callback:
sleep(1)
server.shutdown()
if state != received_state:
print("Error: session replay or similar attack in progress. Please log out of all connections.")
exit(-1)
if error_message:
print("An error occurred:")
print(error_message)
exit(-1)
# Exchange the code for a token
# url = 'https://%s.auth0.com/oauth/token' % tenant
url = auth_token_url
headers = {'Content-Type': 'application/json'}
body = {'grant_type': 'authorization_code',
'client_id': auth_client_id,
'code_verifier': verifier,
'code': code,
'audience': 'https://gateley-empire-life.auth0.com/api/v2/',
'redirect_uri': redirect_uri}
r = requests.post(url, headers=headers, data=json.dumps(body))
data = r.json()
print("REQUEST RESULTS:")
print(json.dumps(data))
# Use the token to list the clients
# url = 'https://%s.auth0.com/api/v2/clients' % tenant
# url = auth_clients_url
# headers = {'Authorization': 'Bearer %s' % data['access_token']}
# r = requests.get(url, headers=headers)
# data = r.json()
# for client in data:
# print("Client: " + client['name'])
| [] | [] | ["AUTH_TOKEN_URL", "AUTH_AUDIENCE_URL", "AUTH_LISTENER_HOST", "AUTH_LISTENER_PORT", "AUTH_TENANT", "AUTH_AUTHORIZE_URL", "AUTH_CLIENT_ID", "AUTH_CLIENTS_URL", "AUTH_SCOPES"] | [] | ["AUTH_TOKEN_URL", "AUTH_AUDIENCE_URL", "AUTH_LISTENER_HOST", "AUTH_LISTENER_PORT", "AUTH_TENANT", "AUTH_AUTHORIZE_URL", "AUTH_CLIENT_ID", "AUTH_CLIENTS_URL", "AUTH_SCOPES"] | python | 9 | 0 | |
contrib/spendfrom/spendfrom.py | #!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
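# Illustrative sketch (added for clarity, not part of the original script): a minimal
# bitcoin.conf such as
#   rpcuser=alice
#   rpcpassword=secret
#   testnet=1
# parses into {"rpcuser": "alice", "rpcpassword": "secret", "testnet": "1"}; the FakeSecHead
# wrapper fakes an "[all]" section header so ConfigParser will accept the sectionless file.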
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 18383 if testnet else 8383
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and total_in-total_out < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and total_in-total_out < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| [] | [] | ["APPDATA"] | [] | ["APPDATA"] | python | 1 | 0 | |
goinsta.go | package goinsta
import (
"crypto/tls"
"encoding/json"
"io"
"io/ioutil"
"net/http"
"net/http/cookiejar"
neturl "net/url"
"os"
"path/filepath"
"strconv"
"time"
)
// Instagram represent the main API handler
//
// Profiles: Represents instagram's user profile.
// Account: Represents instagram's personal account.
// Search: Represents instagram's search.
// Timeline: Represents instagram's timeline.
// Activity: Represents instagram's user activity.
// Inbox: Represents instagram's messages.
// Location: Represents instagram's locations.
//
// See Scheme section in README.md for more information.
//
// We recommend to use Export and Import functions after first Login.
//
// Also you can use SetProxy and UnsetProxy to set and unset proxy.
// Golang also provides the option to set a proxy using HTTP_PROXY env var.
type Instagram struct {
user string
pass string
// device id: android-1923fjnma8123
dID string
// uuid: 8493-1233-4312312-5123
uuid string
// rankToken
rankToken string
// token
token string
// phone id
pid string
// ads id
adid string
// challenge URL
challengeURL string
// Instagram objects
// Challenge controls the security side of the account (like SMS verification / "It was me")
Challenge *Challenge
// Profiles is the user interaction
Profiles *Profiles
// Account stores all personal data of the user and his/her options.
Account *Account
// Search performs searching of multiple things (users, locations...)
Search *Search
// Timeline allows to receive timeline media.
Timeline *Timeline
// Activity are instagram notifications.
Activity *Activity
// Inbox are instagram message/chat system.
Inbox *Inbox
// Feed for search over feeds
Feed *Feed
// User contacts from mobile address book
Contacts *Contacts
// Location instance
Locations *LocationInstance
c *http.Client
}
// SetHTTPClient sets the http client. This allows users to supply a mock HTTP client
// Transport for HTTP testing, which avoids direct calls to Instagram and returns mocked
// responses instead.
func (inst *Instagram) SetHTTPClient(client *http.Client) {
inst.c = client
}
// SetHTTPTransport sets the http transport. This allows users to tweak the underlying
// low-level transport to add additional functionality.
func (inst *Instagram) SetHTTPTransport(transport http.RoundTripper) {
inst.c.Transport = transport
}
// SetDeviceID sets device id
func (inst *Instagram) SetDeviceID(id string) {
inst.dID = id
}
// SetUUID sets uuid
func (inst *Instagram) SetUUID(uuid string) {
inst.uuid = uuid
}
// SetPhoneID sets phone id
func (inst *Instagram) SetPhoneID(id string) {
inst.pid = id
}
// SetCookieJar sets the Cookie Jar. This further allows to use a custom implementation
// of a cookie jar which may be backed by a different data store such as redis.
func (inst *Instagram) SetCookieJar(jar http.CookieJar) error {
url, err := neturl.Parse(goInstaAPIUrl)
if err != nil {
return err
}
// First grab the cookies from the existing jar and we'll put it in the new jar.
cookies := inst.c.Jar.Cookies(url)
inst.c.Jar = jar
inst.c.Jar.SetCookies(url, cookies)
return nil
}
// New creates Instagram structure
func New(username, password string) *Instagram {
// this call never returns an error
jar, _ := cookiejar.New(nil)
inst := &Instagram{
user: username,
pass: password,
dID: generateDeviceID(
generateMD5Hash(username + password),
),
uuid: generateUUID(), // both uuids must be different
pid: generateUUID(),
c: &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
},
Jar: jar,
},
}
inst.init()
return inst
}
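// Usage sketch (hypothetical credentials, not from the original docs):
//
//	inst := New("myuser", "mypass")
//	if err := inst.Login(); err != nil {
//		// handle login failure
//	}
//	defer inst.Logout()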
func (inst *Instagram) init() {
inst.Challenge = newChallenge(inst)
inst.Profiles = newProfiles(inst)
inst.Activity = newActivity(inst)
inst.Timeline = newTimeline(inst)
inst.Search = newSearch(inst)
inst.Inbox = newInbox(inst)
inst.Feed = newFeed(inst)
inst.Contacts = newContacts(inst)
inst.Locations = newLocation(inst)
}
// SetProxy sets proxy for connection.
func (inst *Instagram) SetProxy(url string, insecure bool) error {
uri, err := neturl.Parse(url)
if err == nil {
inst.c.Transport = &http.Transport{
Proxy: http.ProxyURL(uri),
TLSClientConfig: &tls.Config{
InsecureSkipVerify: insecure,
},
}
}
return err
}
// UnsetProxy unsets proxy for connection.
func (inst *Instagram) UnsetProxy() {
inst.c.Transport = nil
}
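// Proxy usage sketch (hypothetical address, not from the original docs):
//
//	if err := inst.SetProxy("http://127.0.0.1:8080", true); err != nil {
//		// handle proxy configuration error
//	}
//	defer inst.UnsetProxy()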
// Save exports config to ~/.goinsta
func (inst *Instagram) Save() error {
home := os.Getenv("HOME")
if home == "" {
home = os.Getenv("home") // for plan9
}
return inst.Export(filepath.Join(home, ".goinsta"))
}
// Export exports *Instagram object options
func (inst *Instagram) Export(path string) error {
url, err := neturl.Parse(goInstaAPIUrl)
if err != nil {
return err
}
config := ConfigFile{
ID: inst.Account.ID,
User: inst.user,
DeviceID: inst.dID,
UUID: inst.uuid,
RankToken: inst.rankToken,
Token: inst.token,
PhoneID: inst.pid,
Cookies: inst.c.Jar.Cookies(url),
}
bytes, err := json.Marshal(config)
if err != nil {
return err
}
return ioutil.WriteFile(path, bytes, 0644)
}
// Export exports selected *Instagram object options to an io.Writer
func Export(inst *Instagram, writer io.Writer) error {
url, err := neturl.Parse(goInstaAPIUrl)
if err != nil {
return err
}
config := ConfigFile{
ID: inst.Account.ID,
User: inst.user,
DeviceID: inst.dID,
UUID: inst.uuid,
RankToken: inst.rankToken,
Token: inst.token,
PhoneID: inst.pid,
Cookies: inst.c.Jar.Cookies(url),
}
bytes, err := json.Marshal(config)
if err != nil {
return err
}
_, err = writer.Write(bytes)
return err
}
// ImportReader imports instagram configuration from io.Reader
//
// This function does not set proxy automatically. Use SetProxy after this call.
func ImportReader(r io.Reader) (*Instagram, error) {
bytes, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
config := ConfigFile{}
err = json.Unmarshal(bytes, &config)
if err != nil {
return nil, err
}
return ImportConfig(config)
}
// ImportConfig imports instagram configuration from a configuration object.
//
// This function does not set proxy automatically. Use SetProxy after this call.
func ImportConfig(config ConfigFile) (*Instagram, error) {
url, err := neturl.Parse(goInstaAPIUrl)
if err != nil {
return nil, err
}
inst := &Instagram{
user: config.User,
dID: config.DeviceID,
uuid: config.UUID,
rankToken: config.RankToken,
token: config.Token,
pid: config.PhoneID,
c: &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
},
},
}
inst.c.Jar, err = cookiejar.New(nil)
if err != nil {
return inst, err
}
inst.c.Jar.SetCookies(url, config.Cookies)
inst.init()
inst.Account = &Account{inst: inst, ID: config.ID}
inst.Account.Sync()
return inst, nil
}
// Import imports instagram configuration
//
// This function does not set proxy automatically. Use SetProxy after this call.
func Import(path string) (*Instagram, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
return ImportReader(f)
}
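// Round-trip sketch (hypothetical path, not from the original docs): after a successful
// Login the session can be persisted and restored without logging in again:
//
//	_ = inst.Export("/tmp/goinsta.json")
//	restored, err := Import("/tmp/goinsta.json")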
func (inst *Instagram) readMsisdnHeader() error {
data, err := json.Marshal(
map[string]string{
"device_id": inst.uuid,
},
)
if err != nil {
return err
}
_, err = inst.sendRequest(
&reqOptions{
Endpoint: urlMsisdnHeader,
IsPost: true,
Connection: "keep-alive",
Query: generateSignature(b2s(data)),
},
)
return err
}
func (inst *Instagram) contactPrefill() error {
data, err := json.Marshal(
map[string]string{
"phone_id": inst.pid,
"_csrftoken": inst.token,
"usage": "prefill",
},
)
if err != nil {
return err
}
_, err = inst.sendRequest(
&reqOptions{
Endpoint: urlContactPrefill,
IsPost: true,
Connection: "keep-alive",
Query: generateSignature(b2s(data)),
},
)
return err
}
func (inst *Instagram) zrToken() error {
_, err := inst.sendRequest(
&reqOptions{
Endpoint: urlZrToken,
IsPost: false,
Connection: "keep-alive",
Query: map[string]string{
"device_id": inst.dID,
"token_hash": "",
"custom_device_id": inst.uuid,
"fetch_reason": "token_expired",
},
},
)
return err
}
func (inst *Instagram) sendAdID() error {
data, err := inst.prepareData(
map[string]interface{}{
"adid": inst.adid,
},
)
if err != nil {
return err
}
_, err = inst.sendRequest(
&reqOptions{
Endpoint: urlLogAttribution,
IsPost: true,
Connection: "keep-alive",
Query: generateSignature(data),
},
)
return err
}
// Login performs instagram login.
//
// Password will be deleted after login
func (inst *Instagram) Login() error {
err := inst.readMsisdnHeader()
if err != nil {
return err
}
err = inst.syncFeatures()
if err != nil {
return err
}
err = inst.zrToken()
if err != nil {
return err
}
err = inst.sendAdID()
if err != nil {
return err
}
err = inst.contactPrefill()
if err != nil {
return err
}
result, err := json.Marshal(
map[string]interface{}{
"guid": inst.uuid,
"login_attempt_count": 0,
"_csrftoken": inst.token,
"device_id": inst.dID,
"adid": inst.adid,
"phone_id": inst.pid,
"username": inst.user,
"password": inst.pass,
"google_tokens": "[]",
},
)
if err != nil {
return err
}
body, err := inst.sendRequest(
&reqOptions{
Endpoint: urlLogin,
Query: generateSignature(b2s(result)),
IsPost: true,
Login: true,
},
)
if err != nil {
return err
}
inst.pass = ""
// getting account data
res := accountResp{}
err = json.Unmarshal(body, &res)
if err != nil {
return err
}
inst.Account = &res.Account
inst.Account.inst = inst
inst.rankToken = strconv.FormatInt(inst.Account.ID, 10) + "_" + inst.uuid
inst.zrToken()
return err
}
// Logout closes current session
func (inst *Instagram) Logout() error {
_, err := inst.sendSimpleRequest(urlLogout)
inst.c.Jar = nil
inst.c = nil
return err
}
func (inst *Instagram) syncFeatures() error {
data, err := inst.prepareData(
map[string]interface{}{
"id": inst.uuid,
"experiments": goInstaExperiments,
},
)
if err != nil {
return err
}
_, err = inst.sendRequest(
&reqOptions{
Endpoint: urlQeSync,
Query: generateSignature(data),
IsPost: true,
Login: true,
},
)
return err
}
func (inst *Instagram) megaphoneLog() error {
data, err := inst.prepareData(
map[string]interface{}{
"id": inst.Account.ID,
"type": "feed_aysf",
"action": "seen",
"reason": "",
"device_id": inst.dID,
"uuid": generateMD5Hash(string(time.Now().Unix())),
},
)
if err != nil {
return err
}
_, err = inst.sendRequest(
&reqOptions{
Endpoint: urlMegaphoneLog,
Query: generateSignature(data),
IsPost: true,
Login: true,
},
)
return err
}
func (inst *Instagram) expose() error {
data, err := inst.prepareData(
map[string]interface{}{
"id": inst.Account.ID,
"experiment": "ig_android_profile_contextual_feed",
},
)
if err != nil {
return err
}
_, err = inst.sendRequest(
&reqOptions{
Endpoint: urlExpose,
Query: generateSignature(data),
IsPost: true,
},
)
return err
}
// GetMedia returns media specified by id.
//
// The argument can be int64 or string
//
// See example: examples/media/like.go
func (inst *Instagram) GetMedia(o interface{}) (*FeedMedia, error) {
media := &FeedMedia{
inst: inst,
NextID: o,
}
return media, media.Sync()
}
| ["\"HOME\"", "\"home\""] | [] | ["home", "HOME"] | [] | ["home", "HOME"] | go | 2 | 0 | |
cmd/upgrade.go | package cmd
import (
"errors"
"fmt"
"os"
"strings"
"github.com/spf13/cobra"
"k8s.io/helm/pkg/helm"
"github.com/databus23/helm-diff/v3/diff"
"github.com/databus23/helm-diff/v3/manifest"
)
type diffCmd struct {
release string
chart string
chartVersion string
client helm.Interface
detailedExitCode bool
devel bool
disableValidation bool
disableOpenAPIValidation bool
dryRun bool
namespace string // namespace to assume the release to be installed into. Defaults to the current kube config namespace.
valueFiles valueFiles
values []string
stringValues []string
fileValues []string
reuseValues bool
resetValues bool
allowUnreleased bool
noHooks bool
includeTests bool
suppressedKinds []string
outputContext int
showSecrets bool
postRenderer string
output string
install bool
stripTrailingCR bool
}
func (d *diffCmd) isAllowUnreleased() bool {
// helm upgrade --install is effectively the same as helm-diff's --allow-unreleased option,
// support both so that helm diff plugin can be applied on the same command
// https://github.com/databus23/helm-diff/issues/108
return d.allowUnreleased || d.install
}
const globalUsage = `Show a diff explaining what a helm upgrade would change.
This fetches the currently deployed version of a release
and compares it to a chart plus values.
This can be used to visualize what changes a helm upgrade will
perform.
`
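// Typical invocations (illustrative, mirroring the command's own example):
//
//	helm diff upgrade my-release ./chart -f values.yaml
//	helm diff upgrade my-release ./chart --set image.tag=1.2.3 --detailed-exitcode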
func newChartCommand() *cobra.Command {
diff := diffCmd{
namespace: os.Getenv("HELM_NAMESPACE"),
}
cmd := &cobra.Command{
Use: "upgrade [flags] [RELEASE] [CHART]",
Short: "Show a diff explaining what a helm upgrade would change.",
Long: globalUsage,
Example: " helm diff upgrade my-release stable/postgresql --values values.yaml",
Args: func(cmd *cobra.Command, args []string) error {
return checkArgsLength(len(args), "release name", "chart path")
},
PreRun: func(*cobra.Command, []string) {
expandTLSPaths()
},
RunE: func(cmd *cobra.Command, args []string) error {
// Suppress the command usage on error. See #77 for more info
cmd.SilenceUsage = true
if q, _ := cmd.Flags().GetBool("suppress-secrets"); q {
diff.suppressedKinds = append(diff.suppressedKinds, "Secret")
}
diff.release = args[0]
diff.chart = args[1]
if isHelm3() {
return diff.runHelm3()
}
if diff.client == nil {
diff.client = createHelmClient()
}
return diff.run()
},
}
f := cmd.Flags()
var kubeconfig string
f.StringVar(&kubeconfig, "kubeconfig", "", "This flag is ignored, to allow passing of this top level flag to helm")
f.StringVar(&diff.chartVersion, "version", "", "specify the exact chart version to use. If this is not specified, the latest version is used")
f.BoolVar(&diff.detailedExitCode, "detailed-exitcode", false, "return a non-zero exit code when there are changes")
f.BoolP("suppress-secrets", "q", false, "suppress secrets in the output")
f.BoolVar(&diff.showSecrets, "show-secrets", false, "do not redact secret values in the output")
f.VarP(&diff.valueFiles, "values", "f", "specify values in a YAML file (can specify multiple)")
f.StringArrayVar(&diff.values, "set", []string{}, "set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)")
f.StringArrayVar(&diff.stringValues, "set-string", []string{}, "set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)")
f.StringArrayVar(&diff.fileValues, "set-file", []string{}, "set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2)")
f.BoolVar(&diff.reuseValues, "reuse-values", false, "reuse the last release's values and merge in any new values. If '--reset-values' is specified, this is ignored")
f.BoolVar(&diff.resetValues, "reset-values", false, "reset the values to the ones built into the chart and merge in any new values")
f.BoolVar(&diff.allowUnreleased, "allow-unreleased", false, "enables diffing of releases that are not yet deployed via Helm")
f.BoolVar(&diff.install, "install", false, "enables diffing of releases that are not yet deployed via Helm (equivalent to --allow-unreleased, added to match \"helm upgrade --install\" command)")
f.BoolVar(&diff.noHooks, "no-hooks", false, "disable diffing of hooks")
f.BoolVar(&diff.includeTests, "include-tests", false, "enable the diffing of the helm test hooks")
f.BoolVar(&diff.devel, "devel", false, "use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored.")
f.StringArrayVar(&diff.suppressedKinds, "suppress", []string{}, "allows suppression of the values listed in the diff output")
f.IntVarP(&diff.outputContext, "context", "C", -1, "output NUM lines of context around changes")
f.BoolVar(&diff.disableValidation, "disable-validation", false, "disables rendered templates validation against the Kubernetes cluster you are currently pointing to. This is the same validation performed on an install")
f.BoolVar(&diff.disableOpenAPIValidation, "disable-openapi-validation", false, "disables rendered templates validation against the Kubernetes OpenAPI Schema")
f.BoolVar(&diff.dryRun, "dry-run", false, "disables cluster access and show diff as if it was install. Implies --install, --reset-values, and --disable-validation")
f.StringVar(&diff.postRenderer, "post-renderer", "", "the path to an executable to be used for post rendering. If it exists in $PATH, the binary will be used, otherwise it will try to look for the executable at the given path")
f.StringVar(&diff.output, "output", "diff", "Possible values: diff, simple, json, template. When set to \"template\", use the env var HELM_DIFF_TPL to specify the template.")
f.BoolVar(&diff.stripTrailingCR, "strip-trailing-cr", false, "strip trailing carriage return on input")
if !isHelm3() {
f.StringVar(&diff.namespace, "namespace", "default", "namespace to assume the release to be installed into")
}
if !isHelm3() {
addCommonCmdOptions(f)
}
return cmd
}
func (d *diffCmd) runHelm3() error {
if err := compatibleHelm3Version(); err != nil {
return err
}
var releaseManifest []byte
var err error
if !d.dryRun {
releaseManifest, err = getRelease(d.release, d.namespace)
}
var newInstall bool
if err != nil && strings.Contains(err.Error(), "release: not found") {
if d.isAllowUnreleased() {
fmt.Printf("********************\n\n\tRelease was not present in Helm. Diff will show entire contents as new.\n\n********************\n")
newInstall = true
err = nil
} else {
fmt.Printf("********************\n\n\tRelease was not present in Helm. Include the `--allow-unreleased` to perform diff without exiting in error.\n\n********************\n")
return err
}
}
if err != nil {
return fmt.Errorf("Failed to get release %s in namespace %s: %s", d.release, d.namespace, err)
}
installManifest, err := d.template(!newInstall)
if err != nil {
return fmt.Errorf("Failed to render chart: %s", err)
}
currentSpecs := make(map[string]*manifest.MappingResult)
if !newInstall && !d.dryRun {
if !d.noHooks {
hooks, err := getHooks(d.release, d.namespace)
if err != nil {
return err
}
releaseManifest = append(releaseManifest, hooks...)
}
if d.includeTests {
currentSpecs = manifest.Parse(string(releaseManifest), d.namespace)
} else {
currentSpecs = manifest.Parse(string(releaseManifest), d.namespace, helm3TestHook, helm2TestSuccessHook)
}
}
var newSpecs map[string]*manifest.MappingResult
if d.includeTests {
newSpecs = manifest.Parse(string(installManifest), d.namespace)
} else {
newSpecs = manifest.Parse(string(installManifest), d.namespace, helm3TestHook, helm2TestSuccessHook)
}
seenAnyChanges := diff.Manifests(currentSpecs, newSpecs, d.suppressedKinds, d.showSecrets, d.outputContext, d.output, d.stripTrailingCR, os.Stdout)
if d.detailedExitCode && seenAnyChanges {
return Error{
error: errors.New("identified at least one change, exiting with non-zero exit code (detailed-exitcode parameter enabled)"),
Code: 2,
}
}
return nil
}
func (d *diffCmd) run() error {
if d.chartVersion == "" && d.devel {
d.chartVersion = ">0.0.0-0"
}
chartPath, err := locateChartPath(d.chart, d.chartVersion, false, "")
if err != nil {
return err
}
if err := d.valueFiles.Valid(); err != nil {
return err
}
rawVals, err := d.vals()
if err != nil {
return err
}
releaseResponse, err := d.client.ReleaseContent(d.release)
var newInstall bool
if err != nil && strings.Contains(err.Error(), fmt.Sprintf("release: %q not found", d.release)) {
if d.isAllowUnreleased() {
fmt.Printf("********************\n\n\tRelease was not present in Helm. Diff will show entire contents as new.\n\n********************\n")
newInstall = true
err = nil
} else {
fmt.Printf("********************\n\n\tRelease was not present in Helm. Include the `--allow-unreleased` to perform diff without exiting in error.\n\n********************\n")
}
}
if err != nil {
return prettyError(err)
}
var currentSpecs, newSpecs map[string]*manifest.MappingResult
if newInstall {
installResponse, err := d.client.InstallRelease(
chartPath,
d.namespace,
helm.ReleaseName(d.release),
helm.ValueOverrides(rawVals),
helm.InstallDryRun(true),
)
if err != nil {
return prettyError(err)
}
currentSpecs = make(map[string]*manifest.MappingResult)
newSpecs = manifest.Parse(installResponse.Release.Manifest, installResponse.Release.Namespace)
} else {
upgradeResponse, err := d.client.UpdateRelease(
d.release,
chartPath,
helm.UpdateValueOverrides(rawVals),
helm.ReuseValues(d.reuseValues),
helm.ResetValues(d.resetValues),
helm.UpgradeDryRun(true),
)
if err != nil {
return prettyError(err)
}
if d.noHooks {
currentSpecs = manifest.Parse(releaseResponse.Release.Manifest, releaseResponse.Release.Namespace)
newSpecs = manifest.Parse(upgradeResponse.Release.Manifest, upgradeResponse.Release.Namespace)
} else {
currentSpecs = manifest.ParseRelease(releaseResponse.Release, d.includeTests)
newSpecs = manifest.ParseRelease(upgradeResponse.Release, d.includeTests)
}
}
seenAnyChanges := diff.Manifests(currentSpecs, newSpecs, d.suppressedKinds, d.showSecrets, d.outputContext, d.output, d.stripTrailingCR, os.Stdout)
if d.detailedExitCode && seenAnyChanges {
return Error{
error: errors.New("identified at least one change, exiting with non-zero exit code (detailed-exitcode parameter enabled)"),
Code: 2,
}
}
return nil
}
| ["\"HELM_NAMESPACE\""] | [] | ["HELM_NAMESPACE"] | [] | ["HELM_NAMESPACE"] | go | 1 | 0 | |
storage/buckets/main_test.go | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"log"
"os"
"strings"
"testing"
"time"
"github.com/GoogleCloudPlatform/golang-samples/internal/testutil"
"cloud.google.com/go/storage"
)
var (
storageClient *storage.Client
bucketName string
)
func TestMain(m *testing.M) {
// These functions are noisy.
log.SetOutput(ioutil.Discard)
s := m.Run()
log.SetOutput(os.Stderr)
os.Exit(s)
}
func setup(t *testing.T) {
tc := testutil.SystemTest(t)
ctx := context.Background()
var err error
storageClient, err = storage.NewClient(ctx)
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
bucketName = tc.ProjectID + "-storage-buckets-tests"
}
func TestCreate(t *testing.T) {
tc := testutil.SystemTest(t)
setup(t)
// Clean up bucket before running tests.
deleteBucket(storageClient, bucketName)
if err := create(storageClient, tc.ProjectID, bucketName); err != nil {
t.Fatalf("failed to create bucket (%q): %v", bucketName, err)
}
}
func TestCreateWithAttrs(t *testing.T) {
tc := testutil.SystemTest(t)
name := bucketName + "-attrs"
// Clean up bucket before running test.
deleteBucket(storageClient, name)
if err := createWithAttrs(storageClient, tc.ProjectID, name); err != nil {
t.Fatalf("failed to create bucket (%q): %v", name, err)
}
if err := deleteBucket(storageClient, name); err != nil {
t.Fatalf("failed to delete bucket (%q): %v", name, err)
}
}
func TestList(t *testing.T) {
tc := testutil.SystemTest(t)
setup(t)
buckets, err := list(storageClient, tc.ProjectID)
if err != nil {
t.Fatal(err)
}
var ok bool
outer:
for attempt := 0; attempt < 5; attempt++ { // for eventual consistency
for _, b := range buckets {
if b == bucketName {
ok = true
break outer
}
}
time.Sleep(2 * time.Second)
}
if !ok {
t.Errorf("got bucket list: %v; want %q in the list", buckets, bucketName)
}
}
func TestGetBucketMetadata(t *testing.T) {
testutil.SystemTest(t)
setup(t)
bucketMetadataBuf := new(bytes.Buffer)
if _, err := getBucketMetadata(bucketMetadataBuf, storageClient, bucketName); err != nil {
t.Errorf("getBucketMetadata: %#v", err)
}
got := bucketMetadataBuf.String()
if want := "BucketName:"; !strings.Contains(got, want) {
t.Errorf("got %q, want %q", got, want)
}
}
func TestIAM(t *testing.T) {
testutil.SystemTest(t)
setup(t)
if _, err := getPolicy(storageClient, bucketName); err != nil {
t.Errorf("getPolicy: %#v", err)
}
if err := addUser(storageClient, bucketName); err != nil {
t.Errorf("addUser: %v", err)
}
if err := removeUser(storageClient, bucketName); err != nil {
t.Errorf("removeUser: %v", err)
}
}
func TestRequesterPays(t *testing.T) {
testutil.SystemTest(t)
setup(t)
if err := enableRequesterPays(storageClient, bucketName); err != nil {
t.Errorf("enableRequesterPays: %#v", err)
}
if err := disableRequesterPays(storageClient, bucketName); err != nil {
t.Errorf("disableRequesterPays: %#v", err)
}
if err := checkRequesterPays(storageClient, bucketName); err != nil {
t.Errorf("checkRequesterPays: %#v", err)
}
}
func TestKMS(t *testing.T) {
tc := testutil.SystemTest(t)
setup(t)
keyRingID := os.Getenv("GOLANG_SAMPLES_KMS_KEYRING")
cryptoKeyID := os.Getenv("GOLANG_SAMPLES_KMS_CRYPTOKEY")
if keyRingID == "" || cryptoKeyID == "" {
t.Skip("GOLANG_SAMPLES_KMS_KEYRING and GOLANG_SAMPLES_KMS_CRYPTOKEY must be set")
}
kmsKeyName := fmt.Sprintf("projects/%s/locations/%s/keyRings/%s/cryptoKeys/%s", tc.ProjectID, "global", keyRingID, cryptoKeyID)
if err := setDefaultKMSkey(storageClient, bucketName, kmsKeyName); err != nil {
t.Fatalf("failed to enable default kms key (%q): %v", bucketName, err)
}
}
func TestBucketLock(t *testing.T) {
tc := testutil.SystemTest(t)
setup(t)
retentionPeriod := 5 * time.Second
if err := setRetentionPolicy(storageClient, bucketName, retentionPeriod); err != nil {
t.Fatalf("failed to set retention policy (%q): %v", bucketName, err)
}
attrs, err := getRetentionPolicy(storageClient, bucketName)
if err != nil {
t.Fatalf("failed to get retention policy (%q): %v", bucketName, err)
}
if attrs.RetentionPolicy.RetentionPeriod != retentionPeriod {
t.Fatalf("retention period is not the expected value (%q): %v", retentionPeriod, attrs.RetentionPolicy.RetentionPeriod)
}
if err := enableDefaultEventBasedHold(storageClient, bucketName); err != nil {
t.Fatalf("failed to enable default event-based hold (%q): %v", bucketName, err)
}
attrs, err = getDefaultEventBasedHold(storageClient, bucketName)
if err != nil {
t.Fatalf("failed to get default event-based hold (%q): %v", bucketName, err)
}
if !attrs.DefaultEventBasedHold {
t.Fatalf("default event-based hold was not enabled")
}
if err := disableDefaultEventBasedHold(storageClient, bucketName); err != nil {
t.Fatalf("failed to disable event-based hold (%q): %v", bucketName, err)
}
attrs, err = getDefaultEventBasedHold(storageClient, bucketName)
if err != nil {
t.Fatalf("failed to get default event-based hold (%q): %v", bucketName, err)
}
if attrs.DefaultEventBasedHold {
t.Fatalf("default event-based hold was not disabled")
}
if err := removeRetentionPolicy(storageClient, bucketName); err != nil {
t.Fatalf("failed to remove retention policy (%q): %v", bucketName, err)
}
attrs, err = getRetentionPolicy(storageClient, bucketName)
if err != nil {
t.Fatalf("failed to get retention policy (%q): %v", bucketName, err)
}
if attrs.RetentionPolicy != nil {
t.Fatalf("expected retention policy to be unset, got %v", attrs.RetentionPolicy)
}
if err := setRetentionPolicy(storageClient, bucketName, retentionPeriod); err != nil {
t.Fatalf("failed to set retention policy (%q): %v", bucketName, err)
}
testutil.Retry(t, 10, time.Second, func(r *testutil.R) {
if err := lockRetentionPolicy(storageClient, bucketName); err != nil {
r.Errorf("failed to lock retention policy (%q): %v", bucketName, err)
}
attrs, err := getRetentionPolicy(storageClient, bucketName)
if err != nil {
r.Errorf("failed to check if retention policy is locked (%q): %v", bucketName, err)
}
if !attrs.RetentionPolicy.IsLocked {
r.Errorf("retention policy is not locked")
}
})
time.Sleep(5 * time.Second)
deleteBucket(storageClient, bucketName)
time.Sleep(5 * time.Second)
if err := create(storageClient, tc.ProjectID, bucketName); err != nil {
t.Fatalf("failed to create bucket (%q): %v", bucketName, err)
}
}
func TestUniformBucketLevelAccess(t *testing.T) {
setup(t)
if err := enableUniformBucketLevelAccess(storageClient, bucketName); err != nil {
t.Fatalf("failed to enable uniform bucket-level access (%q): %v", bucketName, err)
}
attrs, err := getUniformBucketLevelAccess(storageClient, bucketName)
if err != nil {
t.Fatalf("failed to get uniform bucket-level access attrs (%q): %v", bucketName, err)
}
if !attrs.UniformBucketLevelAccess.Enabled {
t.Fatalf("Uniform bucket-level access was not enabled for (%q).", bucketName)
}
if err := disableUniformBucketLevelAccess(storageClient, bucketName); err != nil {
t.Fatalf("failed to disable uniform bucket-level access (%q): %v", bucketName, err)
}
attrs, err = getUniformBucketLevelAccess(storageClient, bucketName)
if err != nil {
t.Fatalf("failed to get uniform bucket-level access attrs (%q): %v", bucketName, err)
}
if attrs.UniformBucketLevelAccess.Enabled {
t.Fatalf("Uniform bucket-level access was not disabled for (%q).", bucketName)
}
}
func TestDelete(t *testing.T) {
testutil.SystemTest(t)
setup(t)
if err := deleteBucket(storageClient, bucketName); err != nil {
t.Fatalf("failed to delete bucket (%q): %v", bucketName, err)
}
}
| [
"\"GOLANG_SAMPLES_KMS_KEYRING\"",
"\"GOLANG_SAMPLES_KMS_CRYPTOKEY\""
] | [] | [
"GOLANG_SAMPLES_KMS_CRYPTOKEY",
"GOLANG_SAMPLES_KMS_KEYRING"
] | [] | ["GOLANG_SAMPLES_KMS_CRYPTOKEY", "GOLANG_SAMPLES_KMS_KEYRING"] | go | 2 | 0 | |
yuncai/yuncai/wsgi.py | """
WSGI config for yuncai project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "yuncai.settings")
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
example/mssql/main.go | package main
import (
"database/sql"
"fmt"
_ "gofreetds"
"os"
)
//Example of how to use gofreetds as an mssql driver for the standard sql interface.
//More information on how to use the sql interface:
// http://golang.org/pkg/database/sql/
// https://code.google.com/p/go-wiki/wiki/SQLInterface
func main() {
//get connection string
connStr := os.Getenv("GOFREETDS_CONN_STR")
if connStr == "" {
panic("Set connection string for the pubs database in GOFREETDS_CONN_STR environment variable!\n")
}
//get db connection
auId := "172-32-1176"
db, err := sql.Open("mssql", connStr)
if err != nil {
panic(err)
}
defer db.Close()
//use it
row := db.QueryRow("SELECT au_fname, au_lname name FROM authors WHERE au_id = ?", auId)
var firstName, lastName string
err = row.Scan(&firstName, &lastName)
if err != nil {
panic(err)
}
//show results
fmt.Printf("author for id: %s is %s %s\n", auId, firstName, lastName)
}
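// The following helper is a minimal, hedged sketch that is not part of the
// original example; it shows the same gofreetds-backed database/sql handle
// used with a prepared statement. The function name and its reuse of the
// "authors" table mirror the query in main and are illustrative only.
func queryAuthorWithStmt(db *sql.DB, id string) (firstName, lastName string, err error) {
	// Prepare once; the statement can then be reused for many author ids.
	stmt, err := db.Prepare("SELECT au_fname, au_lname FROM authors WHERE au_id = ?")
	if err != nil {
		return "", "", err
	}
	defer stmt.Close()
	// QueryRow + Scan works exactly as with the ad-hoc query in main.
	err = stmt.QueryRow(id).Scan(&firstName, &lastName)
	return firstName, lastName, err
}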
| [
"\"GOFREETDS_CONN_STR\""
] | [] | [
"GOFREETDS_CONN_STR"
] | [] | ["GOFREETDS_CONN_STR"] | go | 1 | 0 | |
internal/config/config.go | package config
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"log"
"math"
"net/http"
"net/url"
"os"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/models"
"github.com/influxdata/telegraf/plugins/aggregators"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/parsers"
"github.com/influxdata/telegraf/plugins/processors"
"github.com/influxdata/telegraf/plugins/serializers"
"github.com/influxdata/toml"
"github.com/influxdata/toml/ast"
)
var (
// Default sections
sectionDefaults = []string{"global_tags", "agent", "outputs",
"processors", "aggregators", "inputs"}
// Default input plugins
inputDefaults = []string{"cpu", "mem", "swap", "system", "kernel",
"processes", "disk", "diskio"}
// Default output plugins
outputDefaults = []string{"influxdb"}
// envVarRe is a regex to find environment variables in the config file
envVarRe = regexp.MustCompile(`\$\{(\w+)\}|\$(\w+)`)
envVarEscaper = strings.NewReplacer(
`"`, `\"`,
`\`, `\\`,
)
)
// Config specifies the URL/user/password for the database that telegraf
// will be logging to, as well as all the plugins that the user has
// specified
type Config struct {
Tags map[string]string
InputFilters []string
OutputFilters []string
Agent *AgentConfig
Inputs []*models.RunningInput
Outputs []*models.RunningOutput
Aggregators []*models.RunningAggregator
// Processors have a slice wrapper type because they need to be sorted
Processors models.RunningProcessors
}
func NewConfig() *Config {
c := &Config{
// Agent defaults:
Agent: &AgentConfig{
Interval: internal.Duration{Duration: 10 * time.Second},
RoundInterval: true,
FlushInterval: internal.Duration{Duration: 10 * time.Second},
LogTarget: "file",
LogfileRotationMaxArchives: 5,
},
Tags: make(map[string]string),
Inputs: make([]*models.RunningInput, 0),
Outputs: make([]*models.RunningOutput, 0),
Processors: make([]*models.RunningProcessor, 0),
InputFilters: make([]string, 0),
OutputFilters: make([]string, 0),
}
return c
}
type AgentConfig struct {
// Interval at which to gather information
Interval internal.Duration
// RoundInterval rounds collection interval to 'interval'.
// ie, if Interval=10s then always collect on :00, :10, :20, etc.
RoundInterval bool
// By default or when set to "0s", precision will be set to the same
// timestamp order as the collection interval, with the maximum being 1s.
// ie, when interval = "10s", precision will be "1s"
// when interval = "250ms", precision will be "1ms"
// Precision will NOT be used for service inputs. It is up to each individual
// service input to set the timestamp at the appropriate precision.
Precision internal.Duration
// CollectionJitter is used to jitter the collection by a random amount.
// Each plugin will sleep for a random time within jitter before collecting.
// This can be used to avoid many plugins querying things like sysfs at the
// same time, which can have a measurable effect on the system.
CollectionJitter internal.Duration
// FlushInterval is the Interval at which to flush data
FlushInterval internal.Duration
// FlushJitter Jitters the flush interval by a random amount.
// This is primarily to avoid large write spikes for users running a large
// number of telegraf instances.
// ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
FlushJitter internal.Duration
// MetricBatchSize is the maximum number of metrics that are written to an
// output plugin in one call.
MetricBatchSize int
// MetricBufferLimit is the max number of metrics that each output plugin
// will cache. The buffer is cleared when a successful write occurs. When
// full, the oldest metrics will be overwritten. This number should be a
// multiple of MetricBatchSize. Due to the current implementation, it cannot
// be less than 2 times MetricBatchSize.
MetricBufferLimit int
// FlushBufferWhenFull tells Telegraf to flush the metric buffer whenever
// it fills up, regardless of FlushInterval. Setting this option to true
// does _not_ deactivate FlushInterval.
FlushBufferWhenFull bool
// TODO(cam): Remove UTC and parameter, they are no longer
// valid for the agent config. Leaving them here for now for backwards-
// compatibility
UTC bool `toml:"utc"`
// Debug is the option for running in debug mode
Debug bool `toml:"debug"`
// Quiet is the option for running in quiet mode
Quiet bool `toml:"quiet"`
// Log target controls the destination for logs and can be one of "file",
// "stderr" or, on Windows, "eventlog". When set to "file", the output file
// is determined by the "logfile" setting.
LogTarget string `toml:"logtarget"`
// Name of the file to be logged to when using the "file" logtarget. If set to
// the empty string then logs are written to stderr.
Logfile string `toml:"logfile"`
// The file will be rotated after the time interval specified. When set
// to 0 no time based rotation is performed.
LogfileRotationInterval internal.Duration `toml:"logfile_rotation_interval"`
// The logfile will be rotated when it becomes larger than the specified
// size. When set to 0 no size based rotation is performed.
LogfileRotationMaxSize internal.Size `toml:"logfile_rotation_max_size"`
// Maximum number of rotated archives to keep, any older logs are deleted.
// If set to -1, no archives are removed.
LogfileRotationMaxArchives int `toml:"logfile_rotation_max_archives"`
Hostname string
OmitHostname bool
}
// InputNames returns a list of the configured input names.
func (c *Config) InputNames() []string {
var name []string
for _, input := range c.Inputs {
name = append(name, input.Config.Name)
}
return name
}
// AggregatorNames returns a list of the configured aggregator names.
func (c *Config) AggregatorNames() []string {
var name []string
for _, aggregator := range c.Aggregators {
name = append(name, aggregator.Config.Name)
}
return name
}
// ProcessorNames returns a list of the configured processor names.
func (c *Config) ProcessorNames() []string {
var name []string
for _, processor := range c.Processors {
name = append(name, processor.Config.Name)
}
return name
}
// OutputNames returns a list of the configured output names.
func (c *Config) OutputNames() []string {
var name []string
for _, output := range c.Outputs {
name = append(name, output.Config.Name)
}
return name
}
// ListTags returns a string of tags specified in the config,
// line-protocol style
func (c *Config) ListTags() string {
var tags []string
for k, v := range c.Tags {
tags = append(tags, fmt.Sprintf("%s=%s", k, v))
}
sort.Strings(tags)
return strings.Join(tags, " ")
}
var header = `# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply surround
# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})
`
var globalTagsConfig = `
# Global tags can be specified here in key="value" format.
[global_tags]
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
# rack = "1a"
## Environment variables can be used as tags, and throughout the config file
# user = "$USER"
`
var agentConfig = `
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## Maximum number of unwritten metrics per output. Increasing this value
## allows for longer periods of output downtime without dropping metrics at the
## cost of higher maximum memory usage.
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. Maximum flush_interval will be
## flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s.
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
precision = ""
## Log at debug level.
# debug = false
## Log only error level messages.
# quiet = false
## Log target controls the destination for logs and can be one of "file",
## "stderr" or, on Windows, "eventlog". When set to "file", the output file
## is determined by the "logfile" setting.
# logtarget = "file"
## Name of the file to be logged to when using the "file" logtarget. If set to
## the empty string then logs are written to stderr.
# logfile = ""
## The logfile will be rotated after the time interval specified. When set
## to 0 no time based rotation is performed. Logs are rotated only when
## written to, if there is no log activity rotation may be delayed.
# logfile_rotation_interval = "0d"
## The logfile will be rotated when it becomes larger than the specified
## size. When set to 0 no size based rotation is performed.
# logfile_rotation_max_size = "0MB"
## Maximum number of rotated archives to keep, any older logs are deleted.
## If set to -1, no archives are removed.
# logfile_rotation_max_archives = 5
## Override default hostname, if empty use os.Hostname()
hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent.
omit_hostname = false
`
var outputHeader = `
###############################################################################
# OUTPUT PLUGINS #
###############################################################################
`
var processorHeader = `
###############################################################################
# PROCESSOR PLUGINS #
###############################################################################
`
var aggregatorHeader = `
###############################################################################
# AGGREGATOR PLUGINS #
###############################################################################
`
var inputHeader = `
###############################################################################
# INPUT PLUGINS #
###############################################################################
`
var serviceInputHeader = `
###############################################################################
# SERVICE INPUT PLUGINS #
###############################################################################
`
// PrintSampleConfig prints the sample config
func PrintSampleConfig(
sectionFilters []string,
inputFilters []string,
outputFilters []string,
aggregatorFilters []string,
processorFilters []string,
) {
// print headers
fmt.Printf(header)
if len(sectionFilters) == 0 {
sectionFilters = sectionDefaults
}
printFilteredGlobalSections(sectionFilters)
// print output plugins
if sliceContains("outputs", sectionFilters) {
if len(outputFilters) != 0 {
if len(outputFilters) >= 3 && outputFilters[1] != "none" {
fmt.Printf(outputHeader)
}
printFilteredOutputs(outputFilters, false)
} else {
fmt.Printf(outputHeader)
printFilteredOutputs(outputDefaults, false)
// Print non-default outputs, commented
var pnames []string
for pname := range outputs.Outputs {
if !sliceContains(pname, outputDefaults) {
pnames = append(pnames, pname)
}
}
sort.Strings(pnames)
printFilteredOutputs(pnames, true)
}
}
// print processor plugins
if sliceContains("processors", sectionFilters) {
if len(processorFilters) != 0 {
if len(processorFilters) >= 3 && processorFilters[1] != "none" {
fmt.Printf(processorHeader)
}
printFilteredProcessors(processorFilters, false)
} else {
fmt.Printf(processorHeader)
pnames := []string{}
for pname := range processors.Processors {
pnames = append(pnames, pname)
}
sort.Strings(pnames)
printFilteredProcessors(pnames, true)
}
}
// print aggregator plugins
if sliceContains("aggregators", sectionFilters) {
if len(aggregatorFilters) != 0 {
if len(aggregatorFilters) >= 3 && aggregatorFilters[1] != "none" {
fmt.Printf(aggregatorHeader)
}
printFilteredAggregators(aggregatorFilters, false)
} else {
fmt.Printf(aggregatorHeader)
pnames := []string{}
for pname := range aggregators.Aggregators {
pnames = append(pnames, pname)
}
sort.Strings(pnames)
printFilteredAggregators(pnames, true)
}
}
// print input plugins
if sliceContains("inputs", sectionFilters) {
if len(inputFilters) != 0 {
if len(inputFilters) >= 3 && inputFilters[1] != "none" {
fmt.Printf(inputHeader)
}
printFilteredInputs(inputFilters, false)
} else {
fmt.Printf(inputHeader)
printFilteredInputs(inputDefaults, false)
// Print non-default inputs, commented
var pnames []string
for pname := range inputs.Inputs {
if !sliceContains(pname, inputDefaults) {
pnames = append(pnames, pname)
}
}
sort.Strings(pnames)
printFilteredInputs(pnames, true)
}
}
}
func printFilteredProcessors(processorFilters []string, commented bool) {
// Filter processors
var pnames []string
for pname := range processors.Processors {
if sliceContains(pname, processorFilters) {
pnames = append(pnames, pname)
}
}
sort.Strings(pnames)
// Print Processors
for _, pname := range pnames {
creator := processors.Processors[pname]
output := creator()
printConfig(pname, output, "processors", commented)
}
}
func printFilteredAggregators(aggregatorFilters []string, commented bool) {
// Filter aggregators
var anames []string
for aname := range aggregators.Aggregators {
if sliceContains(aname, aggregatorFilters) {
anames = append(anames, aname)
}
}
sort.Strings(anames)
// Print Aggregators
for _, aname := range anames {
creator := aggregators.Aggregators[aname]
output := creator()
printConfig(aname, output, "aggregators", commented)
}
}
func printFilteredInputs(inputFilters []string, commented bool) {
// Filter inputs
var pnames []string
for pname := range inputs.Inputs {
if sliceContains(pname, inputFilters) {
pnames = append(pnames, pname)
}
}
sort.Strings(pnames)
// cache service inputs to print them at the end
servInputs := make(map[string]telegraf.ServiceInput)
// for alphabetical looping:
servInputNames := []string{}
// Print Inputs
for _, pname := range pnames {
creator := inputs.Inputs[pname]
input := creator()
switch p := input.(type) {
case telegraf.ServiceInput:
servInputs[pname] = p
servInputNames = append(servInputNames, pname)
continue
}
printConfig(pname, input, "inputs", commented)
}
// Print Service Inputs
if len(servInputs) == 0 {
return
}
sort.Strings(servInputNames)
fmt.Printf(serviceInputHeader)
for _, name := range servInputNames {
printConfig(name, servInputs[name], "inputs", commented)
}
}
func printFilteredOutputs(outputFilters []string, commented bool) {
// Filter outputs
var onames []string
for oname := range outputs.Outputs {
if sliceContains(oname, outputFilters) {
onames = append(onames, oname)
}
}
sort.Strings(onames)
// Print Outputs
for _, oname := range onames {
creator := outputs.Outputs[oname]
output := creator()
printConfig(oname, output, "outputs", commented)
}
}
func printFilteredGlobalSections(sectionFilters []string) {
if sliceContains("global_tags", sectionFilters) {
fmt.Printf(globalTagsConfig)
}
if sliceContains("agent", sectionFilters) {
fmt.Printf(agentConfig)
}
}
type printer interface {
Description() string
SampleConfig() string
}
func printConfig(name string, p printer, op string, commented bool) {
comment := ""
if commented {
comment = "# "
}
fmt.Printf("\n%s# %s\n%s[[%s.%s]]", comment, p.Description(), comment,
op, name)
config := p.SampleConfig()
if config == "" {
fmt.Printf("\n%s # no configuration\n\n", comment)
} else {
lines := strings.Split(config, "\n")
for i, line := range lines {
if i == 0 || i == len(lines)-1 {
fmt.Print("\n")
continue
}
fmt.Print(strings.TrimRight(comment+line, " ") + "\n")
}
}
}
func sliceContains(name string, list []string) bool {
for _, b := range list {
if b == name {
return true
}
}
return false
}
// PrintInputConfig prints the config usage of a single input.
func PrintInputConfig(name string) error {
if creator, ok := inputs.Inputs[name]; ok {
printConfig(name, creator(), "inputs", false)
} else {
return errors.New(fmt.Sprintf("Input %s not found", name))
}
return nil
}
// PrintOutputConfig prints the config usage of a single output.
func PrintOutputConfig(name string) error {
if creator, ok := outputs.Outputs[name]; ok {
printConfig(name, creator(), "outputs", false)
} else {
return errors.New(fmt.Sprintf("Output %s not found", name))
}
return nil
}
func (c *Config) LoadDirectory(path string) error {
walkfn := func(thispath string, info os.FileInfo, _ error) error {
if info == nil {
log.Printf("W! Telegraf is not permitted to read %s", thispath)
return nil
}
if info.IsDir() {
if strings.HasPrefix(info.Name(), "..") {
// skip Kubernetes mounts, preventing loading the same config twice
return filepath.SkipDir
}
return nil
}
name := info.Name()
if len(name) < 6 || name[len(name)-5:] != ".conf" {
return nil
}
err := c.LoadConfig(thispath)
if err != nil {
return err
}
return nil
}
return filepath.Walk(path, walkfn)
}
// Try to find a default config file at these locations (in order):
// 1. $TELEGRAF_CONFIG_PATH
// 2. $HOME/.telegraf/telegraf.conf
// 3. /etc/telegraf/telegraf.conf
//
func getDefaultConfigPath() (string, error) {
envfile := os.Getenv("TELEGRAF_CONFIG_PATH")
homefile := os.ExpandEnv("${HOME}/.telegraf/telegraf.conf")
etcfile := "/etc/telegraf/telegraf.conf"
if runtime.GOOS == "windows" {
programFiles := os.Getenv("ProgramFiles")
if programFiles == "" { // Should never happen
programFiles = `C:\Program Files`
}
etcfile = programFiles + `\Telegraf\telegraf.conf`
}
for _, path := range []string{envfile, homefile, etcfile} {
if _, err := os.Stat(path); err == nil {
log.Printf("I! Using config file: %s", path)
return path, nil
}
}
// if we got here, we didn't find a file in a default location
return "", fmt.Errorf("No config file specified, and could not find one"+
" in $TELEGRAF_CONFIG_PATH, %s, or %s", homefile, etcfile)
}
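// A minimal sketch, not present in the original source, showing how the
// lookup order above can be exercised: pointing TELEGRAF_CONFIG_PATH at a
// file makes it win over the home and /etc locations, assuming that file
// exists (the path below is purely illustrative).
func exampleDefaultConfigPath() (string, error) {
	os.Setenv("TELEGRAF_CONFIG_PATH", "/tmp/telegraf-example.conf")
	return getDefaultConfigPath()
}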
// LoadConfig loads the given config file and applies it to c
func (c *Config) LoadConfig(path string) error {
var err error
if path == "" {
if path, err = getDefaultConfigPath(); err != nil {
return err
}
}
data, err := loadConfig(path)
if err != nil {
return fmt.Errorf("Error loading %s, %s", path, err)
}
tbl, err := parseConfig(data)
if err != nil {
return fmt.Errorf("Error parsing %s, %s", path, err)
}
// Parse tags tables first:
for _, tableName := range []string{"tags", "global_tags"} {
if val, ok := tbl.Fields[tableName]; ok {
subTable, ok := val.(*ast.Table)
if !ok {
return fmt.Errorf("%s: invalid configuration", path)
}
if err = toml.UnmarshalTable(subTable, c.Tags); err != nil {
log.Printf("E! Could not parse [global_tags] config\n")
return fmt.Errorf("Error parsing %s, %s", path, err)
}
}
}
// Parse agent table:
if val, ok := tbl.Fields["agent"]; ok {
subTable, ok := val.(*ast.Table)
if !ok {
return fmt.Errorf("%s: invalid configuration", path)
}
if err = toml.UnmarshalTable(subTable, c.Agent); err != nil {
log.Printf("E! Could not parse [agent] config\n")
return fmt.Errorf("Error parsing %s, %s", path, err)
}
}
if !c.Agent.OmitHostname {
if c.Agent.Hostname == "" {
hostname, err := os.Hostname()
if err != nil {
return err
}
c.Agent.Hostname = hostname
}
c.Tags["host"] = c.Agent.Hostname
}
// Parse all the rest of the plugins:
for name, val := range tbl.Fields {
subTable, ok := val.(*ast.Table)
if !ok {
return fmt.Errorf("%s: invalid configuration", path)
}
switch name {
case "agent", "global_tags", "tags":
case "outputs":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
// legacy [outputs.influxdb] support
case *ast.Table:
if err = c.addOutput(pluginName, pluginSubTable); err != nil {
return fmt.Errorf("Error parsing %s, %s", path, err)
}
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addOutput(pluginName, t); err != nil {
return fmt.Errorf("Error parsing %s, %s", path, err)
}
}
default:
return fmt.Errorf("Unsupported config format: %s, file %s",
pluginName, path)
}
}
case "inputs", "plugins":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
// legacy [inputs.cpu] support
case *ast.Table:
if err = c.addInput(pluginName, pluginSubTable); err != nil {
return fmt.Errorf("Error parsing %s, %s", path, err)
}
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addInput(pluginName, t); err != nil {
return fmt.Errorf("Error parsing %s, %s", path, err)
}
}
default:
return fmt.Errorf("Unsupported config format: %s, file %s",
pluginName, path)
}
}
case "processors":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addProcessor(pluginName, t); err != nil {
return fmt.Errorf("Error parsing %s, %s", path, err)
}
}
default:
return fmt.Errorf("Unsupported config format: %s, file %s",
pluginName, path)
}
}
case "aggregators":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addAggregator(pluginName, t); err != nil {
return fmt.Errorf("Error parsing %s, %s", path, err)
}
}
default:
return fmt.Errorf("Unsupported config format: %s, file %s",
pluginName, path)
}
}
// Assume it's an input for legacy config file support if no other
// identifiers are present
default:
if err = c.addInput(name, subTable); err != nil {
return fmt.Errorf("Error parsing %s, %s", path, err)
}
}
}
if len(c.Processors) > 1 {
sort.Sort(c.Processors)
}
return nil
}
// trimBOM trims the Byte-Order-Marks from the beginning of the file.
// this is for Windows compatibility only.
// see https://github.com/influxdata/telegraf/issues/1378
func trimBOM(f []byte) []byte {
return bytes.TrimPrefix(f, []byte("\xef\xbb\xbf"))
}
// escapeEnv escapes a value for inserting into a TOML string.
func escapeEnv(value string) string {
return envVarEscaper.Replace(value)
}
func loadConfig(config string) ([]byte, error) {
u, err := url.Parse(config)
if err != nil {
return nil, err
}
switch u.Scheme {
case "https", "http":
return fetchConfig(u)
default:
// If it isn't an http(s) scheme, try it as a file.
}
return ioutil.ReadFile(config)
}
func fetchConfig(u *url.URL) ([]byte, error) {
req, err := http.NewRequest("GET", u.String(), nil)
if err != nil {
return nil, err
}
if v, exists := os.LookupEnv("INFLUX_TOKEN"); exists {
req.Header.Add("Authorization", "Token "+v)
}
req.Header.Add("Accept", "application/toml")
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("failed to retrieve remote config: %s", resp.Status)
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
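// Hedged sketch (not part of the original file): loading a remote
// telegraf.conf through loadConfig/fetchConfig. When INFLUX_TOKEN is set it
// is attached as a Token authorization header; the URL and token below are
// illustrative values only.
func exampleLoadRemoteConfig() ([]byte, error) {
	os.Setenv("INFLUX_TOKEN", "example-token")
	return loadConfig("https://config.example.com/telegraf.conf")
}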
// parseConfig parses the given TOML configuration contents and returns the
// AST produced by the TOML parser. Before parsing, any environment variable
// references in the contents are replaced with their values.
func parseConfig(contents []byte) (*ast.Table, error) {
contents = trimBOM(contents)
parameters := envVarRe.FindAllSubmatch(contents, -1)
for _, parameter := range parameters {
if len(parameter) != 3 {
continue
}
var env_var []byte
if parameter[1] != nil {
env_var = parameter[1]
} else if parameter[2] != nil {
env_var = parameter[2]
} else {
continue
}
env_val, ok := os.LookupEnv(strings.TrimPrefix(string(env_var), "$"))
if ok {
env_val = escapeEnv(env_val)
contents = bytes.Replace(contents, parameter[0], []byte(env_val), 1)
}
}
return toml.Parse(contents)
}
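// A small, hedged sketch (not in the original source) of the ${VAR}/$VAR
// substitution that parseConfig performs before handing the contents to the
// TOML parser; the variable name and value are illustrative only.
func exampleEnvSubstitution() []byte {
	os.Setenv("INFLUX_URL", "http://localhost:8086")
	contents := []byte(`urls = ["${INFLUX_URL}"]`)
	for _, parameter := range envVarRe.FindAllSubmatch(contents, -1) {
		var envVar []byte
		if parameter[1] != nil {
			envVar = parameter[1]
		} else if parameter[2] != nil {
			envVar = parameter[2]
		} else {
			continue
		}
		if val, ok := os.LookupEnv(string(envVar)); ok {
			contents = bytes.Replace(contents, parameter[0], []byte(escapeEnv(val)), 1)
		}
	}
	// contents now reads: urls = ["http://localhost:8086"]
	return contents
}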
func (c *Config) addAggregator(name string, table *ast.Table) error {
creator, ok := aggregators.Aggregators[name]
if !ok {
return fmt.Errorf("Undefined but requested aggregator: %s", name)
}
aggregator := creator()
conf, err := buildAggregator(name, table)
if err != nil {
return err
}
if err := toml.UnmarshalTable(table, aggregator); err != nil {
return err
}
c.Aggregators = append(c.Aggregators, models.NewRunningAggregator(aggregator, conf))
return nil
}
func (c *Config) addProcessor(name string, table *ast.Table) error {
creator, ok := processors.Processors[name]
if !ok {
return fmt.Errorf("Undefined but requested processor: %s", name)
}
processor := creator()
processorConfig, err := buildProcessor(name, table)
if err != nil {
return err
}
if err := toml.UnmarshalTable(table, processor); err != nil {
return err
}
rf := models.NewRunningProcessor(processor, processorConfig)
c.Processors = append(c.Processors, rf)
return nil
}
func (c *Config) addOutput(name string, table *ast.Table) error {
if len(c.OutputFilters) > 0 && !sliceContains(name, c.OutputFilters) {
return nil
}
creator, ok := outputs.Outputs[name]
if !ok {
return fmt.Errorf("Undefined but requested output: %s", name)
}
output := creator()
// If the output has a SetSerializer function, then this means it can write
// arbitrary types of output, so build the serializer and set it.
switch t := output.(type) {
case serializers.SerializerOutput:
serializer, err := buildSerializer(name, table)
if err != nil {
return err
}
t.SetSerializer(serializer)
}
outputConfig, err := buildOutput(name, table)
if err != nil {
return err
}
if err := toml.UnmarshalTable(table, output); err != nil {
return err
}
ro := models.NewRunningOutput(name, output, outputConfig,
c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit)
c.Outputs = append(c.Outputs, ro)
return nil
}
func (c *Config) addInput(name string, table *ast.Table) error {
if len(c.InputFilters) > 0 && !sliceContains(name, c.InputFilters) {
return nil
}
// Legacy support renaming io input to diskio
if name == "io" {
name = "diskio"
}
creator, ok := inputs.Inputs[name]
if !ok {
return fmt.Errorf("Undefined but requested input: %s", name)
}
input := creator()
// If the input has a SetParser function, then this means it can accept
// arbitrary types of input, so build the parser and set it.
switch t := input.(type) {
case parsers.ParserInput:
parser, err := buildParser(name, table)
if err != nil {
return err
}
t.SetParser(parser)
}
switch t := input.(type) {
case parsers.ParserFuncInput:
config, err := getParserConfig(name, table)
if err != nil {
return err
}
t.SetParserFunc(func() (parsers.Parser, error) {
return parsers.NewParser(config)
})
}
pluginConfig, err := buildInput(name, table)
if err != nil {
return err
}
if err := toml.UnmarshalTable(table, input); err != nil {
return err
}
rp := models.NewRunningInput(input, pluginConfig)
rp.SetDefaultTags(c.Tags)
c.Inputs = append(c.Inputs, rp)
return nil
}
// buildAggregator parses Aggregator specific items from the ast.Table,
// builds the filter and returns a
// models.AggregatorConfig to be inserted into models.RunningAggregator
func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, error) {
conf := &models.AggregatorConfig{
Name: name,
Delay: time.Millisecond * 100,
Period: time.Second * 30,
Grace: time.Second * 0,
}
if node, ok := tbl.Fields["period"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
dur, err := time.ParseDuration(str.Value)
if err != nil {
return nil, err
}
conf.Period = dur
}
}
}
if node, ok := tbl.Fields["delay"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
dur, err := time.ParseDuration(str.Value)
if err != nil {
return nil, err
}
conf.Delay = dur
}
}
}
if node, ok := tbl.Fields["grace"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
dur, err := time.ParseDuration(str.Value)
if err != nil {
return nil, err
}
conf.Grace = dur
}
}
}
if node, ok := tbl.Fields["drop_original"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if b, ok := kv.Value.(*ast.Boolean); ok {
var err error
conf.DropOriginal, err = strconv.ParseBool(b.Value)
if err != nil {
log.Printf("Error parsing boolean value for %s: %s\n", name, err)
}
}
}
}
if node, ok := tbl.Fields["name_prefix"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
conf.MeasurementPrefix = str.Value
}
}
}
if node, ok := tbl.Fields["name_suffix"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
conf.MeasurementSuffix = str.Value
}
}
}
if node, ok := tbl.Fields["name_override"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
conf.NameOverride = str.Value
}
}
}
if node, ok := tbl.Fields["alias"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
conf.Alias = str.Value
}
}
}
conf.Tags = make(map[string]string)
if node, ok := tbl.Fields["tags"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
if err := toml.UnmarshalTable(subtbl, conf.Tags); err != nil {
log.Printf("Could not parse tags for aggregator %s\n", name)
}
}
}
delete(tbl.Fields, "period")
delete(tbl.Fields, "delay")
delete(tbl.Fields, "grace")
delete(tbl.Fields, "drop_original")
delete(tbl.Fields, "name_prefix")
delete(tbl.Fields, "name_suffix")
delete(tbl.Fields, "name_override")
delete(tbl.Fields, "alias")
delete(tbl.Fields, "tags")
var err error
conf.Filter, err = buildFilter(tbl)
if err != nil {
return conf, err
}
return conf, nil
}
// buildProcessor parses Processor specific items from the ast.Table,
// builds the filter and returns a
// models.ProcessorConfig to be inserted into models.RunningProcessor
func buildProcessor(name string, tbl *ast.Table) (*models.ProcessorConfig, error) {
conf := &models.ProcessorConfig{Name: name}
if node, ok := tbl.Fields["order"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if b, ok := kv.Value.(*ast.Integer); ok {
var err error
conf.Order, err = strconv.ParseInt(b.Value, 10, 64)
if err != nil {
log.Printf("Error parsing int value for %s: %s\n", name, err)
}
}
}
}
if node, ok := tbl.Fields["alias"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
conf.Alias = str.Value
}
}
}
delete(tbl.Fields, "alias")
delete(tbl.Fields, "order")
var err error
conf.Filter, err = buildFilter(tbl)
if err != nil {
return conf, err
}
return conf, nil
}
// buildFilter builds a Filter
// (tagpass/tagdrop/namepass/namedrop/fieldpass/fielddrop) to
// be inserted into the models.OutputConfig/models.InputConfig
// to be used for glob filtering on tags and measurements
func buildFilter(tbl *ast.Table) (models.Filter, error) {
f := models.Filter{}
if node, ok := tbl.Fields["namepass"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
f.NamePass = append(f.NamePass, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["namedrop"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
f.NameDrop = append(f.NameDrop, str.Value)
}
}
}
}
}
fields := []string{"pass", "fieldpass"}
for _, field := range fields {
if node, ok := tbl.Fields[field]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
f.FieldPass = append(f.FieldPass, str.Value)
}
}
}
}
}
}
fields = []string{"drop", "fielddrop"}
for _, field := range fields {
if node, ok := tbl.Fields[field]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
f.FieldDrop = append(f.FieldDrop, str.Value)
}
}
}
}
}
}
if node, ok := tbl.Fields["tagpass"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
for name, val := range subtbl.Fields {
if kv, ok := val.(*ast.KeyValue); ok {
tagfilter := &models.TagFilter{Name: name}
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
tagfilter.Filter = append(tagfilter.Filter, str.Value)
}
}
}
f.TagPass = append(f.TagPass, *tagfilter)
}
}
}
}
if node, ok := tbl.Fields["tagdrop"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
for name, val := range subtbl.Fields {
if kv, ok := val.(*ast.KeyValue); ok {
tagfilter := &models.TagFilter{Name: name}
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
tagfilter.Filter = append(tagfilter.Filter, str.Value)
}
}
}
f.TagDrop = append(f.TagDrop, *tagfilter)
}
}
}
}
if node, ok := tbl.Fields["tagexclude"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
f.TagExclude = append(f.TagExclude, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["taginclude"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
f.TagInclude = append(f.TagInclude, str.Value)
}
}
}
}
}
if err := f.Compile(); err != nil {
return f, err
}
delete(tbl.Fields, "namedrop")
delete(tbl.Fields, "namepass")
delete(tbl.Fields, "fielddrop")
delete(tbl.Fields, "fieldpass")
delete(tbl.Fields, "drop")
delete(tbl.Fields, "pass")
delete(tbl.Fields, "tagdrop")
delete(tbl.Fields, "tagpass")
delete(tbl.Fields, "tagexclude")
delete(tbl.Fields, "taginclude")
return f, nil
}
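// A minimal, hedged sketch (not part of the original file) of feeding an
// already-parsed plugin table through buildFilter; the measurement, field
// and tag names below are illustrative only.
func exampleBuildFilter() (models.Filter, error) {
	tbl, err := toml.Parse([]byte(`
namepass = ["cpu"]
fielddrop = ["time_*"]
[tagpass]
  cpu = ["cpu0", "cpu1"]
`))
	if err != nil {
		return models.Filter{}, err
	}
	// The returned Filter has NamePass, FieldDrop and TagPass populated and
	// already compiled by buildFilter.
	return buildFilter(tbl)
}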
// buildInput parses input specific items from the ast.Table,
// builds the filter and returns a
// models.InputConfig to be inserted into models.RunningInput
func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) {
cp := &models.InputConfig{Name: name}
if node, ok := tbl.Fields["interval"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
dur, err := time.ParseDuration(str.Value)
if err != nil {
return nil, err
}
cp.Interval = dur
}
}
}
if node, ok := tbl.Fields["name_prefix"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
cp.MeasurementPrefix = str.Value
}
}
}
if node, ok := tbl.Fields["name_suffix"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
cp.MeasurementSuffix = str.Value
}
}
}
if node, ok := tbl.Fields["name_override"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
cp.NameOverride = str.Value
}
}
}
if node, ok := tbl.Fields["alias"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
cp.Alias = str.Value
}
}
}
cp.Tags = make(map[string]string)
if node, ok := tbl.Fields["tags"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
if err := toml.UnmarshalTable(subtbl, cp.Tags); err != nil {
log.Printf("E! Could not parse tags for input %s\n", name)
}
}
}
delete(tbl.Fields, "name_prefix")
delete(tbl.Fields, "name_suffix")
delete(tbl.Fields, "name_override")
delete(tbl.Fields, "alias")
delete(tbl.Fields, "interval")
delete(tbl.Fields, "tags")
var err error
cp.Filter, err = buildFilter(tbl)
if err != nil {
return cp, err
}
return cp, nil
}
// buildParser grabs the necessary entries from the ast.Table for creating
// a parsers.Parser object, and creates it, which can then be added onto
// an Input object.
func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
config, err := getParserConfig(name, tbl)
if err != nil {
return nil, err
}
return parsers.NewParser(config)
}
func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) {
c := &parsers.Config{
JSONStrict: true,
}
if node, ok := tbl.Fields["data_format"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DataFormat = str.Value
}
}
}
// Legacy support, exec plugin originally parsed JSON by default.
if name == "exec" && c.DataFormat == "" {
c.DataFormat = "json"
} else if c.DataFormat == "" {
c.DataFormat = "influx"
}
if node, ok := tbl.Fields["separator"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.Separator = str.Value
}
}
}
if node, ok := tbl.Fields["templates"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
c.Templates = append(c.Templates, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["tag_keys"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
c.TagKeys = append(c.TagKeys, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["json_string_fields"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
c.JSONStringFields = append(c.JSONStringFields, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["json_name_key"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.JSONNameKey = str.Value
}
}
}
if node, ok := tbl.Fields["json_query"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.JSONQuery = str.Value
}
}
}
if node, ok := tbl.Fields["json_time_key"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.JSONTimeKey = str.Value
}
}
}
if node, ok := tbl.Fields["json_time_format"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.JSONTimeFormat = str.Value
}
}
}
if node, ok := tbl.Fields["json_timezone"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.JSONTimezone = str.Value
}
}
}
if node, ok := tbl.Fields["json_strict"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if b, ok := kv.Value.(*ast.Boolean); ok {
var err error
c.JSONStrict, err = b.Boolean()
if err != nil {
return nil, err
}
}
}
}
if node, ok := tbl.Fields["data_type"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DataType = str.Value
}
}
}
if node, ok := tbl.Fields["collectd_auth_file"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.CollectdAuthFile = str.Value
}
}
}
if node, ok := tbl.Fields["collectd_security_level"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.CollectdSecurityLevel = str.Value
}
}
}
if node, ok := tbl.Fields["collectd_parse_multivalue"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.CollectdSplit = str.Value
}
}
}
if node, ok := tbl.Fields["collectd_typesdb"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
c.CollectdTypesDB = append(c.CollectdTypesDB, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["dropwizard_metric_registry_path"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DropwizardMetricRegistryPath = str.Value
}
}
}
if node, ok := tbl.Fields["dropwizard_time_path"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DropwizardTimePath = str.Value
}
}
}
if node, ok := tbl.Fields["dropwizard_time_format"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DropwizardTimeFormat = str.Value
}
}
}
if node, ok := tbl.Fields["dropwizard_tags_path"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DropwizardTagsPath = str.Value
}
}
}
c.DropwizardTagPathsMap = make(map[string]string)
if node, ok := tbl.Fields["dropwizard_tag_paths"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
for name, val := range subtbl.Fields {
if kv, ok := val.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DropwizardTagPathsMap[name] = str.Value
}
}
}
}
}
//for grok data_format
if node, ok := tbl.Fields["grok_named_patterns"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
c.GrokNamedPatterns = append(c.GrokNamedPatterns, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["grok_patterns"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
c.GrokPatterns = append(c.GrokPatterns, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["grok_custom_patterns"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.GrokCustomPatterns = str.Value
}
}
}
if node, ok := tbl.Fields["grok_custom_pattern_files"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
c.GrokCustomPatternFiles = append(c.GrokCustomPatternFiles, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["grok_timezone"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.GrokTimezone = str.Value
}
}
}
if node, ok := tbl.Fields["grok_unique_timestamp"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.GrokUniqueTimestamp = str.Value
}
}
}
//for csv parser
if node, ok := tbl.Fields["csv_column_names"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
c.CSVColumnNames = append(c.CSVColumnNames, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["csv_column_types"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
c.CSVColumnTypes = append(c.CSVColumnTypes, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["csv_tag_columns"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
c.CSVTagColumns = append(c.CSVTagColumns, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["csv_delimiter"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.CSVDelimiter = str.Value
}
}
}
if node, ok := tbl.Fields["csv_comment"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.CSVComment = str.Value
}
}
}
if node, ok := tbl.Fields["csv_measurement_column"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.CSVMeasurementColumn = str.Value
}
}
}
if node, ok := tbl.Fields["csv_timestamp_column"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.CSVTimestampColumn = str.Value
}
}
}
if node, ok := tbl.Fields["csv_timestamp_format"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.CSVTimestampFormat = str.Value
}
}
}
if node, ok := tbl.Fields["csv_header_row_count"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if integer, ok := kv.Value.(*ast.Integer); ok {
v, err := integer.Int()
if err != nil {
return nil, err
}
c.CSVHeaderRowCount = int(v)
}
}
}
if node, ok := tbl.Fields["csv_skip_rows"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if integer, ok := kv.Value.(*ast.Integer); ok {
v, err := integer.Int()
if err != nil {
return nil, err
}
c.CSVSkipRows = int(v)
}
}
}
if node, ok := tbl.Fields["csv_skip_columns"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if integer, ok := kv.Value.(*ast.Integer); ok {
v, err := integer.Int()
if err != nil {
return nil, err
}
c.CSVSkipColumns = int(v)
}
}
}
if node, ok := tbl.Fields["csv_trim_space"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.Boolean); ok {
//for config with no quotes
val, err := strconv.ParseBool(str.Value)
if err != nil {
return nil, fmt.Errorf("E! parsing to bool: %v", err)
}
c.CSVTrimSpace = val
}
}
}
if node, ok := tbl.Fields["form_urlencoded_tag_keys"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
c.FormUrlencodedTagKeys = append(c.FormUrlencodedTagKeys, str.Value)
}
}
}
}
}
c.MetricName = name
delete(tbl.Fields, "data_format")
delete(tbl.Fields, "separator")
delete(tbl.Fields, "templates")
delete(tbl.Fields, "tag_keys")
delete(tbl.Fields, "json_name_key")
delete(tbl.Fields, "json_query")
delete(tbl.Fields, "json_string_fields")
delete(tbl.Fields, "json_time_format")
delete(tbl.Fields, "json_time_key")
delete(tbl.Fields, "json_timezone")
delete(tbl.Fields, "json_strict")
delete(tbl.Fields, "data_type")
delete(tbl.Fields, "collectd_auth_file")
delete(tbl.Fields, "collectd_security_level")
delete(tbl.Fields, "collectd_typesdb")
delete(tbl.Fields, "collectd_parse_multivalue")
delete(tbl.Fields, "dropwizard_metric_registry_path")
delete(tbl.Fields, "dropwizard_time_path")
delete(tbl.Fields, "dropwizard_time_format")
delete(tbl.Fields, "dropwizard_tags_path")
delete(tbl.Fields, "dropwizard_tag_paths")
delete(tbl.Fields, "grok_named_patterns")
delete(tbl.Fields, "grok_patterns")
delete(tbl.Fields, "grok_custom_patterns")
delete(tbl.Fields, "grok_custom_pattern_files")
delete(tbl.Fields, "grok_timezone")
delete(tbl.Fields, "grok_unique_timestamp")
delete(tbl.Fields, "csv_column_names")
delete(tbl.Fields, "csv_column_types")
delete(tbl.Fields, "csv_comment")
delete(tbl.Fields, "csv_delimiter")
delete(tbl.Fields, "csv_field_columns")
delete(tbl.Fields, "csv_header_row_count")
delete(tbl.Fields, "csv_measurement_column")
delete(tbl.Fields, "csv_skip_columns")
delete(tbl.Fields, "csv_skip_rows")
delete(tbl.Fields, "csv_tag_columns")
delete(tbl.Fields, "csv_timestamp_column")
delete(tbl.Fields, "csv_timestamp_format")
delete(tbl.Fields, "csv_trim_space")
delete(tbl.Fields, "form_urlencoded_tag_keys")
return c, nil
}
// buildSerializer grabs the necessary entries from the ast.Table for creating
// a serializers.Serializer object, and creates it, which can then be added onto
// an Output object.
func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error) {
c := &serializers.Config{TimestampUnits: time.Duration(1 * time.Second)}
if node, ok := tbl.Fields["data_format"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DataFormat = str.Value
}
}
}
if c.DataFormat == "" {
c.DataFormat = "influx"
}
if node, ok := tbl.Fields["prefix"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.Prefix = str.Value
}
}
}
if node, ok := tbl.Fields["template"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.Template = str.Value
}
}
}
if node, ok := tbl.Fields["influx_max_line_bytes"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if integer, ok := kv.Value.(*ast.Integer); ok {
v, err := integer.Int()
if err != nil {
return nil, err
}
c.InfluxMaxLineBytes = int(v)
}
}
}
if node, ok := tbl.Fields["influx_sort_fields"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if b, ok := kv.Value.(*ast.Boolean); ok {
var err error
c.InfluxSortFields, err = b.Boolean()
if err != nil {
return nil, err
}
}
}
}
if node, ok := tbl.Fields["influx_uint_support"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if b, ok := kv.Value.(*ast.Boolean); ok {
var err error
c.InfluxUintSupport, err = b.Boolean()
if err != nil {
return nil, err
}
}
}
}
if node, ok := tbl.Fields["graphite_tag_support"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if b, ok := kv.Value.(*ast.Boolean); ok {
var err error
c.GraphiteTagSupport, err = b.Boolean()
if err != nil {
return nil, err
}
}
}
}
if node, ok := tbl.Fields["json_timestamp_units"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
timestampVal, err := time.ParseDuration(str.Value)
if err != nil {
return nil, fmt.Errorf("Unable to parse json_timestamp_units as a duration, %s", err)
}
// now that we have a duration, truncate it to the nearest
// power of ten (just in case)
nearest_exponent := int64(math.Log10(float64(timestampVal.Nanoseconds())))
new_nanoseconds := int64(math.Pow(10.0, float64(nearest_exponent)))
c.TimestampUnits = time.Duration(new_nanoseconds)
}
}
}
if node, ok := tbl.Fields["splunkmetric_hec_routing"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if b, ok := kv.Value.(*ast.Boolean); ok {
var err error
c.HecRouting, err = b.Boolean()
if err != nil {
return nil, err
}
}
}
}
if node, ok := tbl.Fields["splunkmetric_multimetric"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if b, ok := kv.Value.(*ast.Boolean); ok {
var err error
c.SplunkmetricMultiMetric, err = b.Boolean()
if err != nil {
return nil, err
}
}
}
}
if node, ok := tbl.Fields["wavefront_source_override"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
c.WavefrontSourceOverride = append(c.WavefrontSourceOverride, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["wavefront_use_strict"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if b, ok := kv.Value.(*ast.Boolean); ok {
var err error
c.WavefrontUseStrict, err = b.Boolean()
if err != nil {
return nil, err
}
}
}
}
if node, ok := tbl.Fields["prometheus_export_timestamp"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if b, ok := kv.Value.(*ast.Boolean); ok {
var err error
c.PrometheusExportTimestamp, err = b.Boolean()
if err != nil {
return nil, err
}
}
}
}
if node, ok := tbl.Fields["prometheus_sort_metrics"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if b, ok := kv.Value.(*ast.Boolean); ok {
var err error
c.PrometheusSortMetrics, err = b.Boolean()
if err != nil {
return nil, err
}
}
}
}
if node, ok := tbl.Fields["prometheus_string_as_label"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if b, ok := kv.Value.(*ast.Boolean); ok {
var err error
c.PrometheusStringAsLabel, err = b.Boolean()
if err != nil {
return nil, err
}
}
}
}
delete(tbl.Fields, "influx_max_line_bytes")
delete(tbl.Fields, "influx_sort_fields")
delete(tbl.Fields, "influx_uint_support")
delete(tbl.Fields, "graphite_tag_support")
delete(tbl.Fields, "data_format")
delete(tbl.Fields, "prefix")
delete(tbl.Fields, "template")
delete(tbl.Fields, "json_timestamp_units")
delete(tbl.Fields, "splunkmetric_hec_routing")
delete(tbl.Fields, "splunkmetric_multimetric")
delete(tbl.Fields, "wavefront_source_override")
delete(tbl.Fields, "wavefront_use_strict")
delete(tbl.Fields, "prometheus_export_timestamp")
delete(tbl.Fields, "prometheus_sort_metrics")
delete(tbl.Fields, "prometheus_string_as_label")
return serializers.NewSerializer(c)
}
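// Illustrative TOML sketch (the plugin name and values are hypothetical; the
// keys are the serializer options parsed from tbl.Fields above):
//
//	[[outputs.file]]
//	  data_format = "json"
//	  prefix = "telegraf."
//	  json_timestamp_units = "1ms"
//	  influx_sort_fields = true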
// buildOutput parses output-specific items from the ast.Table,
// builds the filter and returns a
// models.OutputConfig to be inserted into models.RunningOutput
// Note: error exists in the return for future calls that might require error
func buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) {
filter, err := buildFilter(tbl)
if err != nil {
return nil, err
}
oc := &models.OutputConfig{
Name: name,
Filter: filter,
}
// TODO
// Outputs don't support FieldDrop/FieldPass, so set to NameDrop/NamePass
if len(oc.Filter.FieldDrop) > 0 {
oc.Filter.NameDrop = oc.Filter.FieldDrop
}
if len(oc.Filter.FieldPass) > 0 {
oc.Filter.NamePass = oc.Filter.FieldPass
}
if node, ok := tbl.Fields["flush_interval"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
dur, err := time.ParseDuration(str.Value)
if err != nil {
return nil, err
}
oc.FlushInterval = dur
}
}
}
if node, ok := tbl.Fields["flush_jitter"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
dur, err := time.ParseDuration(str.Value)
if err != nil {
return nil, err
}
oc.FlushJitter = new(time.Duration)
*oc.FlushJitter = dur
}
}
}
if node, ok := tbl.Fields["metric_buffer_limit"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if integer, ok := kv.Value.(*ast.Integer); ok {
v, err := integer.Int()
if err != nil {
return nil, err
}
oc.MetricBufferLimit = int(v)
}
}
}
if node, ok := tbl.Fields["metric_batch_size"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if integer, ok := kv.Value.(*ast.Integer); ok {
v, err := integer.Int()
if err != nil {
return nil, err
}
oc.MetricBatchSize = int(v)
}
}
}
if node, ok := tbl.Fields["alias"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
oc.Alias = str.Value
}
}
}
if node, ok := tbl.Fields["name_override"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
oc.NameOverride = str.Value
}
}
}
if node, ok := tbl.Fields["name_suffix"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
oc.NameSuffix = str.Value
}
}
}
if node, ok := tbl.Fields["name_prefix"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
oc.NamePrefix = str.Value
}
}
}
delete(tbl.Fields, "flush_interval")
delete(tbl.Fields, "flush_jitter")
delete(tbl.Fields, "metric_buffer_limit")
delete(tbl.Fields, "metric_batch_size")
delete(tbl.Fields, "alias")
delete(tbl.Fields, "name_override")
delete(tbl.Fields, "name_suffix")
delete(tbl.Fields, "name_prefix")
return oc, nil
}
| [
"\"TELEGRAF_CONFIG_PATH\"",
"\"ProgramFiles\""
] | [] | [
"TELEGRAF_CONFIG_PATH",
"ProgramFiles"
] | [] | ["TELEGRAF_CONFIG_PATH", "ProgramFiles"] | go | 2 | 0 | |
plugins/cd_buttons.py | import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
import os
import json
import math
import shutil
import subprocess
import time
if bool(os.environ.get("WEBHOOK", False)):
from sample_config import Config
else:
from config import Config
from plugins.youtube_dl_button import youtube_dl_call_back
from plugins.dl_button import ddl_call_back
from translation import Translation
from pyrogram import Client
from helper_funcs.display_progress import progress_for_pyrogram, humanbytes
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from hachoir.metadata import extractMetadata
from hachoir.parser import createParser
from PIL import Image
@Client.on_callback_query()
async def button(bot, update):
if update.from_user.id in Config.BANNED_USERS:
await bot.delete_messages(
chat_id=update.message.chat.id,
message_ids=update.message.message_id,
revoke=True
)
return
# logger.info(update)
cb_data = update.data
if ":" in cb_data:
# unzip formats
extract_dir_path = Config.DOWNLOAD_LOCATION + \
"/" + str(update.from_user.id) + "zipped" + "/"
if not os.path.isdir(extract_dir_path):
await bot.delete_messages(
chat_id=update.message.chat.id,
message_ids=update.message.message_id,
revoke=True
)
return False
zip_file_contents = os.listdir(extract_dir_path)
type_of_extract, index_extractor, undefined_tcartxe = cb_data.split(":")
if index_extractor == "NONE":
try:
shutil.rmtree(extract_dir_path)
except:
pass
await bot.edit_message_text(
chat_id=update.message.chat.id,
text=Translation.CANCEL_STR,
message_id=update.message.message_id
)
elif index_extractor == "ALL":
i = 0
for file_content in zip_file_contents:
current_file_name = os.path.join(extract_dir_path, file_content)
start_time = time.time()
await bot.send_document(
chat_id=update.message.chat.id,
document=current_file_name,
# thumb=thumb_image_path,
caption=file_content,
# reply_markup=reply_markup,
reply_to_message_id=update.message.message_id,
progress=progress_for_pyrogram,
progress_args=(
Translation.UPLOAD_START,
update.message,
start_time
)
)
i = i + 1
os.remove(current_file_name)
try:
shutil.rmtree(extract_dir_path)
except:
pass
await bot.edit_message_text(
chat_id=update.message.chat.id,
text=Translation.ZIP_UPLOADED_STR.format(i, "0"),
message_id=update.message.message_id
)
else:
file_content = zip_file_contents[int(index_extractor)]
current_file_name = os.path.join(extract_dir_path, file_content)
start_time = time.time()
await bot.send_document(
chat_id=update.message.chat.id,
document=current_file_name,
# thumb=thumb_image_path,
caption=file_content,
# reply_markup=reply_markup,
reply_to_message_id=update.message.message_id,
progress=progress_for_pyrogram,
progress_args=(
Translation.UPLOAD_START,
update.message,
start_time
)
)
try:
shutil.rmtree(extract_dir_path)
except:
pass
await bot.edit_message_text(
chat_id=update.message.chat.id,
text=Translation.ZIP_UPLOADED_STR.format("1", "0"),
message_id=update.message.message_id
)
elif "|" in update.data:
await youtube_dl_call_back(bot, update)
elif "=" in update.data:
await ddl_call_back(bot, update)
elif update.data == "home":
await update.message.edit_text(
text=Translation.START_TEXT.format(update.from_user.mention),
reply_markup=Translation.START_BUTTONS,
disable_web_page_preview=True
)
elif update.data == "help":
await update.message.edit_text(
text=Translation.HELP_TEXT,
reply_markup=Translation.HELP_BUTTONS,
disable_web_page_preview=True
)
elif update.data == "about":
await update.message.edit_text(
text=Translation.ABOUT_TEXT,
reply_markup=Translation.ABOUT_BUTTONS,
disable_web_page_preview=True
)
else:
await update.message.delete()
| [] | [] | [
"WEBHOOK"
] | [] | ["WEBHOOK"] | python | 1 | 0 | |
djangoLogin/djangoLogin/asgi.py | """
ASGI config for djangoLogin project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangoLogin.settings')
application = get_asgi_application()
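# Illustrative note (assuming a typical ASGI server; these commands are not part
# of this project): the application object above is referenced as, e.g.,
#   uvicorn djangoLogin.asgi:application
#   daphne djangoLogin.asgi:application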
| [] | [] | [] | [] | [] | python | 0 | 0 | |
fragmap/main.py | #!/usr/bin/env python
# encoding: utf-8
# Copyright 2016-2021 Alexander Mollberg
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
from fragmap.console_color import ANSI_UP
from fragmap.console_ui import print_fragmap
from fragmap.file_selection import FileSelection
from fragmap.generate_matrix import ConnectedFragmap, Fragmap, BriefFragmap
from fragmap.load_commits import CommitSelection, CommitLoader
from fragmap.web_ui import open_fragmap_page, start_fragmap_server
from getch.getch import getch
from . import debug
def make_fragmap(diff_list, files_arg, brief=False, infill=False) -> Fragmap:
fragmap = Fragmap.from_diffs(diff_list, files_arg)
# with open('fragmap_ast.json', 'wb') as f:
# json.dump(fragmap.patches, f, cls=DictCoersionEncoder)
if brief:
fragmap = BriefFragmap(fragmap)
if infill:
fragmap = ConnectedFragmap(fragmap)
return fragmap
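# A minimal usage sketch (illustrative, assuming diff_list was produced by
# CommitLoader.load as in main() below):
#   fm = make_fragmap(diff_list, files_arg=None, brief=True)
#   lines, cols = print_fragmap(fm, do_color=True)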
def main():
if 'FRAGMAP_DEBUG' in os.environ:
debug_parser = debug.parse_args(extendable=True)
parent_parsers = [debug_parser]
else:
parent_parsers = []
# Parse command line arguments
argparser = argparse.ArgumentParser(prog='fragmap',
description='Visualize a timeline of Git commit changes on a grid',
parents=parent_parsers)
inspecarg = argparser.add_argument_group('input',
'Specify the input commits or patch file')
inspecarg.add_argument('-u', '--until', metavar='END_COMMIT', action='store',
required=False, dest='until',
help='Which commit to show until, inclusive.')
inspecarg.add_argument('-n', metavar='NUMBER_OF_COMMITS', action='store',
help='How many previous commits to show. Uncommitted changes are shown in addition to these.')
inspecarg.add_argument('-s', '--since', metavar='START_COMMIT',
action='store',
help='Which commit to start showing from, exclusive.')
argparser.add_argument('--no-color', action='store_true', required=False,
help='Disable color coding of the output.')
argparser.add_argument('-l', '--live', action='store_true', required=False,
help='Keep running and enable refreshing of the displayed fragmap')
outformatarg = argparser.add_mutually_exclusive_group(required=False)
argparser.add_argument('-f', '--full', action='store_true', required=False,
help='Show the full fragmap, disabling deduplication of the columns.')
outformatarg.add_argument('-w', '--web', action='store_true', required=False,
help='Generate and open an HTML document instead of printing to console. Implies -f')
argparser.add_argument('-i', '--files', metavar='FILE',
nargs='+', action='store', required=False,
dest='files', help="Which files to show changes "
"from. The default is all files.")
args = argparser.parse_args()
# Load commits
cl = CommitLoader()
if args.until and not args.since:
print('Error: --since/-s must be used if --until/-u is used')
exit(1)
max_count = None
if args.n:
max_count = int(args.n)
if not (args.until or args.since or args.n):
max_count = 3
lines_printed = [0]
columns_printed = [0]
def serve():
def erase_current_line():
print('\r' + ' ' * columns_printed[0] + '\r', end='')
# Make way for status updates from below operations
erase_current_line()
selection = CommitSelection(since_ref=args.since,
until_ref=args.until,
max_count=max_count,
include_staged=not args.until,
include_unstaged=not args.until)
is_full = args.full or args.web
debug.get('console').debug(selection)
diff_list = cl.load(os.getcwd(), selection)
debug.get('console').debug(diff_list)
print('... Generating fragmap\r', end='')
fm = make_fragmap(diff_list, args.files, not is_full, False)
print(' \r', end='')
# Erase each line and move cursor up to overwrite previous fragmap
erase_current_line()
for i in range(lines_printed[0]):
print(ANSI_UP, end='')
erase_current_line()
return fm
fragmap = serve()
if args.web:
if args.live:
start_fragmap_server(serve)
else:
open_fragmap_page(fragmap, args.live)
else:
lines_printed[0], columns_printed[0] = print_fragmap(fragmap,
do_color=not args.no_color)
if args.live:
while True:
print('Press Enter to refresh', end='')
sys.stdout.flush()
key = getch()
if ord(key) != 0xd:
break
fragmap = serve()
lines_printed[0], columns_printed[0] = print_fragmap(fragmap,
do_color=not args.no_color)
print('')
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
yt/funcs.py | import base64
import builtins
import contextlib
import copy
import errno
import getpass
import glob
import inspect
import itertools
import os
import pdb
import re
import struct
import subprocess
import sys
import time
import traceback
import urllib.parse
import urllib.request
import warnings
from functools import lru_cache, wraps
from numbers import Number as numeric_type
from typing import Any, Callable, Type
import matplotlib
import numpy as np
from more_itertools import always_iterable, collapse, first
from packaging.version import Version
from tqdm import tqdm
from yt.units import YTArray, YTQuantity
from yt.utilities.exceptions import YTInvalidWidthError
from yt.utilities.logger import ytLogger as mylog
from yt.utilities.on_demand_imports import _requests as requests
# Some functions for handling sequences and other types
def is_sequence(obj):
"""
Grabbed from Python Cookbook / matplotlib.cbook. Returns true/false for whether *obj* is iterable by len().
Parameters
----------
obj : iterable
"""
try:
len(obj)
return True
except TypeError:
return False
def iter_fields(field_or_fields):
"""
Create an iterator for field names, specified as single strings or (ftype, fname)
tuples alike.
This can safely be used in places where we accept a single field or a list as input.
Parameters
----------
field_or_fields: str, tuple(str, str), or any iterable of the previous types.
Examples
--------
>>> fields = "density"
>>> for field in iter_fields(fields):
... print(field)
density
>>> fields = ("gas", "density")
>>> for field in iter_fields(fields):
... print(field)
('gas', 'density')
>>> fields = [("gas", "density"), ("gas", "temperature"), ("index", "dx")]
>>> for field in iter_fields(fields):
... print(field)
('gas', 'density')
('gas', 'temperature')
('index', 'dx')
"""
return always_iterable(field_or_fields, base_type=(tuple, str, bytes))
def ensure_numpy_array(obj):
"""
This function ensures that *obj* is a numpy array. Typically used to
convert a scalar, list, or tuple argument passed to functions using Cython.
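Examples
--------
A minimal illustration:
>>> ensure_numpy_array(3.0)
array([3.])
>>> ensure_numpy_array((1, 2))
array([1, 2])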
"""
if isinstance(obj, np.ndarray):
if obj.shape == ():
return np.array([obj])
# We cast to ndarray to catch ndarray subclasses
return np.array(obj)
elif isinstance(obj, (list, tuple)):
return np.asarray(obj)
else:
return np.asarray([obj])
def read_struct(f, fmt):
"""
This reads a struct, and only that struct, from an open file.
"""
s = f.read(struct.calcsize(fmt))
return struct.unpack(fmt, s)
def just_one(obj):
# If we have an iterable, sometimes we only want one item
return first(collapse(obj))
def compare_dicts(dict1, dict2):
if not set(dict1) <= set(dict2):
return False
for key in dict1.keys():
if dict1[key] is not None and dict2[key] is not None:
if isinstance(dict1[key], dict):
if compare_dicts(dict1[key], dict2[key]):
continue
else:
return False
try:
comparison = np.array_equal(dict1[key], dict2[key])
except TypeError:
comparison = dict1[key] == dict2[key]
if not comparison:
return False
return True
# Taken from
# http://www.goldb.org/goldblog/2008/02/06/PythonConvertSecsIntoHumanReadableTimeStringHHMMSS.aspx
def humanize_time(secs):
"""
Takes *secs* and returns a nicely formatted string
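Examples
--------
A minimal illustration:
>>> humanize_time(3661)
'01:01:01'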
"""
mins, secs = divmod(secs, 60)
hours, mins = divmod(mins, 60)
return "%02d:%02d:%02d" % (hours, mins, secs)
#
# Some function wrappers that come in handy once in a while
#
# we use the resource module to get the memory page size
try:
import resource
except ImportError:
pass
def get_memory_usage(subtract_share=False):
"""
Returning resident size in megabytes
"""
pid = os.getpid()
try:
pagesize = resource.getpagesize()
except NameError:
return -1024
status_file = f"/proc/{pid}/statm"
if not os.path.isfile(status_file):
return -1024
line = open(status_file).read()
size, resident, share, text, library, data, dt = (int(i) for i in line.split())
if subtract_share:
resident -= share
return resident * pagesize / (1024 * 1024) # return in megs
def time_execution(func):
r"""
Decorator for seeing how long a given function takes, depending on whether
or not the global 'yt.time_functions' config parameter is set.
"""
@wraps(func)
def wrapper(*arg, **kw):
t1 = time.time()
res = func(*arg, **kw)
t2 = time.time()
mylog.debug("%s took %0.3f s", func.__name__, (t2 - t1))
return res
from yt.config import ytcfg
if ytcfg.get("yt", "time_functions"):
return wrapper
else:
return func
def print_tb(func):
"""
This function is used as a decorate on a function to have the calling stack
printed whenever that function is entered.
This can be used like so:
>>> @print_tb
... def some_deeply_nested_function(*args, **kwargs):
... ...
"""
@wraps(func)
def run_func(*args, **kwargs):
traceback.print_stack()
return func(*args, **kwargs)
return run_func
def rootonly(func):
"""
This is a decorator that, when used, will only call the function on the
root processor.
This can be used like so:
.. code-block:: python
@rootonly
def some_root_only_function(*args, **kwargs):
...
"""
from yt.config import ytcfg
@wraps(func)
def check_parallel_rank(*args, **kwargs):
if ytcfg.get("yt", "internals", "topcomm_parallel_rank") > 0:
return
return func(*args, **kwargs)
return check_parallel_rank
def pdb_run(func):
"""
This decorator inserts a pdb session on top of the call-stack into a
function.
This can be used like so:
>>> @pdb_run
... def some_function_to_debug(*args, **kwargs):
... ...
"""
@wraps(func)
def wrapper(*args, **kw):
pdb.runcall(func, *args, **kw)
return wrapper
__header = """
== Welcome to the embedded IPython Shell ==
You are currently inside the function:
%(fname)s
Defined in:
%(filename)s:%(lineno)s
"""
def insert_ipython(num_up=1):
"""
Placed inside a function, this will insert an IPython interpreter at that
current location. This will enabled detailed inspection of the current
execution environment, as well as (optional) modification of that environment.
*num_up* refers to how many frames of the stack get stripped off, and
defaults to 1 so that this function itself is stripped off.
"""
import IPython
from IPython.terminal.embed import InteractiveShellEmbed
try:
from traitlets.config.loader import Config
except ImportError:
from IPython.config.loader import Config
frame = inspect.stack()[num_up]
loc = frame[0].f_locals.copy()
glo = frame[0].f_globals
dd = dict(fname=frame[3], filename=frame[1], lineno=frame[2])
cfg = Config()
cfg.InteractiveShellEmbed.local_ns = loc
cfg.InteractiveShellEmbed.global_ns = glo
IPython.embed(config=cfg, banner2=__header % dd)
ipshell = InteractiveShellEmbed(config=cfg)
del ipshell
#
# Our progress bar types and how to get one
#
class TqdmProgressBar:
# This is a drop in replacement for pbar
# called tqdm
def __init__(self, title, maxval):
self._pbar = tqdm(leave=True, total=maxval, desc=title)
self.i = 0
def update(self, i=None):
if i is None:
i = self.i + 1
n = i - self.i
self.i = i
self._pbar.update(n)
def finish(self):
self._pbar.close()
class DummyProgressBar:
# This progressbar gets handed if we don't
# want ANY output
def __init__(self, *args, **kwargs):
return
def update(self, *args, **kwargs):
return
def finish(self, *args, **kwargs):
return
def get_pbar(title, maxval):
"""
This returns a progressbar of the most appropriate type, given a *title*
and a *maxval*.
"""
maxval = max(maxval, 1)
from yt.config import ytcfg
if (
ytcfg.get("yt", "suppress_stream_logging")
or ytcfg.get("yt", "internals", "within_testing")
or maxval == 1
or not is_root()
):
return DummyProgressBar()
return TqdmProgressBar(title, maxval)
def only_on_root(func, *args, **kwargs):
"""
This function accepts a *func*, a set of *args* and *kwargs* and then only
on the root processor calls the function. All other processors get "None"
handed back.
"""
from yt.config import ytcfg
if kwargs.pop("global_rootonly", False):
cfg_option = "global_parallel_rank"
else:
cfg_option = "topcomm_parallel_rank"
if not ytcfg.get("yt", "internals", "parallel"):
return func(*args, **kwargs)
if ytcfg.get("yt", "internals", cfg_option) > 0:
return
return func(*args, **kwargs)
def is_root():
"""
This function returns True if it is on the root processor of the
topcomm and False otherwise.
"""
from yt.config import ytcfg
if not ytcfg.get("yt", "internals", "parallel"):
return True
return ytcfg.get("yt", "internals", "topcomm_parallel_rank") == 0
#
# Our signal and traceback handling functions
#
def signal_print_traceback(signo, frame):
print(traceback.print_stack(frame))
def signal_problem(signo, frame):
raise RuntimeError()
def signal_ipython(signo, frame):
insert_ipython(2)
def paste_traceback(exc_type, exc, tb):
"""
This is a traceback handler that knows how to paste to the pastebin.
Should only be used in sys.excepthook.
"""
sys.__excepthook__(exc_type, exc, tb)
import xmlrpc.client
from io import StringIO
p = xmlrpc.client.ServerProxy(
"http://paste.yt-project.org/xmlrpc/", allow_none=True
)
s = StringIO()
traceback.print_exception(exc_type, exc, tb, file=s)
s = s.getvalue()
ret = p.pastes.newPaste("pytb", s, None, "", "", True)
print()
print(f"Traceback pasted to http://paste.yt-project.org/show/{ret}")
print()
def paste_traceback_detailed(exc_type, exc, tb):
"""
This is a traceback handler that knows how to paste to the pastebin.
Should only be used in sys.excepthook.
"""
import cgitb
import xmlrpc.client
from io import StringIO
s = StringIO()
handler = cgitb.Hook(format="text", file=s)
handler(exc_type, exc, tb)
s = s.getvalue()
print(s)
p = xmlrpc.client.ServerProxy(
"http://paste.yt-project.org/xmlrpc/", allow_none=True
)
ret = p.pastes.newPaste("text", s, None, "", "", True)
print()
print(f"Traceback pasted to http://paste.yt-project.org/show/{ret}")
print()
_ss = "fURbBUUBE0cLXgETJnZgJRMXVhVGUQpQAUBuehQMUhJWRFFRAV1ERAtBXw1dAxMLXT4zXBFfABNN\nC0ZEXw1YUURHCxMXVlFERwxWCQw=\n"
def _rdbeta(key):
enc_s = base64.decodebytes(_ss.encode("ascii"))
dec_s = "".join(chr(a ^ ord(b)) for a, b in zip(enc_s, itertools.cycle(key)))
print(dec_s)
#
# Some exceptions
#
class NoCUDAException(Exception):
pass
class YTEmptyClass:
pass
def update_git(path):
try:
import git
except ImportError:
print("Updating and precise version information requires ")
print("gitpython to be installed.")
print("Try: python -m pip install gitpython")
return -1
with open(os.path.join(path, "yt_updater.log"), "a") as f:
repo = git.Repo(path)
if repo.is_dirty(untracked_files=True):
print("Changes have been made to the yt source code so I won't ")
print("update the code. You will have to do this yourself.")
print("Here's a set of sample commands:")
print("")
print(f" $ cd {path}")
print(" $ git stash")
print(" $ git checkout main")
print(" $ git pull")
print(" $ git stash pop")
print(f" $ {sys.executable} setup.py develop")
print("")
return 1
if repo.active_branch.name != "main":
print("yt repository is not tracking the main branch so I won't ")
print("update the code. You will have to do this yourself.")
print("Here's a set of sample commands:")
print("")
print(f" $ cd {path}")
print(" $ git checkout main")
print(" $ git pull")
print(f" $ {sys.executable} setup.py develop")
print("")
return 1
print("Updating the repository")
f.write("Updating the repository\n\n")
old_version = repo.git.rev_parse("HEAD", short=12)
try:
remote = repo.remotes.yt_upstream
except AttributeError:
remote = repo.create_remote(
"yt_upstream", url="https://github.com/yt-project/yt"
)
remote.fetch()
main = repo.heads.main
main.set_tracking_branch(remote.refs.main)
main.checkout()
remote.pull()
new_version = repo.git.rev_parse("HEAD", short=12)
f.write(f"Updated from {old_version} to {new_version}\n\n")
rebuild_modules(path, f)
print("Updated successfully")
def rebuild_modules(path, f):
f.write("Rebuilding modules\n\n")
p = subprocess.Popen(
[sys.executable, "setup.py", "build_ext", "-i"],
cwd=path,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
stdout, stderr = p.communicate()
f.write(stdout.decode("utf-8"))
f.write("\n\n")
if p.returncode:
print(f"BROKEN: See {os.path.join(path, 'yt_updater.log')}")
sys.exit(1)
f.write("Successful!\n")
def get_git_version(path):
try:
import git
except ImportError:
print("Updating and precise version information requires ")
print("gitpython to be installed.")
print("Try: python -m pip install gitpython")
return None
try:
repo = git.Repo(path)
return repo.git.rev_parse("HEAD", short=12)
except git.InvalidGitRepositoryError:
# path is not a git repository
return None
def get_yt_version():
import pkg_resources
yt_provider = pkg_resources.get_provider("yt")
path = os.path.dirname(yt_provider.module_path)
version = get_git_version(path)
if version is None:
return version
else:
v_str = version[:12].strip()
if hasattr(v_str, "decode"):
v_str = v_str.decode("utf-8")
return v_str
def get_version_stack():
version_info = {}
version_info["yt"] = get_yt_version()
version_info["numpy"] = np.version.version
version_info["matplotlib"] = matplotlib.__version__
return version_info
def get_script_contents():
top_frame = inspect.stack()[-1]
finfo = inspect.getframeinfo(top_frame[0])
if finfo[2] != "<module>":
return None
if not os.path.exists(finfo[0]):
return None
try:
contents = open(finfo[0]).read()
except Exception:
contents = None
return contents
def download_file(url, filename):
try:
return fancy_download_file(url, filename, requests)
except ImportError:
# fancy_download_file requires requests
return simple_download_file(url, filename)
def fancy_download_file(url, filename, requests=None):
response = requests.get(url, stream=True)
total_length = response.headers.get("content-length")
with open(filename, "wb") as fh:
if total_length is None:
fh.write(response.content)
else:
blocksize = 4 * 1024**2
iterations = int(float(total_length) / float(blocksize))
pbar = get_pbar(
"Downloading %s to %s " % os.path.split(filename)[::-1], iterations
)
iteration = 0
for chunk in response.iter_content(chunk_size=blocksize):
fh.write(chunk)
iteration += 1
pbar.update(iteration)
pbar.finish()
return filename
def simple_download_file(url, filename):
class MyURLopener(urllib.request.FancyURLopener):
def http_error_default(self, url, fp, errcode, errmsg, headers):
raise RuntimeError(
"Attempt to download file from %s failed with error %s: %s."
% (url, errcode, errmsg)
)
fn, h = MyURLopener().retrieve(url, filename)
return fn
# This code snippet is modified from Georg Brandl
def bb_apicall(endpoint, data, use_pass=True):
uri = f"https://api.bitbucket.org/1.0/{endpoint}/"
# since bitbucket doesn't return the required WWW-Authenticate header when
# making a request without Authorization, we cannot use the standard urllib2
# auth handlers; we have to add the requisite header from the start
if data is not None:
data = urllib.parse.urlencode(data).encode("utf-8")
req = urllib.request.Request(uri, data)
if use_pass:
username = input("Bitbucket Username? ")
password = getpass.getpass()
upw = f"{username}:{password}"
auth = base64.b64encode(upw.encode("utf-8")).decode("ascii")
req.add_header("Authorization", f"Basic {auth}")
return urllib.request.urlopen(req).read()
def fix_length(length, ds):
registry = ds.unit_registry
if isinstance(length, YTArray):
if registry is not None:
length.units.registry = registry
return length.in_units("code_length")
if isinstance(length, numeric_type):
return YTArray(length, "code_length", registry=registry)
length_valid_tuple = isinstance(length, (list, tuple)) and len(length) == 2
unit_is_string = length_valid_tuple and isinstance(length[1], str)
length_is_number = length_valid_tuple and isinstance(
length[0], numeric_type
) and not isinstance(length[0], YTArray)
if length_valid_tuple and unit_is_string and length_is_number:
return YTArray(*length, registry=registry)
else:
raise RuntimeError(f"Length {str(length)} is invalid")
@contextlib.contextmanager
def parallel_profile(prefix):
r"""A context manager for profiling parallel code execution using cProfile
This is a simple context manager that automatically profiles the execution
of a snippet of code.
Parameters
----------
prefix : string
A string name to prefix outputs with.
Examples
--------
>>> from yt import PhasePlot
>>> from yt.testing import fake_random_ds
>>> fields = ("density", "temperature", "cell_mass")
>>> units = ("g/cm**3", "K", "g")
>>> ds = fake_random_ds(16, fields=fields, units=units)
>>> with parallel_profile("my_profile"):
... plot = PhasePlot(ds.all_data(), *fields)
"""
import cProfile
from yt.config import ytcfg
fn = "%s_%04i_%04i.cprof" % (
prefix,
ytcfg.get("yt", "internals", "topcomm_parallel_size"),
ytcfg.get("yt", "internals", "topcomm_parallel_rank"),
)
p = cProfile.Profile()
p.enable()
yield fn
p.disable()
p.dump_stats(fn)
def get_num_threads():
from .config import ytcfg
nt = ytcfg.get("yt", "num_threads")
if nt < 0:
return os.environ.get("OMP_NUM_THREADS", 0)
return nt
def fix_axis(axis, ds):
return ds.coordinates.axis_id.get(axis, axis)
def get_output_filename(name, keyword, suffix):
r"""Return an appropriate filename for output.
With a name provided by the user, this will decide how to appropriately name the
output file by the following rules:
1. if name is None, the filename will be the keyword plus the suffix.
2. if name ends with "/" (resp "\" on Windows), assume name is a directory and the
file will be named name/(keyword+suffix). If the directory does not exist, first
try to create it and raise an exception if an error occurs.
3. if name does not end in the suffix, add the suffix.
Parameters
----------
name : str
A filename given by the user.
keyword : str
A default filename prefix if name is None.
suffix : str
Suffix that must appear at end of the filename.
This will be added if not present.
Examples
--------
>>> get_output_filename(None, "Projection_x", ".png")
'Projection_x.png'
>>> get_output_filename("my_file", "Projection_x", ".png")
'my_file.png'
>>> get_output_filename("my_dir/", "Projection_x", ".png")
'my_dir/Projection_x.png'
"""
if name is None:
name = keyword
name = os.path.expanduser(name)
if name.endswith(os.sep) and not os.path.isdir(name):
ensure_dir(name)
if os.path.isdir(name):
name = os.path.join(name, keyword)
if not name.endswith(suffix):
name += suffix
return name
def ensure_dir_exists(path):
r"""Create all directories in path recursively in a parallel safe manner"""
my_dir = os.path.dirname(path)
# If path is a file in the current directory, like "test.txt", then my_dir
# would be an empty string, resulting in FileNotFoundError when passed to
# ensure_dir. Let's avoid that.
if my_dir:
ensure_dir(my_dir)
def ensure_dir(path):
r"""Parallel safe directory maker."""
if os.path.exists(path):
return path
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
return path
def validate_width_tuple(width):
if not is_sequence(width) or len(width) != 2:
raise YTInvalidWidthError(f"width ({width}) is not a two element tuple")
is_numeric = isinstance(width[0], numeric_type)
length_has_units = isinstance(width[0], YTArray)
unit_is_string = isinstance(width[1], str)
if not is_numeric or length_has_units and unit_is_string:
msg = f"width ({str(width)}) is invalid. "
msg += "Valid widths look like this: (12, 'au')"
raise YTInvalidWidthError(msg)
_first_cap_re = re.compile("(.)([A-Z][a-z]+)")
_all_cap_re = re.compile("([a-z0-9])([A-Z])")
@lru_cache(maxsize=128, typed=False)
def camelcase_to_underscore(name):
s1 = _first_cap_re.sub(r"\1_\2", name)
return _all_cap_re.sub(r"\1_\2", s1).lower()
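# A minimal illustration (the example input is hypothetical):
# camelcase_to_underscore("SlicePlotWindow") -> "slice_plot_window"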
def set_intersection(some_list):
if len(some_list) == 0:
return set()
# This accepts a list of iterables, which we get the intersection of.
s = set(some_list[0])
for l in some_list[1:]:
s.intersection_update(l)
return s
@contextlib.contextmanager
def memory_checker(interval=15, dest=None):
r"""This is a context manager that monitors memory usage.
Parameters
----------
interval : int
The number of seconds between printing the current memory usage in
gigabytes of the current Python interpreter.
Examples
--------
>>> with memory_checker(10):
... arr = np.zeros(1024 * 1024 * 1024, dtype="float64")
... time.sleep(15)
... del arr
MEMORY: -1.000e+00 gb
"""
import threading
if dest is None:
dest = sys.stdout
class MemoryChecker(threading.Thread):
def __init__(self, event, interval):
self.event = event
self.interval = interval
threading.Thread.__init__(self)
def run(self):
while not self.event.wait(self.interval):
print(f"MEMORY: {get_memory_usage() / 1024.0:0.3e} gb", file=dest)
e = threading.Event()
mem_check = MemoryChecker(e, interval)
mem_check.start()
try:
yield
finally:
e.set()
def enable_plugins(plugin_filename=None):
"""Forces a plugin file to be parsed.
A plugin file is a means of creating custom fields, quantities,
data objects, colormaps, and other code classes and objects to be used
in yt scripts without modifying the yt source directly.
If ``plugin_filename`` is omitted, this function will look for a plugin file at
``$HOME/.config/yt/my_plugins.py``, which is the preferred behaviour for a
system-level configuration.
Warning: a script using this function will only be reproducible if your plugin
file is shared with it.
"""
import yt
from yt.config import config_dir, ytcfg
from yt.fields.my_plugin_fields import my_plugins_fields
if plugin_filename is not None:
_fn = plugin_filename
if not os.path.isfile(_fn):
raise FileNotFoundError(_fn)
else:
# Determine global plugin location. By decreasing priority order:
# - absolute path
# - CONFIG_DIR
# - obsolete config dir.
my_plugin_name = ytcfg.get("yt", "plugin_filename")
for base_prefix in ("", config_dir()):
if os.path.isfile(os.path.join(base_prefix, my_plugin_name)):
_fn = os.path.join(base_prefix, my_plugin_name)
break
else:
raise FileNotFoundError("Could not find a global system plugin file.")
mylog.info("Loading plugins from %s", _fn)
ytdict = yt.__dict__
execdict = ytdict.copy()
execdict["add_field"] = my_plugins_fields.add_field
with open(_fn) as f:
code = compile(f.read(), _fn, "exec")
exec(code, execdict, execdict)
ytnamespace = list(ytdict.keys())
for k in execdict.keys():
if k not in ytnamespace:
if callable(execdict[k]):
setattr(yt, k, execdict[k])
def subchunk_count(n_total, chunk_size):
handled = 0
while handled < n_total:
tr = min(n_total - handled, chunk_size)
yield tr
handled += tr
def fix_unitary(u):
if u == "1":
return "unitary"
else:
return u
def get_hash(infile, algorithm="md5", BLOCKSIZE=65536):
"""Generate file hash without reading in the entire file at once.
Original code licensed under MIT. Source:
https://www.pythoncentral.io/hashing-files-with-python/
Parameters
----------
infile : str
File of interest (including the path).
algorithm : str (optional)
Hash algorithm of choice. Defaults to 'md5'.
BLOCKSIZE : int (optional)
How much data in bytes to read in at once.
Returns
-------
hash : str
The hash of the file.
Examples
--------
>>> from tempfile import NamedTemporaryFile
>>> with NamedTemporaryFile() as file:
... get_hash(file.name)
'd41d8cd98f00b204e9800998ecf8427e'
"""
import hashlib
try:
hasher = getattr(hashlib, algorithm)()
except AttributeError as e:
raise NotImplementedError(
f"'{algorithm}' not available! Available algorithms: {hashlib.algorithms_available}"
) from e
filesize = os.path.getsize(infile)
iterations = int(float(filesize) / float(BLOCKSIZE))
pbar = get_pbar(f"Generating {algorithm} hash", iterations)
iter = 0
with open(infile, "rb") as f:
buf = f.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = f.read(BLOCKSIZE)
iter += 1
pbar.update(iter)
pbar.finish()
return hasher.hexdigest()
def get_brewer_cmap(cmap):
"""Returns a colorbrewer colormap from palettable"""
try:
import brewer2mpl
except ImportError:
brewer2mpl = None
try:
import palettable
except ImportError:
palettable = None
if palettable is not None:
bmap = palettable.colorbrewer.get_map(*cmap)
elif brewer2mpl is not None:
warnings.warn(
"Using brewer2mpl colormaps is deprecated. "
"Please install the successor to brewer2mpl, "
"palettable, with `pip install palettable`. "
"Colormap tuple names remain unchanged."
)
bmap = brewer2mpl.get_map(*cmap)
else:
raise RuntimeError("Please install palettable to use colorbrewer colormaps")
return bmap.get_mpl_colormap(N=cmap[2])
@contextlib.contextmanager
def dummy_context_manager(*args, **kwargs):
yield
def matplotlib_style_context(style_name=None, after_reset=False):
"""Returns a context manager for controlling matplotlib style.
Arguments are passed to matplotlib.style.context() if specified. Defaults
to setting "classic" style, after resetting to the default config parameters.
On older matplotlib versions (<=1.5.0) where matplotlib.style isn't
available, returns a dummy context manager.
"""
if style_name is None:
import matplotlib
style_name = {"mathtext.fontset": "cm"}
if Version(matplotlib.__version__) >= Version("3.3.0"):
style_name["mathtext.fallback"] = "cm"
else:
style_name["mathtext.fallback_to_cm"] = True
try:
import matplotlib.style
return matplotlib.style.context(style_name, after_reset=after_reset)
except ImportError:
pass
return dummy_context_manager()
interactivity = False
"""Sets the condition that interactive backends can be used."""
def toggle_interactivity():
global interactivity
interactivity = not interactivity
if interactivity:
if "__IPYTHON__" in dir(builtins):
import IPython
shell = IPython.get_ipython()
shell.magic("matplotlib")
else:
import matplotlib
matplotlib.interactive(True)
def get_interactivity():
return interactivity
def setdefaultattr(obj, name, value):
"""Set attribute with *name* on *obj* with *value* if it doesn't exist yet
Analogous to dict.setdefault
"""
if not hasattr(obj, name):
setattr(obj, name, value)
return getattr(obj, name)
def parse_h5_attr(f, attr):
"""A Python3-safe function for getting hdf5 attributes.
If an attribute is supposed to be a string, this will return it as such.
"""
val = f.attrs.get(attr, None)
if isinstance(val, bytes):
return val.decode("utf8")
else:
return val
def obj_length(v):
if is_sequence(v):
return len(v)
else:
# If something isn't iterable, we return 0
# to signify zero length (aka a scalar).
return 0
def array_like_field(data, x, field):
field = data._determine_fields(field)[0]
if isinstance(field, tuple):
finfo = data.ds._get_field_info(field[0], field[1])
else:
finfo = data.ds._get_field_info(field)
if finfo.sampling_type == "particle":
units = finfo.output_units
else:
units = finfo.units
if isinstance(x, YTArray):
arr = copy.deepcopy(x)
arr.convert_to_units(units)
return arr
if isinstance(x, np.ndarray):
return data.ds.arr(x, units)
else:
return data.ds.quan(x, units)
def validate_3d_array(obj):
if not is_sequence(obj) or len(obj) != 3:
raise TypeError(
"Expected an array of size (3,), received '%s' of "
"length %s" % (str(type(obj)).split("'")[1], len(obj))
)
def validate_float(obj):
"""Validates if the passed argument is a float value.
Raises an exception unless `obj` is a single float value
or a YTQuantity of size 1.
Parameters
----------
obj : Any
Any argument which needs to be checked for a single float value.
Raises
------
TypeError
Raised if `obj` is not a single float value or YTQuantity
Examples
--------
>>> validate_float(1)
>>> validate_float(1.50)
>>> validate_float(YTQuantity(1, "cm"))
>>> validate_float((1, "cm"))
>>> validate_float([1, 1, 1])
Traceback (most recent call last):
...
TypeError: Expected a numeric value (or size-1 array), received 'list' of length 3
>>> validate_float([YTQuantity(1, "cm"), YTQuantity(2, "cm")])
Traceback (most recent call last):
...
TypeError: Expected a numeric value (or size-1 array), received 'list' of length 2
"""
if isinstance(obj, tuple):
if (
len(obj) != 2
or not isinstance(obj[0], numeric_type)
or not isinstance(obj[1], str)
):
raise TypeError(
"Expected a numeric value (or tuple of format "
"(float, String)), received an inconsistent tuple "
"'%s'." % str(obj)
)
else:
return
if is_sequence(obj) and (len(obj) != 1 or not isinstance(obj[0], numeric_type)):
raise TypeError(
"Expected a numeric value (or size-1 array), "
"received '%s' of length %s" % (str(type(obj)).split("'")[1], len(obj))
)
def validate_sequence(obj):
if obj is not None and not is_sequence(obj):
raise TypeError(
"Expected an iterable object,"
" received '%s'" % str(type(obj)).split("'")[1]
)
def validate_field_key(key):
if (
isinstance(key, tuple)
and len(key) == 2
and all(isinstance(_, str) for _ in key)
):
return
raise TypeError(
"Expected a 2-tuple of strings formatted as\n"
"(field or particle type, field name)\n"
f"Received invalid field key: {key}, with type {type(key)}"
)
def validate_object(obj, data_type):
if obj is not None and not isinstance(obj, data_type):
raise TypeError(
"Expected an object of '%s' type, received '%s'"
% (str(data_type).split("'")[1], str(type(obj)).split("'")[1])
)
def validate_axis(ds, axis):
if ds is not None:
valid_axis = ds.coordinates.axis_name.keys()
else:
valid_axis = [0, 1, 2, "x", "y", "z", "X", "Y", "Z"]
if axis not in valid_axis:
raise TypeError(
"Expected axis of int or char type (can be %s), "
"received '%s'." % (list(valid_axis), axis)
)
def validate_center(center):
if isinstance(center, str):
c = center.lower()
if (
c not in ["c", "center", "m", "max", "min"]
and not c.startswith("max_")
and not c.startswith("min_")
):
raise TypeError(
"Expected 'center' to be in ['c', 'center', "
"'m', 'max', 'min'] or the prefix to be "
"'max_'/'min_', received '%s'." % center
)
elif not isinstance(center, (numeric_type, YTQuantity)) and not is_sequence(center):
raise TypeError(
"Expected 'center' to be a numeric object of type "
"list/tuple/np.ndarray/YTArray/YTQuantity, "
"received '%s'." % str(type(center)).split("'")[1]
)
def sglob(pattern):
"""
Return the results of a glob through the sorted() function.
"""
return sorted(glob.glob(pattern))
def dictWithFactory(factory: Callable[[Any], Any]) -> Type:
"""
Create a dictionary class with a default factory function.
Contrary to `collections.defaultdict`, the factory takes
the missing key as input parameter.
Parameters
----------
factory : callable(key) -> value
The factory to call when hitting a missing key
Returns
-------
DictWithFactory class
A class to create new dictionaries handling missing keys.
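Examples
--------
A minimal illustration:
>>> LengthDict = dictWithFactory(len)
>>> d = LengthDict()
>>> d["spam"]
4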
"""
class DictWithFactory(dict):
def __init__(self, *args, **kwargs):
self.factory = factory
super().__init__(*args, **kwargs)
def __missing__(self, key):
val = self.factory(key)
self[key] = val
return val
return DictWithFactory
def levenshtein_distance(seq1, seq2, max_dist=None):
"""
Compute the levenshtein distance between seq1 and seq2.
From https://stackabuse.com/levenshtein-distance-and-text-similarity-in-python/
Parameters
----------
seq1 : str
seq2 : str
The strings to compute the distance between
max_dist : integer
If not None, maximum distance returned (see notes).
Returns
-------
The Levenshtein distance as an integer.
Notes
-----
This computes the Levenshtein distance, i.e. the number of edits to change
seq1 into seq2. If a maximum distance is passed, the algorithm will stop as soon
as the number of edits goes above the value. This allows for an earlier break
and speeds calculations up.
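Examples
--------
A minimal illustration:
>>> levenshtein_distance("density", "densty")
1
>>> levenshtein_distance("density", "temperature", max_dist=3)
4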
"""
size_x = len(seq1) + 1
size_y = len(seq2) + 1
if max_dist is None:
max_dist = max(size_x, size_y)
if abs(size_x - size_y) > max_dist:
return max_dist + 1
matrix = np.zeros((size_x, size_y), dtype=int)
for x in range(size_x):
matrix[x, 0] = x
for y in range(size_y):
matrix[0, y] = y
for x in range(1, size_x):
for y in range(1, size_y):
if seq1[x - 1] == seq2[y - 1]:
matrix[x, y] = min(
matrix[x - 1, y] + 1, matrix[x - 1, y - 1], matrix[x, y - 1] + 1
)
else:
matrix[x, y] = min(
matrix[x - 1, y] + 1, matrix[x - 1, y - 1] + 1, matrix[x, y - 1] + 1
)
# Early break: the minimum distance is already larger than
# the maximum allowed value, so we can return safely.
if matrix[x].min() > max_dist:
return max_dist + 1
return matrix[size_x - 1, size_y - 1]
| [] | [] | [
"OMP_NUM_THREADS"
] | [] | ["OMP_NUM_THREADS"] | python | 1 | 0 | |
rpc.go | package logger
import (
"errors"
"fmt"
"os"
"strings"
)
// RPC converts the given message into an error that is suitable for return from an
// RPC function:
//
// 1. In a production environment, a generic error message is returned to avoid
// leaking implementation details to public callers.
//
// 2. In a debug environment, the provided message is formatted and returned to
// facilitate cross-microservice debugging.
//
// Either way, the message is logged.
func RPC(msg string, args ...interface{}) error {
if len(msg) > 0 {
display := strings.ToUpper(msg[:1]) + msg[1:] + "."
Error(display, args...)
}
if os.Getenv("DEBUG") != "" {
return fmt.Errorf(msg, args...)
}
return errors.New("failed to complete RPC request")
}
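// A minimal usage sketch (the handler below is hypothetical, not part of this
// package):
//
//	func (s *userService) Get(id string) (*User, error) {
//		u, err := s.store.Lookup(id)
//		if err != nil {
//			// Callers see the generic message unless DEBUG is set.
//			return nil, RPC("looking up user %s: %v", id, err)
//		}
//		return u, nil
//	}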
| [
"\"DEBUG\""
] | [] | [
"DEBUG"
] | [] | ["DEBUG"] | go | 1 | 0 | |
pkg/controller/sparkapplication/controller_test.go | /*
Copyright 2017 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sparkapplication
import (
"context"
"fmt"
"os"
"os/exec"
"strings"
"testing"
"time"
"github.com/prometheus/client_golang/prometheus"
prometheus_model "github.com/prometheus/client_model/go"
"github.com/stretchr/testify/assert"
apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/informers"
kubeclientfake "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
crdclientfake "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/fake"
crdinformers "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions"
"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config"
"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/util"
)
func newFakeController(app *v1beta2.SparkApplication, pods ...*apiv1.Pod) (*Controller, *record.FakeRecorder) {
crdclientfake.AddToScheme(scheme.Scheme)
crdClient := crdclientfake.NewSimpleClientset()
kubeClient := kubeclientfake.NewSimpleClientset()
util.IngressCapabilities = map[string]bool{"networking.k8s.io/v1": true}
informerFactory := crdinformers.NewSharedInformerFactory(crdClient, 0*time.Second)
recorder := record.NewFakeRecorder(3)
kubeClient.CoreV1().Nodes().Create(context.TODO(), &apiv1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
Status: apiv1.NodeStatus{
Addresses: []apiv1.NodeAddress{
{
Type: apiv1.NodeExternalIP,
Address: "12.34.56.78",
},
},
},
}, metav1.CreateOptions{})
podInformerFactory := informers.NewSharedInformerFactory(kubeClient, 0*time.Second)
controller := newSparkApplicationController(crdClient, kubeClient, informerFactory, podInformerFactory, recorder,
&util.MetricConfig{}, "", "", nil, true)
informer := informerFactory.Sparkoperator().V1beta2().SparkApplications().Informer()
if app != nil {
informer.GetIndexer().Add(app)
}
podInformer := podInformerFactory.Core().V1().Pods().Informer()
for _, pod := range pods {
if pod != nil {
podInformer.GetIndexer().Add(pod)
}
}
return controller, recorder
}
func TestOnAdd(t *testing.T) {
ctrl, _ := newFakeController(nil)
app := &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
Status: v1beta2.SparkApplicationStatus{},
}
ctrl.onAdd(app)
item, _ := ctrl.queue.Get()
defer ctrl.queue.Done(item)
key, ok := item.(string)
assert.True(t, ok)
expectedKey, _ := cache.MetaNamespaceKeyFunc(app)
assert.Equal(t, expectedKey, key)
ctrl.queue.Forget(item)
}
func TestOnUpdate(t *testing.T) {
ctrl, recorder := newFakeController(nil)
appTemplate := &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
ResourceVersion: "1",
},
Spec: v1beta2.SparkApplicationSpec{
Mode: v1beta2.ClusterMode,
Image: stringptr("foo-image:v1"),
Executor: v1beta2.ExecutorSpec{
Instances: int32ptr(1),
},
},
}
// Case1: Same Spec.
copyWithSameSpec := appTemplate.DeepCopy()
copyWithSameSpec.Status.ExecutionAttempts = 3
copyWithSameSpec.ResourceVersion = "2"
ctrl.onUpdate(appTemplate, copyWithSameSpec)
// Verify that the SparkApplication was enqueued but no spec update events fired.
item, _ := ctrl.queue.Get()
key, ok := item.(string)
assert.True(t, ok)
expectedKey, _ := cache.MetaNamespaceKeyFunc(appTemplate)
assert.Equal(t, expectedKey, key)
ctrl.queue.Forget(item)
ctrl.queue.Done(item)
assert.Equal(t, 0, len(recorder.Events))
// Case2: Spec update failed.
copyWithSpecUpdate := appTemplate.DeepCopy()
copyWithSpecUpdate.Spec.Image = stringptr("foo-image:v2")
copyWithSpecUpdate.ResourceVersion = "2"
ctrl.onUpdate(appTemplate, copyWithSpecUpdate)
// Verify that update failed due to non-existence of SparkApplication.
assert.Equal(t, 1, len(recorder.Events))
event := <-recorder.Events
assert.True(t, strings.Contains(event, "SparkApplicationSpecUpdateFailed"))
// Case3: Spec update successful.
ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(appTemplate.Namespace).Create(context.TODO(), appTemplate, metav1.CreateOptions{})
ctrl.onUpdate(appTemplate, copyWithSpecUpdate)
// Verify App was enqueued.
item, _ = ctrl.queue.Get()
key, ok = item.(string)
assert.True(t, ok)
expectedKey, _ = cache.MetaNamespaceKeyFunc(appTemplate)
assert.Equal(t, expectedKey, key)
ctrl.queue.Forget(item)
ctrl.queue.Done(item)
// Verify that the update succeeded.
assert.Equal(t, 1, len(recorder.Events))
event = <-recorder.Events
assert.True(t, strings.Contains(event, "SparkApplicationSpecUpdateProcessed"))
// Verify the SparkApplication state was updated to InvalidatingState.
app, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(appTemplate.Namespace).Get(context.TODO(), appTemplate.Name, metav1.GetOptions{})
assert.Nil(t, err)
assert.Equal(t, v1beta2.InvalidatingState, app.Status.AppState.State)
}
func TestOnDelete(t *testing.T) {
ctrl, recorder := newFakeController(nil)
app := &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
Status: v1beta2.SparkApplicationStatus{},
}
ctrl.onAdd(app)
ctrl.queue.Get()
ctrl.onDelete(app)
ctrl.queue.ShutDown()
item, _ := ctrl.queue.Get()
defer ctrl.queue.Done(item)
assert.True(t, item == nil)
event := <-recorder.Events
assert.True(t, strings.Contains(event, "SparkApplicationDeleted"))
ctrl.queue.Forget(item)
}
func TestHelperProcessFailure(t *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
os.Exit(2)
}
func TestHelperProcessSuccess(t *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
os.Exit(0)
}
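// The two helpers above implement the standard exec.Cmd stubbing pattern:
// execCommand is swapped (see TestSyncSparkApplication_SubmissionFailed below)
// for a command that re-runs this test binary with GO_WANT_HELPER_PROCESS=1,
// so the stubbed submission command exits with a fixed status (2 or 0) instead
// of invoking anything real.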
func fetchCounterValue(m *prometheus.CounterVec, labels map[string]string) float64 {
pb := &prometheus_model.Metric{}
m.With(labels).Write(pb)
return pb.GetCounter().GetValue()
}
type metrics struct {
submitMetricCount float64
runningMetricCount float64
successMetricCount float64
failedMetricCount float64
}
type executorMetrics struct {
runningMetricCount float64
successMetricCount float64
failedMetricCount float64
}
func TestSyncSparkApplication_SubmissionFailed(t *testing.T) {
os.Setenv(sparkHomeEnvVar, "/spark")
os.Setenv(kubernetesServiceHostEnvVar, "localhost")
os.Setenv(kubernetesServicePortEnvVar, "443")
restartPolicyOnFailure := v1beta2.RestartPolicy{
Type: v1beta2.OnFailure,
OnFailureRetries: int32ptr(1),
OnFailureRetryInterval: int64ptr(100),
OnSubmissionFailureRetryInterval: int64ptr(100),
OnSubmissionFailureRetries: int32ptr(1),
}
app := &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
Spec: v1beta2.SparkApplicationSpec{
RestartPolicy: restartPolicyOnFailure,
},
Status: v1beta2.SparkApplicationStatus{
AppState: v1beta2.ApplicationState{
State: v1beta2.NewState,
ErrorMessage: "",
},
},
}
ctrl, recorder := newFakeController(app)
_, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
execCommand = func(command string, args ...string) *exec.Cmd {
cs := []string{"-test.run=TestHelperProcessFailure", "--", command}
cs = append(cs, args...)
cmd := exec.Command(os.Args[0], cs...)
cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
return cmd
}
// Attempt 1
err = ctrl.syncSparkApplication("default/foo")
updatedApp, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), app.Name, metav1.GetOptions{})
assert.Equal(t, v1beta2.FailedSubmissionState, updatedApp.Status.AppState.State)
assert.Equal(t, int32(1), updatedApp.Status.SubmissionAttempts)
assert.Equal(t, float64(1), fetchCounterValue(ctrl.metrics.sparkAppCount, map[string]string{}))
assert.Equal(t, float64(0), fetchCounterValue(ctrl.metrics.sparkAppSubmitCount, map[string]string{}))
assert.Equal(t, float64(1), fetchCounterValue(ctrl.metrics.sparkAppFailedSubmissionCount, map[string]string{}))
event := <-recorder.Events
assert.True(t, strings.Contains(event, "SparkApplicationAdded"))
event = <-recorder.Events
assert.True(t, strings.Contains(event, "SparkApplicationSubmissionFailed"))
// Attempt 2: Retry again.
updatedApp.Status.LastSubmissionAttemptTime = metav1.Time{Time: metav1.Now().Add(-100 * time.Second)}
ctrl, recorder = newFakeController(updatedApp)
_, err = ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), updatedApp, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
err = ctrl.syncSparkApplication("default/foo")
// Verify that the application failed again.
updatedApp, err = ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), app.Name, metav1.GetOptions{})
assert.Nil(t, err)
assert.Equal(t, v1beta2.FailedSubmissionState, updatedApp.Status.AppState.State)
assert.Equal(t, int32(2), updatedApp.Status.SubmissionAttempts)
assert.Equal(t, float64(0), fetchCounterValue(ctrl.metrics.sparkAppSubmitCount, map[string]string{}))
event = <-recorder.Events
assert.True(t, strings.Contains(event, "SparkApplicationSubmissionFailed"))
// Attempt 3: No more retries.
updatedApp.Status.LastSubmissionAttemptTime = metav1.Time{Time: metav1.Now().Add(-100 * time.Second)}
ctrl, recorder = newFakeController(updatedApp)
_, err = ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), updatedApp, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
err = ctrl.syncSparkApplication("default/foo")
// Verify that the application failed again.
updatedApp, err = ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), app.Name, metav1.GetOptions{})
assert.Nil(t, err)
assert.Equal(t, v1beta2.FailedState, updatedApp.Status.AppState.State)
// No more submission attempts made.
assert.Equal(t, int32(2), updatedApp.Status.SubmissionAttempts)
}
func TestValidateDetectsNodeSelectorSuccessNoSelector(t *testing.T) {
ctrl, _ := newFakeController(nil)
app := &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
}
err := ctrl.validateSparkApplication(app)
assert.Nil(t, err)
}
func TestValidateDetectsNodeSelectorSuccessNodeSelectorAtAppLevel(t *testing.T) {
ctrl, _ := newFakeController(nil)
app := &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
Spec: v1beta2.SparkApplicationSpec{
NodeSelector: map[string]string{"mynode": "mygift"},
},
}
err := ctrl.validateSparkApplication(app)
assert.Nil(t, err)
}
func TestValidateDetectsNodeSelectorSuccessNodeSelectorAtPodLevel(t *testing.T) {
ctrl, _ := newFakeController(nil)
app := &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
Spec: v1beta2.SparkApplicationSpec{
Driver: v1beta2.DriverSpec{
SparkPodSpec: v1beta2.SparkPodSpec{
NodeSelector: map[string]string{"mynode": "mygift"},
},
},
},
}
err := ctrl.validateSparkApplication(app)
assert.Nil(t, err)
app.Spec.Executor = v1beta2.ExecutorSpec{
SparkPodSpec: v1beta2.SparkPodSpec{
NodeSelector: map[string]string{"mynode": "mygift"},
},
}
err = ctrl.validateSparkApplication(app)
assert.Nil(t, err)
}
func TestValidateDetectsNodeSelectorFailsAppAndPodLevel(t *testing.T) {
ctrl, _ := newFakeController(nil)
app := &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
Spec: v1beta2.SparkApplicationSpec{
NodeSelector: map[string]string{"mynode": "mygift"},
Driver: v1beta2.DriverSpec{
SparkPodSpec: v1beta2.SparkPodSpec{
NodeSelector: map[string]string{"mynode": "mygift"},
},
},
},
}
err := ctrl.validateSparkApplication(app)
assert.NotNil(t, err)
app.Spec.Executor = v1beta2.ExecutorSpec{
SparkPodSpec: v1beta2.SparkPodSpec{
NodeSelector: map[string]string{"mynode": "mygift"},
},
}
err = ctrl.validateSparkApplication(app)
assert.NotNil(t, err)
}
func TestShouldRetry(t *testing.T) {
type testcase struct {
app *v1beta2.SparkApplication
shouldRetry bool
}
testFn := func(test testcase, t *testing.T) {
shouldRetry := shouldRetry(test.app)
assert.Equal(t, test.shouldRetry, shouldRetry)
}
restartPolicyAlways := v1beta2.RestartPolicy{
Type: v1beta2.Always,
OnSubmissionFailureRetryInterval: int64ptr(100),
OnFailureRetryInterval: int64ptr(100),
}
restartPolicyNever := v1beta2.RestartPolicy{
Type: v1beta2.Never,
}
restartPolicyOnFailure := v1beta2.RestartPolicy{
Type: v1beta2.OnFailure,
OnFailureRetries: int32ptr(1),
OnFailureRetryInterval: int64ptr(100),
OnSubmissionFailureRetryInterval: int64ptr(100),
OnSubmissionFailureRetries: int32ptr(2),
}
testcases := []testcase{
{
app: &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
}},
shouldRetry: false,
},
{
app: &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
Spec: v1beta2.SparkApplicationSpec{
RestartPolicy: restartPolicyAlways,
},
Status: v1beta2.SparkApplicationStatus{
AppState: v1beta2.ApplicationState{
State: v1beta2.SucceedingState,
},
},
},
shouldRetry: true,
},
{
app: &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
Spec: v1beta2.SparkApplicationSpec{
RestartPolicy: restartPolicyOnFailure,
},
Status: v1beta2.SparkApplicationStatus{
AppState: v1beta2.ApplicationState{
State: v1beta2.SucceedingState,
},
},
},
shouldRetry: false,
},
{
app: &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
Spec: v1beta2.SparkApplicationSpec{
RestartPolicy: restartPolicyOnFailure,
},
Status: v1beta2.SparkApplicationStatus{
AppState: v1beta2.ApplicationState{
State: v1beta2.FailingState,
},
},
},
shouldRetry: true,
},
{
app: &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
Spec: v1beta2.SparkApplicationSpec{
RestartPolicy: restartPolicyNever,
},
Status: v1beta2.SparkApplicationStatus{
AppState: v1beta2.ApplicationState{
State: v1beta2.FailingState,
},
},
},
shouldRetry: false,
},
{
app: &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
Spec: v1beta2.SparkApplicationSpec{
RestartPolicy: restartPolicyNever,
},
Status: v1beta2.SparkApplicationStatus{
AppState: v1beta2.ApplicationState{
State: v1beta2.FailedSubmissionState,
},
},
},
shouldRetry: false,
},
{
app: &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
Spec: v1beta2.SparkApplicationSpec{
RestartPolicy: restartPolicyOnFailure,
},
Status: v1beta2.SparkApplicationStatus{
AppState: v1beta2.ApplicationState{
State: v1beta2.FailedSubmissionState,
},
},
},
shouldRetry: true,
},
{
app: &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
Spec: v1beta2.SparkApplicationSpec{
RestartPolicy: restartPolicyAlways,
},
Status: v1beta2.SparkApplicationStatus{
AppState: v1beta2.ApplicationState{
State: v1beta2.PendingRerunState,
},
},
},
shouldRetry: false,
},
}
for _, test := range testcases {
testFn(test, t)
}
}
func TestSyncSparkApplication_SubmissionSuccess(t *testing.T) {
type testcase struct {
app *v1beta2.SparkApplication
expectedState v1beta2.ApplicationStateType
}
os.Setenv(sparkHomeEnvVar, "/spark")
os.Setenv(kubernetesServiceHostEnvVar, "localhost")
os.Setenv(kubernetesServicePortEnvVar, "443")
testFn := func(test testcase, t *testing.T) {
ctrl, _ := newFakeController(test.app)
_, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(test.app.Namespace).Create(context.TODO(), test.app, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
execCommand = func(command string, args ...string) *exec.Cmd {
cs := []string{"-test.run=TestHelperProcessSuccess", "--", command}
cs = append(cs, args...)
cmd := exec.Command(os.Args[0], cs...)
cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
return cmd
}
err = ctrl.syncSparkApplication(fmt.Sprintf("%s/%s", test.app.Namespace, test.app.Name))
assert.Nil(t, err)
updatedApp, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(test.app.Namespace).Get(context.TODO(), test.app.Name, metav1.GetOptions{})
assert.Nil(t, err)
assert.Equal(t, test.expectedState, updatedApp.Status.AppState.State)
if test.app.Status.AppState.State == v1beta2.NewState {
assert.Equal(t, float64(1), fetchCounterValue(ctrl.metrics.sparkAppCount, map[string]string{}))
}
if test.expectedState == v1beta2.SubmittedState {
assert.Equal(t, float64(1), fetchCounterValue(ctrl.metrics.sparkAppSubmitCount, map[string]string{}))
}
}
restartPolicyAlways := v1beta2.RestartPolicy{
Type: v1beta2.Always,
OnSubmissionFailureRetryInterval: int64ptr(100),
OnFailureRetryInterval: int64ptr(100),
}
restartPolicyNever := v1beta2.RestartPolicy{
Type: v1beta2.Never,
}
restartPolicyOnFailure := v1beta2.RestartPolicy{
Type: v1beta2.OnFailure,
OnFailureRetries: int32ptr(1),
OnFailureRetryInterval: int64ptr(100),
OnSubmissionFailureRetryInterval: int64ptr(100),
OnSubmissionFailureRetries: int32ptr(2),
}
testcases := []testcase{
{
app: &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
}},
expectedState: v1beta2.SubmittedState,
},
{
app: &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
Spec: v1beta2.SparkApplicationSpec{
RestartPolicy: restartPolicyAlways,
},
Status: v1beta2.SparkApplicationStatus{
AppState: v1beta2.ApplicationState{
State: v1beta2.SucceedingState,
},
},
},
expectedState: v1beta2.PendingRerunState,
},
{
app: &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
Spec: v1beta2.SparkApplicationSpec{
RestartPolicy: restartPolicyAlways,
},
Status: v1beta2.SparkApplicationStatus{
AppState: v1beta2.ApplicationState{
State: v1beta2.PendingRerunState,
},
},
},
expectedState: v1beta2.SubmittedState,
},
{
app: &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
Spec: v1beta2.SparkApplicationSpec{
RestartPolicy: restartPolicyAlways,
},
Status: v1beta2.SparkApplicationStatus{
AppState: v1beta2.ApplicationState{
State: v1beta2.FailedSubmissionState,
},
LastSubmissionAttemptTime: metav1.Time{Time: metav1.Now().Add(-2000 * time.Second)},
},
},
expectedState: v1beta2.FailedSubmissionState,
},
{
app: &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
Spec: v1beta2.SparkApplicationSpec{
RestartPolicy: restartPolicyAlways,
},
Status: v1beta2.SparkApplicationStatus{
AppState: v1beta2.ApplicationState{
State: v1beta2.FailedSubmissionState,
},
SubmissionAttempts: 1,
LastSubmissionAttemptTime: metav1.Time{Time: metav1.Now().Add(-2000 * time.Second)},
},
},
expectedState: v1beta2.SubmittedState,
},
{
app: &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
Spec: v1beta2.SparkApplicationSpec{
RestartPolicy: restartPolicyAlways,
},
Status: v1beta2.SparkApplicationStatus{
AppState: v1beta2.ApplicationState{
State: v1beta2.FailingState,
},
ExecutionAttempts: 1,
TerminationTime: metav1.Time{Time: metav1.Now().Add(-2000 * time.Second)},
},
},
expectedState: v1beta2.PendingRerunState,
},
{
app: &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
Spec: v1beta2.SparkApplicationSpec{
RestartPolicy: restartPolicyAlways,
},
Status: v1beta2.SparkApplicationStatus{
AppState: v1beta2.ApplicationState{
State: v1beta2.FailingState,
},
TerminationTime: metav1.Time{Time: metav1.Now().Add(-2000 * time.Second)},
},
},
expectedState: v1beta2.FailingState,
},
{
app: &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
Spec: v1beta2.SparkApplicationSpec{
RestartPolicy: restartPolicyNever,
},
Status: v1beta2.SparkApplicationStatus{
AppState: v1beta2.ApplicationState{
State: v1beta2.InvalidatingState,
},
TerminationTime: metav1.Time{Time: metav1.Now().Add(-2000 * time.Second)},
},
},
expectedState: v1beta2.PendingRerunState,
},
{
app: &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
Spec: v1beta2.SparkApplicationSpec{
RestartPolicy: restartPolicyNever,
},
Status: v1beta2.SparkApplicationStatus{
AppState: v1beta2.ApplicationState{
State: v1beta2.SucceedingState,
},
},
},
expectedState: v1beta2.CompletedState,
},
{
app: &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
Spec: v1beta2.SparkApplicationSpec{
RestartPolicy: restartPolicyNever,
},
Status: v1beta2.SparkApplicationStatus{
AppState: v1beta2.ApplicationState{
State: v1beta2.NewState,
},
},
},
expectedState: v1beta2.SubmittedState,
},
{
app: &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
Status: v1beta2.SparkApplicationStatus{
AppState: v1beta2.ApplicationState{
State: v1beta2.FailingState,
},
ExecutionAttempts: 2,
},
Spec: v1beta2.SparkApplicationSpec{
RestartPolicy: restartPolicyOnFailure,
},
},
expectedState: v1beta2.FailedState,
},
{
app: &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
Status: v1beta2.SparkApplicationStatus{
AppState: v1beta2.ApplicationState{
State: v1beta2.FailingState,
},
ExecutionAttempts: 1,
TerminationTime: metav1.Now(),
},
Spec: v1beta2.SparkApplicationSpec{
RestartPolicy: restartPolicyOnFailure,
},
},
expectedState: v1beta2.FailingState,
},
{
app: &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
Status: v1beta2.SparkApplicationStatus{
AppState: v1beta2.ApplicationState{
State: v1beta2.FailingState,
},
ExecutionAttempts: 1,
TerminationTime: metav1.Time{Time: metav1.Now().Add(-2000 * time.Second)},
},
Spec: v1beta2.SparkApplicationSpec{
RestartPolicy: restartPolicyOnFailure,
},
},
expectedState: v1beta2.PendingRerunState,
},
{
app: &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
Status: v1beta2.SparkApplicationStatus{
AppState: v1beta2.ApplicationState{
State: v1beta2.FailedSubmissionState,
},
SubmissionAttempts: 3,
},
Spec: v1beta2.SparkApplicationSpec{
RestartPolicy: restartPolicyOnFailure,
},
},
expectedState: v1beta2.FailedState,
},
{
app: &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
Status: v1beta2.SparkApplicationStatus{
AppState: v1beta2.ApplicationState{
State: v1beta2.FailedSubmissionState,
},
SubmissionAttempts: 1,
LastSubmissionAttemptTime: metav1.Now(),
},
Spec: v1beta2.SparkApplicationSpec{
RestartPolicy: restartPolicyOnFailure,
},
},
expectedState: v1beta2.FailedSubmissionState,
},
{
app: &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
Status: v1beta2.SparkApplicationStatus{
AppState: v1beta2.ApplicationState{
State: v1beta2.FailedSubmissionState,
},
SubmissionAttempts: 1,
LastSubmissionAttemptTime: metav1.Time{Time: metav1.Now().Add(-2000 * time.Second)},
},
Spec: v1beta2.SparkApplicationSpec{
RestartPolicy: restartPolicyOnFailure,
},
},
expectedState: v1beta2.SubmittedState,
},
}
for _, test := range testcases {
testFn(test, t)
}
}
func TestSyncSparkApplication_ExecutingState(t *testing.T) {
type testcase struct {
appName string
oldAppStatus v1beta2.ApplicationStateType
oldExecutorStatus map[string]v1beta2.ExecutorState
driverPod *apiv1.Pod
executorPod *apiv1.Pod
expectedAppState v1beta2.ApplicationStateType
expectedExecutorState map[string]v1beta2.ExecutorState
expectedAppMetrics metrics
expectedExecutorMetrics executorMetrics
}
os.Setenv(kubernetesServiceHostEnvVar, "localhost")
os.Setenv(kubernetesServicePortEnvVar, "443")
appName := "foo"
driverPodName := appName + "-driver"
app := &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: appName,
Namespace: "test",
},
Spec: v1beta2.SparkApplicationSpec{
RestartPolicy: v1beta2.RestartPolicy{
Type: v1beta2.Never,
},
},
Status: v1beta2.SparkApplicationStatus{
AppState: v1beta2.ApplicationState{
State: v1beta2.SubmittedState,
ErrorMessage: "",
},
DriverInfo: v1beta2.DriverInfo{
PodName: driverPodName,
},
ExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState},
},
}
testcases := []testcase{
{
appName: appName,
oldAppStatus: v1beta2.SubmittedState,
oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState},
expectedAppState: v1beta2.FailingState,
expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorFailedState},
expectedAppMetrics: metrics{
failedMetricCount: 1,
},
expectedExecutorMetrics: executorMetrics{
failedMetricCount: 1,
},
},
{
appName: appName,
oldAppStatus: v1beta2.SubmittedState,
oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState},
driverPod: &apiv1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: driverPodName,
Namespace: "test",
Labels: map[string]string{
config.SparkRoleLabel: config.SparkDriverRole,
config.SparkAppNameLabel: appName,
},
ResourceVersion: "1",
},
Status: apiv1.PodStatus{
Phase: apiv1.PodRunning,
},
},
executorPod: &apiv1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "exec-1",
Namespace: "test",
Labels: map[string]string{
config.SparkRoleLabel: config.SparkExecutorRole,
config.SparkAppNameLabel: appName,
},
ResourceVersion: "1",
},
Status: apiv1.PodStatus{
Phase: apiv1.PodSucceeded,
},
},
expectedAppState: v1beta2.RunningState,
expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState},
expectedAppMetrics: metrics{
runningMetricCount: 1,
},
expectedExecutorMetrics: executorMetrics{
successMetricCount: 1,
},
},
{
appName: appName,
oldAppStatus: v1beta2.RunningState,
oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState},
driverPod: &apiv1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: driverPodName,
Namespace: "test",
Labels: map[string]string{
config.SparkRoleLabel: config.SparkDriverRole,
config.SparkAppNameLabel: appName,
},
ResourceVersion: "1",
},
Status: apiv1.PodStatus{
Phase: apiv1.PodRunning,
ContainerStatuses: []apiv1.ContainerStatus{
{
Name: config.SparkDriverContainerName,
State: apiv1.ContainerState{
Running: &apiv1.ContainerStateRunning{},
},
},
{
Name: "sidecar",
State: apiv1.ContainerState{
Terminated: &apiv1.ContainerStateTerminated{
ExitCode: 0,
},
},
},
},
},
},
executorPod: &apiv1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "exec-1",
Namespace: "test",
Labels: map[string]string{
config.SparkRoleLabel: config.SparkExecutorRole,
config.SparkAppNameLabel: appName,
},
ResourceVersion: "1",
},
Status: apiv1.PodStatus{
Phase: apiv1.PodSucceeded,
},
},
expectedAppState: v1beta2.RunningState,
expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState},
expectedAppMetrics: metrics{},
expectedExecutorMetrics: executorMetrics{
successMetricCount: 1,
},
},
{
appName: appName,
oldAppStatus: v1beta2.RunningState,
oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState},
driverPod: &apiv1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: driverPodName,
Namespace: "test",
Labels: map[string]string{
config.SparkRoleLabel: config.SparkDriverRole,
config.SparkAppNameLabel: appName,
},
ResourceVersion: "1",
},
Status: apiv1.PodStatus{
Phase: apiv1.PodRunning,
ContainerStatuses: []apiv1.ContainerStatus{
{
Name: config.SparkDriverContainerName,
State: apiv1.ContainerState{
Terminated: &apiv1.ContainerStateTerminated{
ExitCode: 0,
},
},
},
{
Name: "sidecar",
State: apiv1.ContainerState{
Running: &apiv1.ContainerStateRunning{},
},
},
},
},
},
executorPod: &apiv1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "exec-1",
Namespace: "test",
Labels: map[string]string{
config.SparkRoleLabel: config.SparkExecutorRole,
config.SparkAppNameLabel: appName,
},
ResourceVersion: "1",
},
Status: apiv1.PodStatus{
Phase: apiv1.PodSucceeded,
},
},
expectedAppState: v1beta2.SucceedingState,
expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState},
expectedAppMetrics: metrics{
successMetricCount: 1,
},
expectedExecutorMetrics: executorMetrics{
successMetricCount: 1,
},
},
{
appName: appName,
oldAppStatus: v1beta2.RunningState,
oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState},
driverPod: &apiv1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: driverPodName,
Namespace: "test",
Labels: map[string]string{
config.SparkRoleLabel: config.SparkDriverRole,
config.SparkAppNameLabel: appName,
},
ResourceVersion: "1",
},
Status: apiv1.PodStatus{
Phase: apiv1.PodRunning,
ContainerStatuses: []apiv1.ContainerStatus{
{
Name: config.SparkDriverContainerName,
State: apiv1.ContainerState{
Terminated: &apiv1.ContainerStateTerminated{
ExitCode: 137,
Reason: "OOMKilled",
},
},
},
{
Name: "sidecar",
State: apiv1.ContainerState{
Running: &apiv1.ContainerStateRunning{},
},
},
},
},
},
executorPod: &apiv1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "exec-1",
Namespace: "test",
Labels: map[string]string{
config.SparkRoleLabel: config.SparkExecutorRole,
config.SparkAppNameLabel: appName,
},
ResourceVersion: "1",
},
Status: apiv1.PodStatus{
Phase: apiv1.PodSucceeded,
},
},
expectedAppState: v1beta2.FailingState,
expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState},
expectedAppMetrics: metrics{
failedMetricCount: 1,
},
expectedExecutorMetrics: executorMetrics{
successMetricCount: 1,
},
},
{
appName: appName,
oldAppStatus: v1beta2.RunningState,
oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState},
driverPod: &apiv1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: driverPodName,
Namespace: "test",
Labels: map[string]string{
config.SparkRoleLabel: config.SparkDriverRole,
config.SparkAppNameLabel: appName,
},
ResourceVersion: "1",
},
Status: apiv1.PodStatus{
Phase: apiv1.PodFailed,
ContainerStatuses: []apiv1.ContainerStatus{
{
Name: config.SparkDriverContainerName,
State: apiv1.ContainerState{
Terminated: &apiv1.ContainerStateTerminated{
ExitCode: 137,
Reason: "OOMKilled",
},
},
},
},
},
},
executorPod: &apiv1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "exec-1",
Namespace: "test",
Labels: map[string]string{
config.SparkRoleLabel: config.SparkExecutorRole,
config.SparkAppNameLabel: appName,
},
ResourceVersion: "1",
},
Status: apiv1.PodStatus{
Phase: apiv1.PodFailed,
ContainerStatuses: []apiv1.ContainerStatus{
{
Name: config.SparkExecutorContainerName,
State: apiv1.ContainerState{
Terminated: &apiv1.ContainerStateTerminated{
ExitCode: 137,
Reason: "OOMKilled",
},
},
},
},
},
},
expectedAppState: v1beta2.FailingState,
expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorFailedState},
expectedAppMetrics: metrics{
failedMetricCount: 1,
},
expectedExecutorMetrics: executorMetrics{
failedMetricCount: 1,
},
},
{
appName: appName,
oldAppStatus: v1beta2.RunningState,
oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState},
driverPod: &apiv1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: driverPodName,
Namespace: "test",
Labels: map[string]string{
config.SparkRoleLabel: config.SparkDriverRole,
config.SparkAppNameLabel: appName,
},
ResourceVersion: "1",
},
Status: apiv1.PodStatus{
Phase: apiv1.PodFailed,
ContainerStatuses: []apiv1.ContainerStatus{
{
Name: config.SparkDriverContainerName,
State: apiv1.ContainerState{
Terminated: &apiv1.ContainerStateTerminated{
ExitCode: 0,
},
},
},
{
Name: "sidecar",
State: apiv1.ContainerState{
Terminated: &apiv1.ContainerStateTerminated{
ExitCode: 137,
Reason: "OOMKilled",
},
},
},
},
},
},
executorPod: &apiv1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "exec-1",
Namespace: "test",
Labels: map[string]string{
config.SparkRoleLabel: config.SparkExecutorRole,
config.SparkAppNameLabel: appName,
},
ResourceVersion: "1",
},
Status: apiv1.PodStatus{
Phase: apiv1.PodSucceeded,
},
},
expectedAppState: v1beta2.SucceedingState,
expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState},
expectedAppMetrics: metrics{
successMetricCount: 1,
},
expectedExecutorMetrics: executorMetrics{
successMetricCount: 1,
},
},
{
appName: appName,
oldAppStatus: v1beta2.FailingState,
oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorFailedState},
expectedAppState: v1beta2.FailedState,
expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorFailedState},
expectedAppMetrics: metrics{},
expectedExecutorMetrics: executorMetrics{},
},
{
appName: appName,
oldAppStatus: v1beta2.RunningState,
oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState},
driverPod: &apiv1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: driverPodName,
Namespace: "test",
Labels: map[string]string{
config.SparkRoleLabel: config.SparkDriverRole,
config.SparkAppNameLabel: appName,
},
ResourceVersion: "1",
},
Status: apiv1.PodStatus{
Phase: apiv1.PodSucceeded,
},
},
executorPod: &apiv1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "exec-1",
Namespace: "test",
Labels: map[string]string{
config.SparkRoleLabel: config.SparkExecutorRole,
config.SparkAppNameLabel: appName,
},
ResourceVersion: "1",
},
Status: apiv1.PodStatus{
Phase: apiv1.PodSucceeded,
},
},
expectedAppState: v1beta2.SucceedingState,
expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState},
expectedAppMetrics: metrics{
successMetricCount: 1,
},
expectedExecutorMetrics: executorMetrics{
successMetricCount: 1,
},
},
{
appName: appName,
oldAppStatus: v1beta2.SucceedingState,
oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState},
expectedAppState: v1beta2.CompletedState,
expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState},
expectedAppMetrics: metrics{},
expectedExecutorMetrics: executorMetrics{},
},
{
appName: appName,
oldAppStatus: v1beta2.SubmittedState,
oldExecutorStatus: map[string]v1beta2.ExecutorState{},
driverPod: &apiv1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: driverPodName,
Namespace: "test",
Labels: map[string]string{
config.SparkRoleLabel: config.SparkDriverRole,
config.SparkAppNameLabel: appName,
},
},
Status: apiv1.PodStatus{
Phase: apiv1.PodUnknown,
},
},
executorPod: &apiv1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "exec-1",
Namespace: "test",
Labels: map[string]string{
config.SparkRoleLabel: config.SparkExecutorRole,
config.SparkAppNameLabel: appName,
},
},
Status: apiv1.PodStatus{
Phase: apiv1.PodPending,
},
},
expectedAppState: v1beta2.UnknownState,
expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorPendingState},
expectedAppMetrics: metrics{},
expectedExecutorMetrics: executorMetrics{},
},
{
appName: appName,
oldAppStatus: v1beta2.CompletedState,
oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorPendingState},
driverPod: &apiv1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: driverPodName,
Namespace: "test",
Labels: map[string]string{
config.SparkRoleLabel: config.SparkDriverRole,
config.SparkAppNameLabel: appName,
},
ResourceVersion: "1",
},
Status: apiv1.PodStatus{
Phase: apiv1.PodSucceeded,
},
},
expectedAppState: v1beta2.CompletedState,
expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState},
expectedAppMetrics: metrics{},
expectedExecutorMetrics: executorMetrics{
successMetricCount: 1,
},
},
{
appName: appName,
oldAppStatus: v1beta2.RunningState,
oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState},
driverPod: &apiv1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: driverPodName,
Namespace: "test",
Labels: map[string]string{
config.SparkRoleLabel: config.SparkDriverRole,
config.SparkAppNameLabel: appName,
},
ResourceVersion: "1",
},
Status: apiv1.PodStatus{
Phase: apiv1.PodRunning,
},
},
expectedAppState: v1beta2.RunningState,
expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorUnknownState},
expectedAppMetrics: metrics{},
expectedExecutorMetrics: executorMetrics{},
},
}
testFn := func(test testcase, t *testing.T) {
app.Status.AppState.State = test.oldAppStatus
app.Status.ExecutorState = test.oldExecutorStatus
app.Name = test.appName
app.Status.ExecutionAttempts = 1
ctrl, _ := newFakeController(app, test.driverPod, test.executorPod)
_, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
if test.driverPod != nil {
ctrl.kubeClient.CoreV1().Pods(app.Namespace).Create(context.TODO(), test.driverPod, metav1.CreateOptions{})
}
if test.executorPod != nil {
ctrl.kubeClient.CoreV1().Pods(app.Namespace).Create(context.TODO(), test.executorPod, metav1.CreateOptions{})
}
err = ctrl.syncSparkApplication(fmt.Sprintf("%s/%s", app.Namespace, app.Name))
assert.Nil(t, err)
// Verify application and executor states.
updatedApp, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), app.Name, metav1.GetOptions{})
assert.Equal(t, test.expectedAppState, updatedApp.Status.AppState.State)
assert.Equal(t, test.expectedExecutorState, updatedApp.Status.ExecutorState)
// Validate error message if the driver pod failed.
if test.driverPod != nil && test.driverPod.Status.Phase == apiv1.PodFailed {
if len(test.driverPod.Status.ContainerStatuses) > 0 && test.driverPod.Status.ContainerStatuses[0].State.Terminated != nil {
if test.driverPod.Status.ContainerStatuses[0].State.Terminated.ExitCode != 0 {
assert.Equal(t, updatedApp.Status.AppState.ErrorMessage,
fmt.Sprintf("driver container failed with ExitCode: %d, Reason: %s", test.driverPod.Status.ContainerStatuses[0].State.Terminated.ExitCode, test.driverPod.Status.ContainerStatuses[0].State.Terminated.Reason))
}
} else {
assert.Equal(t, updatedApp.Status.AppState.ErrorMessage, "driver container status missing")
}
}
// Verify application metrics.
assert.Equal(t, test.expectedAppMetrics.runningMetricCount, ctrl.metrics.sparkAppRunningCount.Value(map[string]string{}))
assert.Equal(t, test.expectedAppMetrics.successMetricCount, fetchCounterValue(ctrl.metrics.sparkAppSuccessCount, map[string]string{}))
assert.Equal(t, test.expectedAppMetrics.submitMetricCount, fetchCounterValue(ctrl.metrics.sparkAppSubmitCount, map[string]string{}))
assert.Equal(t, test.expectedAppMetrics.failedMetricCount, fetchCounterValue(ctrl.metrics.sparkAppFailureCount, map[string]string{}))
// Verify executor metrics.
assert.Equal(t, test.expectedExecutorMetrics.runningMetricCount, ctrl.metrics.sparkAppExecutorRunningCount.Value(map[string]string{}))
assert.Equal(t, test.expectedExecutorMetrics.successMetricCount, fetchCounterValue(ctrl.metrics.sparkAppExecutorSuccessCount, map[string]string{}))
assert.Equal(t, test.expectedExecutorMetrics.failedMetricCount, fetchCounterValue(ctrl.metrics.sparkAppExecutorFailureCount, map[string]string{}))
}
for _, test := range testcases {
testFn(test, t)
}
}
func TestSyncSparkApplication_ApplicationExpired(t *testing.T) {
os.Setenv(kubernetesServiceHostEnvVar, "localhost")
os.Setenv(kubernetesServicePortEnvVar, "443")
appName := "foo"
driverPodName := appName + "-driver"
now := time.Now()
terminationTime := now.Add(-2 * time.Second)
app := &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: appName,
Namespace: "test",
},
Spec: v1beta2.SparkApplicationSpec{
RestartPolicy: v1beta2.RestartPolicy{
Type: v1beta2.Never,
},
TimeToLiveSeconds: int64ptr(1),
},
Status: v1beta2.SparkApplicationStatus{
AppState: v1beta2.ApplicationState{
State: v1beta2.CompletedState,
ErrorMessage: "",
},
DriverInfo: v1beta2.DriverInfo{
PodName: driverPodName,
},
TerminationTime: metav1.Time{
Time: terminationTime,
},
ExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState},
},
}
ctrl, _ := newFakeController(app)
_, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
err = ctrl.syncSparkApplication(fmt.Sprintf("%s/%s", app.Namespace, app.Name))
assert.Nil(t, err)
_, err = ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), app.Name, metav1.GetOptions{})
assert.True(t, errors.IsNotFound(err))
}
func TestIsNextRetryDue(t *testing.T) {
// Failure cases.
assert.False(t, isNextRetryDue(nil, 3, metav1.Time{Time: metav1.Now().Add(-100 * time.Second)}))
assert.False(t, isNextRetryDue(int64ptr(5), 0, metav1.Time{Time: metav1.Now().Add(-100 * time.Second)}))
assert.False(t, isNextRetryDue(int64ptr(5), 3, metav1.Time{}))
// Not enough time passed.
assert.False(t, isNextRetryDue(int64ptr(50), 3, metav1.Time{Time: metav1.Now().Add(-100 * time.Second)}))
assert.True(t, isNextRetryDue(int64ptr(50), 3, metav1.Time{Time: metav1.Now().Add(-151 * time.Second)}))
}
func TestIngressWithSubpathAffectsSparkConfiguration(t *testing.T) {
os.Setenv(kubernetesServiceHostEnvVar, "localhost")
os.Setenv(kubernetesServicePortEnvVar, "443")
appName := "ingressaffectssparkconfig"
app := &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: appName,
Namespace: "test",
},
Spec: v1beta2.SparkApplicationSpec{
RestartPolicy: v1beta2.RestartPolicy{
Type: v1beta2.Never,
},
TimeToLiveSeconds: int64ptr(1),
},
Status: v1beta2.SparkApplicationStatus{},
}
ctrl, _ := newFakeController(app)
ctrl.ingressURLFormat = "example.com/{{$appNamespace}}/{{$appName}}"
ctrl.enableUIService = true
_, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
err = ctrl.syncSparkApplication(fmt.Sprintf("%s/%s", app.Namespace, app.Name))
assert.Nil(t, err)
deployedApp, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), app.Name, metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
ingresses, err := ctrl.kubeClient.NetworkingV1().Ingresses(app.Namespace).List(context.TODO(), metav1.ListOptions{})
if err != nil {
t.Fatal(err)
}
if ingresses == nil || ingresses.Items == nil || len(ingresses.Items) != 1 {
t.Fatal("The ingress does not exist, has no items, or wrong amount of items")
}
if ingresses.Items[0].Spec.Rules[0].IngressRuleValue.HTTP.Paths[0].Path != "/"+app.Namespace+"/"+app.Name+"(/|$)(.*)" {
t.Fatal("The ingress subpath was not created successfully.")
}
// The controller doesn't sync changes to the sparkConf performed by submitSparkApplication back to the kubernetes API server.
if deployedApp.Spec.SparkConf["spark.ui.proxyBase"] != "/"+app.Namespace+"/"+app.Name {
t.Log("The spark configuration does not reflect the subpath expected by the ingress")
}
if deployedApp.Spec.SparkConf["spark.ui.proxyRedirectUri"] != "/" {
t.Log("The spark configuration does not reflect the proxyRedirectUri expected by the ingress")
}
}
func TestIngressWithClassName(t *testing.T) {
os.Setenv(kubernetesServiceHostEnvVar, "localhost")
os.Setenv(kubernetesServicePortEnvVar, "443")
appName := "ingressaffectssparkconfig"
app := &v1beta2.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Name: appName,
Namespace: "test",
},
Spec: v1beta2.SparkApplicationSpec{
RestartPolicy: v1beta2.RestartPolicy{
Type: v1beta2.Never,
},
TimeToLiveSeconds: int64ptr(1),
},
Status: v1beta2.SparkApplicationStatus{},
}
ctrl, _ := newFakeController(app)
ctrl.ingressURLFormat = "{{$appNamespace}}.{{$appName}}.example.com"
ctrl.ingressClassName = "nginx"
ctrl.enableUIService = true
_, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
err = ctrl.syncSparkApplication(fmt.Sprintf("%s/%s", app.Namespace, app.Name))
assert.Nil(t, err)
_, err = ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), app.Name, metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
ingresses, err := ctrl.kubeClient.NetworkingV1().Ingresses(app.Namespace).List(context.TODO(), metav1.ListOptions{})
if err != nil {
t.Fatal(err)
}
if ingresses == nil || ingresses.Items == nil || len(ingresses.Items) != 1 {
t.Fatal("The ingress does not exist, has no items, or wrong amount of items")
}
if ingresses.Items[0].Spec.IngressClassName == nil || *ingresses.Items[0].Spec.IngressClassName != "nginx" {
t.Fatal("The ingressClassName does not exists, or wrong value is set")
}
}
func stringptr(s string) *string {
return &s
}
func int32ptr(n int32) *int32 {
return &n
}
| ["\"GO_WANT_HELPER_PROCESS\"", "\"GO_WANT_HELPER_PROCESS\""] | [] | ["GO_WANT_HELPER_PROCESS"] | [] | ["GO_WANT_HELPER_PROCESS"] | go | 1 | 0 | |
core/src/main/java/com/customized/libs/core/libs/system/SystemInvoker.java | package com.customized.libs.core.libs.system;
import com.alibaba.fastjson.JSON;
import java.util.Map;
import java.util.Properties;
/**
* @author yan
*/
public class SystemInvoker {
public static void main(String[] args) {
        // System environment variables
Map<String, String> getenv = System.getenv();
System.out.println(JSON.toJSONString(getenv));
        // Built-in Java system properties
Properties properties = System.getProperties();
System.out.println(JSON.toJSONString(properties));
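        // Illustrative usage (not part of the original class): a single value can also be
        // read from either source; "JAVA_HOME" and "java.version" are just example keys.
        //   String javaHome = System.getenv("JAVA_HOME");            // null when the variable is unset
        //   String javaVersion = System.getProperty("java.version"); // always provided by the JVM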
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 | |
src/typhoonae/redis/tests/test_datastore_redis.py | # -*- coding: utf-8 -*-
#
# Copyright 2010 Tobias Rodäbel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the Datastore Redis stub."""
from google.appengine.api import datastore_types
from google.appengine.datastore import datastore_index
from google.appengine.ext import db
import datetime
import google.appengine.api.apiproxy_stub
import google.appengine.api.apiproxy_stub_map
import google.appengine.api.datastore_admin
import google.appengine.api.datastore_errors
import google.appengine.api.users
import google.appengine.datastore.entity_pb
import google.appengine.runtime.apiproxy_errors
import os
import time
import threading
import typhoonae.redis.datastore_redis_stub
import unittest
class DatastoreRedisTestCaseBase(unittest.TestCase):
"""Base class for testing the TyphoonAE Datastore Redis API proxy stub."""
def setUp(self):
"""Sets up test environment and regisers stub."""
# Set required environment variables
os.environ['APPLICATION_ID'] = 'test'
os.environ['AUTH_DOMAIN'] = 'mydomain.local'
os.environ['USER_EMAIL'] = '[email protected]'
os.environ['USER_IS_ADMIN'] = '1'
# Read index definitions.
index_yaml = open(
os.path.join(os.path.dirname(__file__), 'index.yaml'), 'r')
try:
indexes = datastore_index.IndexDefinitionsToProtos(
'test',
datastore_index.ParseIndexDefinitions(index_yaml).indexes)
except TypeError:
indexes = []
index_yaml.close()
# Register API proxy stub.
google.appengine.api.apiproxy_stub_map.apiproxy = (
google.appengine.api.apiproxy_stub_map.APIProxyStubMap())
datastore = typhoonae.redis.datastore_redis_stub.DatastoreRedisStub(
'test', indexes)
try:
google.appengine.api.apiproxy_stub_map.apiproxy.RegisterStub(
'datastore_v3', datastore)
except google.appengine.runtime.apiproxy_errors.ApplicationError, e:
raise RuntimeError('These tests require a running Redis server '
'(%s)' % e)
self.stub = google.appengine.api.apiproxy_stub_map.apiproxy.GetStub(
'datastore_v3')
def tearDown(self):
"""Clears all data."""
self.stub.Clear()
class StoredEntityTestCase(DatastoreRedisTestCaseBase):
"""Testing entity wrapper class."""
def testStoredEntity(self):
"""Initializes a stored entity instance."""
class MyModel(db.Model):
contents = db.StringProperty()
key = MyModel(contents="Some contents.").save()
entity = db.get(key)
protobuf = db.model_to_protobuf(entity)
stored_entity = typhoonae.redis.datastore_redis_stub._StoredEntity(
protobuf)
self.assertEqual(protobuf, stored_entity.protobuf)
self.assertEqual(
'j\x15j\x04testr\r\x0b\x12\x07MyModel\x18\x01\x0cr\x1e\x1a\x08'
'contents \x00*\x10\x1a\x0eSome contents.\x82\x01\r\x0b\x12\x07'
'MyModel\x18\x01\x0c',
stored_entity.encoded_protobuf)
self.assertEqual({u'contents': u'Some contents.'}, stored_entity.native)
self.assertTrue(
isinstance(
stored_entity.key(),
google.appengine.datastore.entity_pb.Reference))
class DatastoreRedisTestCase(DatastoreRedisTestCaseBase):
"""Testing the TyphoonAE Datastore Redis API proxy stub."""
def testStub(self):
"""Tests whether our stub is registered."""
self.assertNotEqual(None, self.stub)
def testConnectionError(self):
"""Tries to connect to wrong host and port."""
self.assertRaises(
google.appengine.runtime.apiproxy_errors.ApplicationError,
typhoonae.redis.datastore_redis_stub.DatastoreRedisStub,
'test', [], host='nowhere', port=10987)
def test__ValidateAppId(self):
"""Validates an application id."""
self.assertRaises(
google.appengine.api.datastore_errors.BadRequestError,
self.stub._DatastoreRedisStub__ValidateAppId,
'foo')
def test_GetAppIdNamespaceKindForKey(self):
"""Gets encoded app and kind from given key."""
ref = google.appengine.datastore.entity_pb.Reference()
ref.set_app(u'test')
ref.set_name_space(u'namespace')
path = ref.mutable_path()
elem = path.add_element()
elem.set_type('Foo')
elem = path.add_element()
elem.set_type('Bar')
self.assertEqual(
u'test!namespace\x08Bar',
self.stub._GetAppIdNamespaceKindForKey(ref))
def test_GetKeyForRedisKey(self):
"""Inititalizes an entity_pb.Reference from a Redis key."""
key = self.stub._GetKeyForRedisKey(
u'test!Foo\x08\t0000000000002\x07Bar\x08bar')
self.assertEqual(
datastore_types.Key.from_path(
u'Foo', 2, u'Bar', u'bar', _app=u'test'),
key)
def test_GetRedisKeyForKey(self):
"""Creates a valid Redis key."""
ref = google.appengine.datastore.entity_pb.Reference()
ref.set_app(u'test')
ref.set_name_space(u'namespace')
path = ref.mutable_path()
elem = path.add_element()
elem.set_type('Foo')
elem.set_id(1)
elem = path.add_element()
elem.set_type('Bar')
elem.set_id(2)
self.assertEqual(
u'test!Foo\x08\t0000000000001\x07Bar\x08\t0000000000002',
self.stub._GetRedisKeyForKey(ref))
def testPutGetDelete(self):
"""Puts/gets/deletes entities into/from the datastore."""
class Author(db.Model):
name = db.StringProperty()
class Book(db.Model):
title = db.StringProperty()
a = Author(name='Mark Twain', key_name='marktwain')
a.put()
b = Book(parent=a, title="The Adventures Of Tom Sawyer")
b.put()
key = b.key()
del a, b
book = google.appengine.api.datastore.Get(key)
self.assertEqual(
"{u'title': u'The Adventures Of Tom Sawyer'}", str(book))
author = google.appengine.api.datastore.Get(book.parent())
self.assertEqual("{u'name': u'Mark Twain'}", str(author))
del book
google.appengine.api.datastore.Delete(key)
self.assertRaises(
google.appengine.api.datastore_errors.EntityNotFoundError,
google.appengine.api.datastore.Get,
key)
del author
mark_twain = Author.get_by_key_name('marktwain')
self.assertEqual('Author', mark_twain.kind())
self.assertEqual('Mark Twain', mark_twain.name)
mark_twain.delete()
def testGetEntitiesByNameAndID(self):
"""Tries to retrieve entities by name or numeric id."""
class Book(db.Model):
title = db.StringProperty()
Book(title="The Hitchhiker's Guide to the Galaxy").put()
book = Book.get_by_id(1)
self.assertEqual("The Hitchhiker's Guide to the Galaxy", book.title)
Book(key_name="solong",
title="So Long, and Thanks for All the Fish").put()
book = Book.get_by_key_name("solong")
self.assertEqual("So Long, and Thanks for All the Fish", book.title)
def testLocking(self):
"""Acquires and releases transaction locks."""
self.stub._AcquireLockForEntityGroup('foo', timeout=1)
self.stub._ReleaseLockForEntityGroup('foo')
self.stub._AcquireLockForEntityGroup('bar', timeout=2)
t = time.time()
self.stub._AcquireLockForEntityGroup('bar', timeout=1)
assert time.time() > t + 1
self.stub._ReleaseLockForEntityGroup('bar')
def testTransactions(self):
"""Executes 1000 transactions in 10 concurrent threads."""
class Counter(db.Model):
value = db.IntegerProperty()
counter = Counter(key_name='counter', value=0)
counter.put()
del counter
class Incrementer(threading.Thread):
def run(self):
def tx():
counter = Counter.get_by_key_name('counter')
counter.value += 1
counter.put()
for i in range(100):
db.run_in_transaction(tx)
incrementers = []
for i in range(10):
incrementers.append(Incrementer())
incrementers[i].start()
for incr in incrementers:
incr.join()
counter = Counter.get_by_key_name('counter')
self.assertEqual(1000, counter.value)
def testLargerTransaction(self):
"""Executes multiple operations in one transaction."""
class Author(db.Model):
name = db.StringProperty()
class Book(db.Model):
title = db.StringProperty()
def tx():
a = Author(name='Mark Twain', key_name='marktwain')
a.put()
b = Book(parent=a, title="The Adventures Of Tom Sawyer")
b.put()
b.delete()
db.run_in_transaction(tx)
self.assertEqual(1, Author.all().count())
self.assertEqual(0, Book.all().count())
marktwain = Author.get_by_key_name('marktwain')
def query_tx():
query = db.Query()
query.filter('__key__ = ', marktwain.key())
author = query.get()
self.assertRaises(
google.appengine.api.datastore_errors.BadRequestError,
db.run_in_transaction, query_tx)
def testKindlessAncestorQueries(self):
"""Perform kindless queries for entities with a given ancestor."""
class Author(db.Model):
name = db.StringProperty()
class Book(db.Model):
title = db.StringProperty()
author = Author(name='Mark Twain', key_name='marktwain').put()
book = Book(parent=author, title="The Adventures Of Tom Sawyer").put()
query = db.Query()
query.ancestor(author)
query.filter('__key__ = ', book)
self.assertEqual(book, query.get().key())
book = query.get()
book.delete()
self.assertEqual(0, query.count())
def testRunQuery(self):
"""Runs some simple queries."""
class Employee(db.Model):
first_name = db.StringProperty(required=True)
last_name = db.StringProperty(required=True)
manager = db.SelfReferenceProperty()
manager = Employee(first_name='John', last_name='Dowe')
manager.put()
employee = Employee(
first_name=u'John', last_name='Appleseed', manager=manager.key())
employee.put()
# Perform a very simple query.
query = Employee.all()
self.assertEqual(set(['John Dowe', 'John Appleseed']),
set(['%s %s' % (e.first_name, e.last_name)
for e in query.run()]))
# Rename the manager.
manager.first_name = 'Clara'
manager.put()
# And perform the same query as above.
query = Employee.all()
self.assertEqual(set(['Clara Dowe', 'John Appleseed']),
set(['%s %s' % (e.first_name, e.last_name)
for e in query.run()]))
# Get only one entity.
query = Employee.all()
self.assertEqual(u'Dowe', query.get().last_name)
self.assertEqual(u'Dowe', query.fetch(1)[0].last_name)
# Delete our entities.
employee.delete()
manager.delete()
# Our query results should now be empty.
query = Employee.all()
self.assertEqual([], list(query.run()))
def testCount(self):
"""Counts query results."""
class Balloon(db.Model):
color = db.StringProperty()
Balloon(color='Red').put()
self.assertEqual(1, Balloon.all().count())
Balloon(color='Blue').put()
self.assertEqual(2, Balloon.all().count())
def testQueryWithFilter(self):
"""Tries queries with filters."""
class SomeKind(db.Model):
value = db.StringProperty()
foo = SomeKind(value="foo")
foo.put()
bar = SomeKind(value="bar")
bar.put()
class Artifact(db.Model):
description = db.StringProperty(required=True)
age = db.IntegerProperty()
vase = Artifact(description="Mycenaean stirrup vase", age=3300)
vase.put()
helmet = Artifact(description="Spartan full size helmet", age=2400)
helmet.put()
unknown = Artifact(description="Some unknown artifact")
unknown.put()
query = Artifact.all().filter('age =', 2400)
self.assertEqual(
['Spartan full size helmet'],
[artifact.description for artifact in query.run()])
query = db.GqlQuery("SELECT * FROM Artifact WHERE age = :1", 3300)
self.assertEqual(
['Mycenaean stirrup vase'],
[artifact.description for artifact in query.run()])
query = Artifact.all().filter('age IN', [2400, 3300])
self.assertEqual(
set(['Spartan full size helmet', 'Mycenaean stirrup vase']),
set([artifact.description for artifact in query.run()]))
vase.delete()
query = Artifact.all().filter('age IN', [2400])
self.assertEqual(
['Spartan full size helmet'],
[artifact.description for artifact in query.run()])
helmet.age = 2300
helmet.put()
query = Artifact.all().filter('age =', 2300)
self.assertEqual([2300], [artifact.age for artifact in query.run()])
query = Artifact.all()
self.assertEqual(
set([2300L, None]),
set([artifact.age for artifact in query.run()]))
def testQueryForKeysOnly(self):
"""Queries for entity keys instead of full entities."""
class Asset(db.Model):
name = db.StringProperty(required=True)
price = db.FloatProperty(required=True)
lamp = Asset(name="Bedside Lamp", price=10.45)
lamp.put()
towel = Asset(name="Large Towel", price=3.50)
towel.put()
query = Asset.all(keys_only=True)
self.assertEqual(
set([
datastore_types.Key.from_path(u'Asset', 1, _app=u'test'),
datastore_types.Key.from_path(u'Asset', 2, _app=u'test')]),
set(query.run()))
def testQueryWithOrder(self):
"""Tests queries with sorting."""
class Planet(db.Model):
name = db.StringProperty()
moon_count = db.IntegerProperty()
distance = db.FloatProperty()
earth = Planet(name="Earth", distance=93.0, moon_count=1)
earth.put()
saturn = Planet(name="Saturn", distance=886.7, moon_count=18)
saturn.put()
venus = Planet(name="Venus", distance=67.2, moon_count=0)
venus.put()
mars = Planet(name="Mars", distance=141.6, moon_count=2)
mars.put()
mercury = Planet(name="Mercury", distance=36.0, moon_count=0)
mercury.put()
query = (Planet.all()
.filter('moon_count <', 10)
.order('moon_count')
.order('-name')
.order('distance'))
self.assertEqual(
[u'Venus', u'Mercury', u'Earth', u'Mars'],
[planet.name for planet in query.run()]
)
query = Planet.all().filter('distance >', 100).order('-distance')
self.assertEqual(
['Saturn', 'Mars'],
[planet.name for planet in query.run()]
)
query = Planet.all().filter('distance <=', 93).order('distance')
self.assertEqual(
['Mercury', 'Venus', 'Earth'],
[planet.name for planet in query.run()]
)
query = (Planet.all()
.filter('distance >', 80.0)
.filter('distance <', 150)
.order('distance'))
self.assertEqual(
['Earth', 'Mars'],
[planet.name for planet in query.run()])
query = Planet.all().filter('distance >=', 93.0).order('distance')
self.assertEqual(
[u'Earth', u'Mars', u'Saturn'],
[planet.name for planet in query.run()])
query = Planet.all().filter('distance ==', 93.0)
self.assertEqual(
[u'Earth'], [planet.name for planet in query.run()])
def testQueriesWithMultipleFiltersAndOrders(self):
"""Tests queries with multiple filters and orders."""
class Artist(db.Model):
name = db.StringProperty()
class Album(db.Model):
title = db.StringProperty()
class Song(db.Model):
artist = db.ReferenceProperty(Artist)
album = db.ReferenceProperty(Album)
duration = db.StringProperty()
genre = db.CategoryProperty()
title = db.StringProperty()
beatles = Artist(name="The Beatles")
beatles.put()
abbeyroad = Album(title="Abbey Road")
abbeyroad.put()
herecomesthesun = Song(
artist=beatles.key(),
album=abbeyroad.key(),
duration="3:06",
genre=db.Category("Pop"),
title="Here Comes The Sun")
herecomesthesun.put()
query = (Song.all()
.filter('artist =', beatles)
.filter('album =', abbeyroad))
self.assertEqual(u'Here Comes The Sun', query.get().title)
cometogether = Song(
artist=beatles.key(),
album=abbeyroad.key(),
duration="4:21",
genre=db.Category("Pop"),
title="Come Together")
cometogether.put()
something = Song(
artist=beatles.key(),
album=abbeyroad.key(),
duration="3:03",
genre=db.Category("Pop"),
title="Something")
something.put()
because1 = Song(
key_name='because',
artist=beatles.key(),
album=abbeyroad.key(),
duration="2:46",
genre=db.Category("Pop"),
title="Because")
because1.put()
because2= Song(
artist=beatles.key(),
album=abbeyroad.key(),
duration="2:46",
genre=db.Category("Pop"),
title="Because")
because2.put()
query = (Song.all()
.filter('artist =', beatles)
.filter('album =', abbeyroad)
.order('title'))
self.assertEqual(
[u'Because', u'Because', u'Come Together', u'Here Comes The Sun',
u'Something'],
[song.title for song in query.run()])
query = Song.all().filter('title !=', 'Because').order('title')
self.assertEqual(
[u'Come Together', u'Here Comes The Sun', u'Something'],
[song.title for song in query.run()])
query = Song.all().filter('title >', 'Come').order('title')
self.assertEqual(
[u'Come Together', u'Here Comes The Sun', u'Something'],
[song.title for song in query.run()])
something.delete()
query = Song.all().filter('title >', 'Come').order('title')
self.assertEqual(
[u'Come Together', u'Here Comes The Sun'],
[song.title for song in query.run()])
def testUnicode(self):
"""Tests unicode."""
class Employee(db.Model):
first_name = db.StringProperty(required=True)
last_name = db.StringProperty(required=True)
employee = Employee(first_name=u'Björn', last_name=u'Müller')
employee.put()
query = Employee.all(keys_only=True).filter('first_name =', u'Björn')
self.assertEqual(
datastore_types.Key.from_path(u'Employee', 1, _app=u'test'),
query.get())
def testListProperties(self):
"""Tests list properties."""
class Numbers(db.Model):
values = db.ListProperty(int)
Numbers(values=[0, 1, 2, 3]).put()
Numbers(values=[4, 5, 6, 7]).put()
query = Numbers.all().filter('values =', 0)
self.assertEqual([0, 1, 2, 3], query.get().values)
query = db.GqlQuery(
"SELECT * FROM Numbers WHERE values > :1 AND values < :2", 4, 7)
self.assertEqual([4, 5, 6, 7], query.get().values)
class Issue(db.Model):
reviewers = db.ListProperty(db.Email)
me = db.Email('[email protected]')
you = db.Email('[email protected]')
issue = Issue(reviewers=[me, you])
issue.put()
query = db.GqlQuery(
"SELECT * FROM Issue WHERE reviewers = :1",
db.Email('[email protected]'))
self.assertEqual(1, query.count())
query = db.GqlQuery(
"SELECT * FROM Issue WHERE reviewers = :1",
'[email protected]')
self.assertEqual(1, query.count())
query = db.GqlQuery(
"SELECT * FROM Issue WHERE reviewers = :1",
db.Email('[email protected]'))
self.assertEqual(0, query.count())
def testStringListProperties(self):
"""Tests string list properties."""
class Pizza(db.Model):
topping = db.StringListProperty()
Pizza(topping=["tomatoe", "cheese"]).put()
Pizza(topping=["tomatoe", "cheese", "salami"]).put()
Pizza(topping=["tomatoe", "cheese", "prosciutto"]).put()
query = Pizza.all(keys_only=True).filter('topping =', "salami")
self.assertEqual(1, query.count())
query = Pizza.all(keys_only=True).filter('topping =', "cheese")
self.assertEqual(3, query.count())
query = Pizza.all().filter('topping IN', ["salami", "prosciutto"])
self.assertEqual(2, query.count())
key = datastore_types.Key.from_path('Pizza', 1)
query = db.GqlQuery("SELECT * FROM Pizza WHERE __key__ IN :1", [key])
pizza = query.get()
self.assertEqual(["tomatoe", "cheese"], pizza.topping)
pizza.delete()
query = db.GqlQuery("SELECT * FROM Pizza WHERE __key__ IN :1", [key])
self.assertEqual(0, query.count())
    def testVariousPropertyTypes(self):
"""Tests various property types."""
class Note(db.Model):
timestamp = db.DateTimeProperty(auto_now=True)
description = db.StringProperty()
author_email = db.EmailProperty()
location = db.GeoPtProperty()
user = db.UserProperty()
Note(
description="My first note.",
author_email="[email protected]",
location="52.518,13.408",
user=google.appengine.api.users.get_current_user()
).put()
query = db.GqlQuery("SELECT * FROM Note ORDER BY timestamp DESC")
self.assertEqual(1, query.count())
query = db.GqlQuery(
"SELECT * FROM Note WHERE timestamp <= :1", datetime.datetime.now())
self.assertEqual(1, query.count())
note = query.get()
self.assertEqual("My first note.", note.description)
self.assertEqual(db.Email("[email protected]"), note.author_email)
self.assertEqual("[email protected]", note.author_email)
self.assertEqual(
datastore_types.GeoPt(52.518000000000001, 13.407999999999999),
note.location)
self.assertEqual("52.518,13.408", note.location)
del note
query = Note.all().filter(
'location =',
datastore_types.GeoPt(52.518000000000001, 13.407999999999999))
self.assertEqual(1, query.count())
query = Note.all().filter('location =', "52.518,13.408")
self.assertEqual(1, query.count())
def testQueriesWithLimit(self):
"""Retrieves a limited number of results."""
class MyModel(db.Model):
property = db.StringProperty()
for i in range(100):
MyModel(property="Random data.").put()
self.assertEqual(50, MyModel.all().count(limit=50))
def testAllocateIds(self):
""" """
class EmptyModel(db.Model):
pass
for i in xrange(0, 1000):
key = EmptyModel().put()
query = db.GqlQuery("SELECT * FROM EmptyModel")
self.assertEqual(1000, query.count())
start, end = db.allocate_ids(key, 2000)
self.assertEqual(start, 1000)
self.assertEqual(end, 2999)
def testCursors(self):
"""Tests the cursor API."""
class Integer(db.Model):
value = db.IntegerProperty()
for i in xrange(0, 2000):
Integer(value=i).put()
# Set up a simple query.
query = Integer.all()
# Fetch some results.
a = query.fetch(500)
self.assertEqual(0L, a[0].value)
self.assertEqual(499L, a[-1].value)
b = query.fetch(500, offset=500)
self.assertEqual(500L, b[0].value)
self.assertEqual(999L, b[-1].value)
# Perform several queries with a cursor.
cursor = query.cursor()
query.with_cursor(cursor)
c = query.fetch(200)
self.assertEqual(1000L, c[0].value)
self.assertEqual(1199L, c[-1].value)
query.with_cursor(query.cursor())
d = query.fetch(500)
self.assertEqual(1200L, d[0].value)
self.assertEqual(1699L, d[-1].value)
query.with_cursor(query.cursor())
self.assertEqual(1700L, query.get().value)
# Use a query with filters.
query = Integer.all().filter('value >', 500).filter('value <=', 1000)
e = query.fetch(100)
query.with_cursor(query.cursor())
e = query.fetch(50)
self.assertEqual(601, e[0].value)
self.assertEqual(650, e[-1].value)
def testGetSchema(self):
"""Infers an app's schema from the entities in the datastore."""
class Foo(db.Model):
foobar = db.IntegerProperty(default=42)
Foo().put()
entity_pbs = google.appengine.api.datastore_admin.GetSchema()
entity = google.appengine.api.datastore.Entity.FromPb(entity_pbs.pop())
self.assertEqual('Foo', entity.key().kind())
| [] | [] | ["AUTH_DOMAIN", "USER_EMAIL", "USER_IS_ADMIN", "APPLICATION_ID"] | [] | ["AUTH_DOMAIN", "USER_EMAIL", "USER_IS_ADMIN", "APPLICATION_ID"] | python | 4 | 0 | |
airone/wsgi.py | """
WSGI config for airone project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
import importlib
from django.conf import settings
from configurations.wsgi import get_wsgi_application
from airone.lib.log import Logger
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "airone.settings")
os.environ.setdefault("DJANGO_CONFIGURATION", "Dev")
for extension in settings.AIRONE["EXTENSIONS"]:
try:
importlib.import_module("%s.settings" % extension)
except ImportError:
Logger.warning("Failed to load settings %s" % extension)
application = get_wsgi_application()
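# Illustrative note (not part of the original module): the `application` callable above
# is what a WSGI server imports, e.g. `gunicorn airone.wsgi:application`, assuming the
# DJANGO_SETTINGS_MODULE/DJANGO_CONFIGURATION defaults set earlier are appropriate.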
| [] | [] | [] | [] | [] | python | 0 | 0 | |
internal/gapicgen/cmd/genbot/github.go | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package main
import (
"context"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"os/user"
"path"
"strings"
"time"
"cloud.google.com/go/internal/gapicgen/generator"
"github.com/google/go-github/v33/github"
"github.com/shurcooL/githubv4"
"golang.org/x/oauth2"
)
const (
gocloudBranchName = "regen_gocloud"
gocloudCommitTitle = "feat(all): auto-regenerate gapics"
gocloudCommitBody = `
This is an auto-generated regeneration of the gapic clients by
cloud.google.com/go/internal/gapicgen. Once the corresponding genproto PR is
submitted, genbot will update this PR with a newer dependency to the newer
version of genproto and assign reviewers to this PR.
If you have been assigned to review this PR, please:
- Ensure that the version of genproto in go.mod has been updated.
- Ensure that CI is passing. If it's failing, it requires your manual attention.
- Approve and submit this PR if you believe it's ready to ship.
`
genprotoBranchName = "regen_genproto"
genprotoCommitTitle = "feat(all): auto-regenerate .pb.go files"
genprotoCommitBody = `
This is an auto-generated regeneration of the .pb.go files by
cloud.google.com/go/internal/gapicgen. Once this PR is submitted, genbot will
update the corresponding PR to depend on the newer version of go-genproto, and
assign reviewers. Whilst this or any regen PR is open in go-genproto, genbot
will not create any more regeneration PRs. If all regen PRs are closed,
gapicgen will create a new set of regeneration PRs once per night.
If you have been assigned to review this PR, please:
- Ensure that CI is passing. If it's failing, it requires your manual attention.
- Approve and submit this PR if you believe it's ready to ship. That will prompt
genbot to assign reviewers to the google-cloud-go PR.
`
)
// githubReviewers is the list of github usernames that will be assigned to
// review the PRs.
//
// TODO(ndietz): Can we use github teams?
var githubReviewers = []string{"hongalex", "broady", "tritone", "codyoss", "tbpg"}
// PullRequest represents a GitHub pull request.
type PullRequest struct {
Author string
Title string
URL string
Created time.Time
IsOpen bool
Number int
Repo string
IsDraft bool
NodeID string
}
// GithubClient is a convenience wrapper around Github clients.
type GithubClient struct {
cV3 *github.Client
cV4 *githubv4.Client
// Username is the GitHub username. Read-only.
Username string
}
// NewGithubClient creates a new GithubClient.
func NewGithubClient(ctx context.Context, username, name, email, accessToken string) (*GithubClient, error) {
if err := setGitCreds(name, email, username, accessToken); err != nil {
return nil, err
}
ts := oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: accessToken},
)
tc := oauth2.NewClient(ctx, ts)
return &GithubClient{cV3: github.NewClient(tc), cV4: githubv4.NewClient(tc), Username: username}, nil
}
// setGitCreds stores GitHub credentials in ~/.git-credentials and configures the global git user name and email.
func setGitCreds(githubName, githubEmail, githubUsername, accessToken string) error {
u, err := user.Current()
if err != nil {
return err
}
gitCredentials := []byte(fmt.Sprintf("https://%s:%[email protected]", githubUsername, accessToken))
if err := ioutil.WriteFile(path.Join(u.HomeDir, ".git-credentials"), gitCredentials, 0644); err != nil {
return err
}
c := exec.Command("git", "config", "--global", "user.name", githubName)
c.Stdout = os.Stdout
c.Stderr = os.Stderr
c.Stdin = os.Stdin // Prevents "the input device is not a TTY" error.
c.Env = []string{
fmt.Sprintf("PATH=%s", os.Getenv("PATH")), // TODO(deklerk): Why do we need to do this? Doesn't seem to be necessary in other exec.Commands.
fmt.Sprintf("HOME=%s", os.Getenv("HOME")), // TODO(deklerk): Why do we need to do this? Doesn't seem to be necessary in other exec.Commands.
}
if err := c.Run(); err != nil {
return err
}
c = exec.Command("git", "config", "--global", "user.email", githubEmail)
c.Stdout = os.Stdout
c.Stderr = os.Stderr
c.Stdin = os.Stdin // Prevents "the input device is not a TTY" error.
c.Env = []string{
fmt.Sprintf("PATH=%s", os.Getenv("PATH")), // TODO(deklerk): Why do we need to do this? Doesn't seem to be necessary in other exec.Commands.
fmt.Sprintf("HOME=%s", os.Getenv("HOME")), // TODO(deklerk): Why do we need to do this? Doesn't seem to be necessary in other exec.Commands.
}
return c.Run()
}
// GetRegenPR finds the first regen pull request with the given status. Accepted
// statuses are: open, closed, or all.
func (gc *GithubClient) GetRegenPR(ctx context.Context, repo string, status string) (*PullRequest, error) {
log.Printf("getting %v pull requests with status %q", repo, status)
// We don't bother paginating, because it hurts our requests quota and makes
// the page slower without a lot of value.
opt := &github.PullRequestListOptions{
ListOptions: github.ListOptions{PerPage: 50},
State: status,
}
prs, _, err := gc.cV3.PullRequests.List(ctx, "googleapis", repo, opt)
if err != nil {
return nil, err
}
for _, pr := range prs {
if !strings.Contains(pr.GetTitle(), "auto-regenerate") {
continue
}
if pr.GetUser().GetLogin() != gc.Username {
continue
}
return &PullRequest{
Author: pr.GetUser().GetLogin(),
Title: pr.GetTitle(),
URL: pr.GetHTMLURL(),
Created: pr.GetCreatedAt(),
IsOpen: pr.GetState() == "open",
Number: pr.GetNumber(),
Repo: repo,
IsDraft: pr.GetDraft(),
NodeID: pr.GetNodeID(),
}, nil
}
return nil, nil
}
// CreateGenprotoPR creates a PR for a given genproto change.
//
// hasCorrespondingPR indicates that there is a corresponding google-cloud-go PR.
func (gc *GithubClient) CreateGenprotoPR(ctx context.Context, genprotoDir string, hasCorrespondingPR bool, changes []*generator.ChangeInfo) (prNumber int, _ error) {
log.Println("creating genproto PR")
var sb strings.Builder
sb.WriteString(genprotoCommitBody)
if !hasCorrespondingPR {
sb.WriteString("\n\nThere is no corresponding google-cloud-go PR.\n")
sb.WriteString(formatChanges(changes, false))
}
body := sb.String()
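	// Recreate the regen branch locally and on origin, commit all staged changes, and push the branch.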
c := exec.Command("/bin/bash", "-c", `
set -ex
git config credential.helper store # cache creds from ~/.git-credentials
git branch -D $BRANCH_NAME || true
git push -d origin $BRANCH_NAME || true
git add -A
git checkout -b $BRANCH_NAME
git commit -m "$COMMIT_TITLE" -m "$COMMIT_BODY"
git push origin $BRANCH_NAME
`)
c.Stdout = os.Stdout
c.Stderr = os.Stderr
c.Stdin = os.Stdin // Prevents "the input device is not a TTY" error.
c.Env = []string{
fmt.Sprintf("PATH=%s", os.Getenv("PATH")), // TODO(deklerk): Why do we need to do this? Doesn't seem to be necessary in other exec.Commands.
fmt.Sprintf("HOME=%s", os.Getenv("HOME")), // TODO(deklerk): Why do we need to do this? Doesn't seem to be necessary in other exec.Commands.
fmt.Sprintf("COMMIT_TITLE=%s", genprotoCommitTitle),
fmt.Sprintf("COMMIT_BODY=%s", body),
fmt.Sprintf("BRANCH_NAME=%s", genprotoBranchName),
}
c.Dir = genprotoDir
if err := c.Run(); err != nil {
return 0, err
}
	head := "googleapis:" + genprotoBranchName
base := "master"
t := genprotoCommitTitle // Because we have to take the address.
pr, _, err := gc.cV3.PullRequests.Create(ctx, "googleapis", "go-genproto", &github.NewPullRequest{
Title: &t,
Body: &body,
Head: &head,
Base: &base,
})
if err != nil {
return 0, err
}
// Can't assign the submitter of the PR as a reviewer.
var reviewers []string
for _, r := range githubReviewers {
if r != *githubUsername {
reviewers = append(reviewers, r)
}
}
if _, _, err := gc.cV3.PullRequests.RequestReviewers(ctx, "googleapis", "go-genproto", pr.GetNumber(), github.ReviewersRequest{
Reviewers: reviewers,
}); err != nil {
return 0, err
}
log.Printf("creating genproto PR... done %s\n", pr.GetHTMLURL())
return pr.GetNumber(), nil
}
// CreateGocloudPR creates a PR for a given google-cloud-go change.
func (gc *GithubClient) CreateGocloudPR(ctx context.Context, gocloudDir string, genprotoPRNum int, changes []*generator.ChangeInfo) (prNumber int, _ error) {
log.Println("creating google-cloud-go PR")
var sb strings.Builder
var draft bool
sb.WriteString(gocloudCommitBody)
if genprotoPRNum > 0 {
sb.WriteString(fmt.Sprintf("\n\nCorresponding genproto PR: https://github.com/googleapis/go-genproto/pull/%d\n", genprotoPRNum))
draft = true
} else {
sb.WriteString("\n\nThere is no corresponding genproto PR.\n")
}
sb.WriteString(formatChanges(changes, true))
body := sb.String()
c := exec.Command("/bin/bash", "-c", `
set -ex
git config credential.helper store # cache creds from ~/.git-credentials
git branch -D $BRANCH_NAME || true
git push -d origin $BRANCH_NAME || true
git add -A
git checkout -b $BRANCH_NAME
git commit -m "$COMMIT_TITLE" -m "$COMMIT_BODY"
git push origin $BRANCH_NAME
`)
c.Stdout = os.Stdout
c.Stderr = os.Stderr
c.Stdin = os.Stdin // Prevents "the input device is not a TTY" error.
c.Env = []string{
fmt.Sprintf("PATH=%s", os.Getenv("PATH")), // TODO(deklerk): Why do we need to do this? Doesn't seem to be necessary in other exec.Commands.
fmt.Sprintf("HOME=%s", os.Getenv("HOME")), // TODO(deklerk): Why do we need to do this? Doesn't seem to be necessary in other exec.Commands.
fmt.Sprintf("COMMIT_TITLE=%s", gocloudCommitTitle),
fmt.Sprintf("COMMIT_BODY=%s", body),
fmt.Sprintf("BRANCH_NAME=%s", gocloudBranchName),
}
c.Dir = gocloudDir
if err := c.Run(); err != nil {
return 0, err
}
t := gocloudCommitTitle // Because we have to take the address.
pr, _, err := gc.cV3.PullRequests.Create(ctx, "googleapis", "google-cloud-go", &github.NewPullRequest{
Title: &t,
Body: &body,
		Head:  github.String("googleapis:" + gocloudBranchName),
Base: github.String("master"),
Draft: github.Bool(draft),
})
if err != nil {
return 0, err
}
log.Printf("creating google-cloud-go PR... done %s\n", pr.GetHTMLURL())
return pr.GetNumber(), nil
}
// AmendGenprotoPR amends the given genproto PR with a link to the given
// google-cloud-go PR.
func (gc *GithubClient) AmendGenprotoPR(ctx context.Context, genprotoPRNum int, genprotoDir string, gocloudPRNum int, changes []*generator.ChangeInfo) error {
var body strings.Builder
body.WriteString(genprotoCommitBody)
body.WriteString(fmt.Sprintf("\n\nCorresponding google-cloud-go PR: googleapis/google-cloud-go#%d\n", gocloudPRNum))
body.WriteString(formatChanges(changes, false))
sBody := body.String()
c := exec.Command("/bin/bash", "-c", `
set -ex
git config credential.helper store # cache creds from ~/.git-credentials
git checkout $BRANCH_NAME
git commit --amend -m "$COMMIT_TITLE" -m "$COMMIT_BODY"
git push -f origin $BRANCH_NAME
`)
c.Stdout = os.Stdout
c.Stderr = os.Stderr
c.Stdin = os.Stdin // Prevents "the input device is not a TTY" error.
c.Env = []string{
fmt.Sprintf("PATH=%s", os.Getenv("PATH")), // TODO(deklerk): Why do we need to do this? Doesn't seem to be necessary in other exec.Commands.
fmt.Sprintf("HOME=%s", os.Getenv("HOME")), // TODO(deklerk): Why do we need to do this? Doesn't seem to be necessary in other exec.Commands.
fmt.Sprintf("COMMIT_TITLE=%s", genprotoCommitTitle),
fmt.Sprintf("COMMIT_BODY=%s", sBody),
fmt.Sprintf("BRANCH_NAME=%s", genprotoBranchName),
}
c.Dir = genprotoDir
if err := c.Run(); err != nil {
return err
}
_, _, err := gc.cV3.PullRequests.Edit(ctx, "googleapis", "go-genproto", genprotoPRNum, &github.PullRequest{
Body: &sBody,
})
return err
}
// MarkPRReadyForReview switches a draft pull request to a reviewable pull
// request.
func (gc *GithubClient) MarkPRReadyForReview(ctx context.Context, repo string, nodeID string) error {
var m struct {
MarkPullRequestReadyForReview struct {
PullRequest struct {
ID githubv4.ID
}
} `graphql:"markPullRequestReadyForReview(input: $input)"`
}
input := githubv4.MarkPullRequestReadyForReviewInput{
PullRequestID: nodeID,
}
if err := gc.cV4.Mutate(ctx, &m, input, nil); err != nil {
return err
}
return nil
}
func formatChanges(changes []*generator.ChangeInfo, onlyGapicChanges bool) string {
if len(changes) == 0 {
return ""
}
var sb strings.Builder
sb.WriteString("\nChanges:\n")
for _, c := range changes {
if onlyGapicChanges && !c.HasGapicChanges {
continue
}
sb.WriteString("- ")
ss := strings.Split(c.Body, "\n")
for i, s := range ss {
if i == 0 {
sb.WriteString(fmt.Sprintf("%s\n", s))
continue
}
if s == "" {
sb.WriteString("\n")
continue
}
sb.WriteString(fmt.Sprintf(" %s\n", s))
}
sb.WriteString("\n")
}
return sb.String()
}
| ["\"PATH\"", "\"HOME\"", "\"PATH\"", "\"HOME\"", "\"PATH\"", "\"HOME\"", "\"PATH\"", "\"HOME\"", "\"PATH\"", "\"HOME\""] | [] | ["HOME", "PATH"] | [] | ["HOME", "PATH"] | go | 2 | 0 |
pkg/volume/csi/csi_mounter_test.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package csi
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"testing"
"reflect"
api "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1beta1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
fakeclient "k8s.io/client-go/kubernetes/fake"
csiapi "k8s.io/csi-api/pkg/apis/csi/v1alpha1"
fakecsi "k8s.io/csi-api/pkg/client/clientset/versioned/fake"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
var (
testDriver = "test-driver"
testVol = "vol-123"
testns = "test-ns"
testPod = "test-pod"
testPodUID = types.UID("test-pod")
testAccount = "test-service-account"
)
func TestMounterGetPath(t *testing.T) {
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
// TODO (vladimirvivien) specName with slashes will not work
testCases := []struct {
name string
specVolumeName string
path string
}{
{
name: "simple specName",
specVolumeName: "spec-0",
path: path.Join(tmpDir, fmt.Sprintf("pods/%s/volumes/kubernetes.io~csi/%s/%s", testPodUID, "spec-0", "/mount")),
},
{
name: "specName with dots",
specVolumeName: "test.spec.1",
path: path.Join(tmpDir, fmt.Sprintf("pods/%s/volumes/kubernetes.io~csi/%s/%s", testPodUID, "test.spec.1", "/mount")),
},
}
for _, tc := range testCases {
t.Logf("test case: %s", tc.name)
registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
pv := makeTestPV(tc.specVolumeName, 10, testDriver, testVol)
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
mounter, err := plug.NewMounter(
spec,
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
volume.VolumeOptions{},
)
if err != nil {
t.Fatalf("Failed to make a new Mounter: %v", err)
}
csiMounter := mounter.(*csiMountMgr)
path := csiMounter.GetPath()
if tc.path != path {
t.Errorf("expecting path %s, got %s", tc.path, path)
}
}
}
func MounterSetUpTests(t *testing.T, podInfoEnabled bool) {
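	// podInfoEnabled toggles the CSIDriverRegistry feature gate for the duration of these tests.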
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIDriverRegistry, podInfoEnabled)()
tests := []struct {
name string
driver string
volumeContext map[string]string
expectedVolumeContext map[string]string
}{
{
name: "no pod info",
driver: "no-info",
volumeContext: nil,
expectedVolumeContext: nil,
},
{
name: "no CSIDriver -> no pod info",
driver: "unknown-driver",
volumeContext: nil,
expectedVolumeContext: nil,
},
{
name: "CSIDriver with PodInfoRequiredOnMount=nil -> no pod info",
driver: "nil",
volumeContext: nil,
expectedVolumeContext: nil,
},
{
name: "no pod info -> keep existing volumeContext",
driver: "no-info",
volumeContext: map[string]string{"foo": "bar"},
expectedVolumeContext: map[string]string{"foo": "bar"},
},
{
name: "add pod info",
driver: "info",
volumeContext: nil,
expectedVolumeContext: map[string]string{"csi.storage.k8s.io/pod.uid": "test-pod", "csi.storage.k8s.io/serviceAccount.name": "test-service-account", "csi.storage.k8s.io/pod.name": "test-pod", "csi.storage.k8s.io/pod.namespace": "test-ns"},
},
{
name: "add pod info -> keep existing volumeContext",
driver: "info",
volumeContext: map[string]string{"foo": "bar"},
expectedVolumeContext: map[string]string{"foo": "bar", "csi.storage.k8s.io/pod.uid": "test-pod", "csi.storage.k8s.io/serviceAccount.name": "test-service-account", "csi.storage.k8s.io/pod.name": "test-pod", "csi.storage.k8s.io/pod.namespace": "test-ns"},
},
}
emptyPodMountInfoVersion := ""
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
klog.Infof("Starting test %s", test.name)
fakeClient := fakeclient.NewSimpleClientset()
fakeCSIClient := fakecsi.NewSimpleClientset(
getCSIDriver("no-info", &emptyPodMountInfoVersion, nil),
getCSIDriver("info", ¤tPodInfoMountVersion, nil),
getCSIDriver("nil", nil, nil),
)
plug, tmpDir := newTestPlugin(t, fakeClient, fakeCSIClient)
defer os.RemoveAll(tmpDir)
if utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) {
// Wait until the informer in CSI volume plugin has all CSIDrivers.
wait.PollImmediate(testInformerSyncPeriod, testInformerSyncTimeout, func() (bool, error) {
return plug.csiDriverInformer.Informer().HasSynced(), nil
})
}
registerFakePlugin(test.driver, "endpoint", []string{"1.0.0"}, t)
pv := makeTestPV("test-pv", 10, test.driver, testVol)
pv.Spec.CSI.VolumeAttributes = test.volumeContext
pv.Spec.MountOptions = []string{"foo=bar", "baz=qux"}
pvName := pv.GetName()
mounter, err := plug.NewMounter(
volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly),
&api.Pod{
ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns, Name: testPod},
Spec: api.PodSpec{
ServiceAccountName: testAccount,
},
},
volume.VolumeOptions{},
)
if err != nil {
t.Fatalf("failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Fatal("failed to create CSI mounter")
}
csiMounter := mounter.(*csiMountMgr)
csiMounter.csiClient = setupClient(t, true)
attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName()))
attachment := &storage.VolumeAttachment{
ObjectMeta: meta.ObjectMeta{
Name: attachID,
},
Spec: storage.VolumeAttachmentSpec{
NodeName: "test-node",
Attacher: csiPluginName,
Source: storage.VolumeAttachmentSource{
PersistentVolumeName: &pvName,
},
},
Status: storage.VolumeAttachmentStatus{
Attached: false,
AttachError: nil,
DetachError: nil,
},
}
_, err = csiMounter.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
if err != nil {
t.Fatalf("failed to setup VolumeAttachment: %v", err)
}
// Mounter.SetUp()
fsGroup := int64(2000)
if err := csiMounter.SetUp(&fsGroup); err != nil {
t.Fatalf("mounter.Setup failed: %v", err)
}
			// Ensure the default (empty) file system type was not overridden.
if len(csiMounter.spec.PersistentVolume.Spec.CSI.FSType) != 0 {
t.Errorf("default value of file system type was overridden by type %s", csiMounter.spec.PersistentVolume.Spec.CSI.FSType)
}
path := csiMounter.GetPath()
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
// ensure call went all the way
pubs := csiMounter.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
vol, ok := pubs[csiMounter.volumeID]
if !ok {
t.Error("csi server may not have received NodePublishVolume call")
}
if vol.Path != csiMounter.GetPath() {
t.Errorf("csi server expected path %s, got %s", csiMounter.GetPath(), vol.Path)
}
if !reflect.DeepEqual(vol.MountFlags, pv.Spec.MountOptions) {
t.Errorf("csi server expected mount options %v, got %v", pv.Spec.MountOptions, vol.MountFlags)
}
if podInfoEnabled {
if !reflect.DeepEqual(vol.VolumeContext, test.expectedVolumeContext) {
t.Errorf("csi server expected volumeContext %+v, got %+v", test.expectedVolumeContext, vol.VolumeContext)
}
} else {
// CSIPodInfo feature is disabled, we expect no modifications to volumeContext.
if !reflect.DeepEqual(vol.VolumeContext, test.volumeContext) {
t.Errorf("csi server expected volumeContext %+v, got %+v", test.volumeContext, vol.VolumeContext)
}
}
})
}
}
func TestMounterSetUp(t *testing.T) {
t.Run("WithCSIPodInfo", func(t *testing.T) {
MounterSetUpTests(t, true)
})
t.Run("WithoutCSIPodInfo", func(t *testing.T) {
MounterSetUpTests(t, false)
})
}
func TestMounterSetUpWithFSGroup(t *testing.T) {
fakeClient := fakeclient.NewSimpleClientset()
plug, tmpDir := newTestPlugin(t, fakeClient, nil)
defer os.RemoveAll(tmpDir)
testCases := []struct {
name string
accessModes []api.PersistentVolumeAccessMode
readOnly bool
fsType string
setFsGroup bool
fsGroup int64
}{
{
name: "default fstype, with no fsgroup (should not apply fsgroup)",
accessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
},
readOnly: false,
fsType: "",
},
{
name: "default fstype with fsgroup (should not apply fsgroup)",
accessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
},
readOnly: false,
fsType: "",
setFsGroup: true,
fsGroup: 3000,
},
{
name: "fstype, fsgroup, RWM, ROM provided (should not apply fsgroup)",
accessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteMany,
api.ReadOnlyMany,
},
fsType: "ext4",
setFsGroup: true,
fsGroup: 3000,
},
{
name: "fstype, fsgroup, RWO, but readOnly (should not apply fsgroup)",
accessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
},
readOnly: true,
fsType: "ext4",
setFsGroup: true,
fsGroup: 3000,
},
{
name: "fstype, fsgroup, RWO provided (should apply fsgroup)",
accessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
},
fsType: "ext4",
setFsGroup: true,
fsGroup: 3000,
},
}
for i, tc := range testCases {
t.Logf("Running test %s", tc.name)
volName := fmt.Sprintf("test-vol-%d", i)
registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
pv := makeTestPV("test-pv", 10, testDriver, volName)
pv.Spec.AccessModes = tc.accessModes
pvName := pv.GetName()
spec := volume.NewSpecFromPersistentVolume(pv, tc.readOnly)
if tc.fsType != "" {
spec.PersistentVolume.Spec.CSI.FSType = tc.fsType
}
mounter, err := plug.NewMounter(
spec,
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
volume.VolumeOptions{},
)
if err != nil {
t.Fatalf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Fatal("failed to create CSI mounter")
}
csiMounter := mounter.(*csiMountMgr)
csiMounter.csiClient = setupClient(t, true)
attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName()))
attachment := makeTestAttachment(attachID, "test-node", pvName)
_, err = csiMounter.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
if err != nil {
t.Errorf("failed to setup VolumeAttachment: %v", err)
continue
}
// Mounter.SetUp()
var fsGroupPtr *int64
if tc.setFsGroup {
fsGroup := tc.fsGroup
fsGroupPtr = &fsGroup
}
if err := csiMounter.SetUp(fsGroupPtr); err != nil {
t.Fatalf("mounter.Setup failed: %v", err)
}
		// Ensure the requested file system type was not overridden.
if len(csiMounter.spec.PersistentVolume.Spec.CSI.FSType) != len(tc.fsType) {
t.Errorf("file system type was overridden by type %s", csiMounter.spec.PersistentVolume.Spec.CSI.FSType)
}
// ensure call went all the way
pubs := csiMounter.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
if pubs[csiMounter.volumeID].Path != csiMounter.GetPath() {
t.Error("csi server may not have received NodePublishVolume call")
}
}
}
func TestUnmounterTeardown(t *testing.T) {
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
pv := makeTestPV("test-pv", 10, testDriver, testVol)
// save the data file prior to unmount
dir := path.Join(getTargetPath(testPodUID, pv.ObjectMeta.Name, plug.host), "/mount")
if err := os.MkdirAll(dir, 0755); err != nil && !os.IsNotExist(err) {
t.Errorf("failed to create dir [%s]: %v", dir, err)
}
// do a fake local mount
diskMounter := util.NewSafeFormatAndMountFromHost(plug.GetPluginName(), plug.host)
if err := diskMounter.FormatAndMount("/fake/device", dir, "testfs", nil); err != nil {
t.Errorf("failed to mount dir [%s]: %v", dir, err)
}
if err := saveVolumeData(
path.Dir(dir),
volDataFileName,
map[string]string{
volDataKey.specVolID: pv.ObjectMeta.Name,
volDataKey.driverName: testDriver,
volDataKey.volHandle: testVol,
},
); err != nil {
t.Fatalf("failed to save volume data: %v", err)
}
unmounter, err := plug.NewUnmounter(pv.ObjectMeta.Name, testPodUID)
if err != nil {
t.Fatalf("failed to make a new Unmounter: %v", err)
}
csiUnmounter := unmounter.(*csiMountMgr)
csiUnmounter.csiClient = setupClient(t, true)
err = csiUnmounter.TearDownAt(dir)
if err != nil {
t.Fatal(err)
}
// ensure csi client call
pubs := csiUnmounter.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
if _, ok := pubs[csiUnmounter.volumeID]; ok {
t.Error("csi server may not have received NodeUnpublishVolume call")
}
}
func TestSaveVolumeData(t *testing.T) {
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
testCases := []struct {
name string
data map[string]string
shouldFail bool
}{
{name: "test with data ok", data: map[string]string{"key0": "val0", "_key1": "val1", "key2": "val2"}},
{name: "test with data ok 2 ", data: map[string]string{"_key0_": "val0", "&key1": "val1", "key2": "val2"}},
}
for i, tc := range testCases {
t.Logf("test case: %s", tc.name)
specVolID := fmt.Sprintf("spec-volid-%d", i)
mountDir := path.Join(getTargetPath(testPodUID, specVolID, plug.host), "/mount")
if err := os.MkdirAll(mountDir, 0755); err != nil && !os.IsNotExist(err) {
t.Errorf("failed to create dir [%s]: %v", mountDir, err)
}
err := saveVolumeData(path.Dir(mountDir), volDataFileName, tc.data)
if !tc.shouldFail && err != nil {
t.Errorf("unexpected failure: %v", err)
}
// did file get created
dataDir := getTargetPath(testPodUID, specVolID, plug.host)
file := path.Join(dataDir, volDataFileName)
if _, err := os.Stat(file); err != nil {
t.Errorf("failed to create data dir: %v", err)
}
// validate content
data, err := ioutil.ReadFile(file)
if !tc.shouldFail && err != nil {
t.Errorf("failed to read data file: %v", err)
}
jsonData := new(bytes.Buffer)
if err := json.NewEncoder(jsonData).Encode(tc.data); err != nil {
t.Errorf("failed to encode json: %v", err)
}
if string(data) != jsonData.String() {
t.Errorf("expecting encoded data %v, got %v", string(data), jsonData)
}
}
}
func getCSIDriver(name string, podInfoMountVersion *string, attachable *bool) *csiapi.CSIDriver {
return &csiapi.CSIDriver{
ObjectMeta: meta.ObjectMeta{
Name: name,
},
Spec: csiapi.CSIDriverSpec{
PodInfoOnMountVersion: podInfoMountVersion,
AttachRequired: attachable,
},
}
}
| [] | [] | [] | [] | [] | go | null | null | null |
06-translate-text/Python/text-translation/text-translation.py | from dotenv import load_dotenv
import os
import requests, json
def main():
global translator_endpoint
global cog_key
global cog_region
try:
# Get Configuration Settings
load_dotenv()
cog_key = os.getenv('COG_SERVICE_KEY')
cog_region = os.getenv('COG_SERVICE_REGION')
translator_endpoint = 'https://api.cognitive.microsofttranslator.com'
# Analyze each text file in the reviews folder
reviews_folder = 'reviews'
for file_name in os.listdir(reviews_folder):
# Read the file contents
print('\n-------------\n' + file_name)
text = open(os.path.join(reviews_folder, file_name), encoding='utf8').read()
print('\n' + text)
# Detect the language
language = GetLanguage(text)
print('Language:',language)
# Translate if not already English
if language != 'en':
translation = Translate(text, language)
print("\nTranslation:\n{}".format(translation))
except Exception as ex:
print(ex)
def GetLanguage(text):
# Default language is English
##language = 'en'
# Use the Translator detect function
path = '/detect'
url = translator_endpoint + path
# Build the request
params = {
'api-version': '3.0'
}
headers = {
'Ocp-Apim-Subscription-Key': cog_key,
'Ocp-Apim-Subscription-Region': cog_region,
'Content-type': 'application/json'
}
body = [{
'text': text
}]
# Send the request and get response
request = requests.post(url, params=params, headers=headers, json=body)
response = request.json()
# Parse JSON array and get language
language = response[0]["language"]
# Return the language
return language
def Translate(text, source_language):
translation = ''
# Use the Translator translate function
path = '/translate'
url = translator_endpoint + path
# Build the request
params = {
'api-version': '3.0',
'from': source_language,
'to': ['en']
}
headers = {
'Ocp-Apim-Subscription-Key': cog_key,
'Ocp-Apim-Subscription-Region': cog_region,
'Content-type': 'application/json'
}
body = [{
'text': text
}]
# Send the request and get response
request = requests.post(url, params=params, headers=headers, json=body)
response = request.json()
# Parse JSON array and get translation
translation = response[0]["translations"][0]["text"]
# Return the translation
return translation
if __name__ == "__main__":
    main() | [] | [] | ["COG_SERVICE_REGION", "COG_SERVICE_KEY"] | [] | ["COG_SERVICE_REGION", "COG_SERVICE_KEY"] | python | 2 | 0 |
scripts/delete_user.py | #!/usr/bin/env python
import os
import sys
import subprocess
from dataclasses import dataclass
@dataclass
class Settings:
kadmin_bin: str
service: str
realm: str
keytab_file: str
def delete_user(username):
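    # Run kadmin with the service principal and keytab to delete the given principal; a non-zero exit raises CalledProcessError.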
subprocess.run(
[
settings.kadmin_bin,
'-r', settings.realm,
'-p', settings.service,
'-kt', settings.keytab_file,
'delete_principal', username,
],
timeout=1,
check=True,
)
def main():
print('Username: ', end='', flush=True)
username = sys.stdin.readline().strip()
delete_user(username)
print(f'Deleted user: {username}')
if __name__ == '__main__':
settings = Settings(
kadmin_bin='kadmin',
service=os.getenv('AUTH_SERVICE_NAME', 'http/[email protected]'),
realm=os.getenv('AUTH_SERVICE_REALM', 'EXAMPLE.COM'),
keytab_file=os.getenv('KRB5_KTNAME', '/etc/krb5.keytab'),
)
main()
| [] | [] | ["AUTH_SERVICE_NAME", "AUTH_SERVICE_REALM", "KRB5_KTNAME"] | [] | ["AUTH_SERVICE_NAME", "AUTH_SERVICE_REALM", "KRB5_KTNAME"] | python | 3 | 0 |
examples/v2/ws-update-order/main.go | package main
import (
"log"
"os"
"time"
"context"
"github.com/venarius/bitfinex-api-go/v2"
"github.com/venarius/bitfinex-api-go/v2/websocket"
)
func SubmitTestOrder(c *websocket.Client) {
log.Printf("Submitting new order")
err := c.SubmitOrder(context.Background(), &bitfinex.OrderNewRequest{
Symbol: "tBTCUSD",
CID: time.Now().Unix() / 1000,
Amount: 0.02,
Type: "EXCHANGE LIMIT",
Price: 5000,
})
if err != nil {
log.Fatal(err)
}
}
func UpdateTestOrder(orderId int64, c *websocket.Client) {
log.Printf("Updating order")
err := c.SubmitUpdateOrder(context.Background(), &bitfinex.OrderUpdateRequest{
ID: orderId,
Amount: 0.04,
})
if err != nil {
log.Fatal(err)
}
}
func main() {
key := os.Getenv("BFX_KEY")
secret := os.Getenv("BFX_SECRET")
p := websocket.NewDefaultParameters()
p.URL = "wss://test.bitfinex.com/ws/2"
c := websocket.NewWithParams(p).Credentials(key, secret)
err := c.Connect()
if err != nil {
log.Fatalf("connecting authenticated websocket: %s", err)
}
defer c.Close()
// Begin listening to incoming messages
for obj := range c.Listen() {
switch obj.(type) {
case error:
log.Fatalf("channel closed: %s", obj)
break
case *websocket.AuthEvent:
// on authorize create new order
SubmitTestOrder(c)
case *bitfinex.OrderNew:
// new order received so update it
id := obj.(*bitfinex.OrderNew).ID
UpdateTestOrder(id, c)
default:
log.Printf("MSG RECV: %#v", obj)
}
}
time.Sleep(time.Second * 10)
}
| ["\"BFX_KEY\"", "\"BFX_SECRET\""] | [] | ["BFX_SECRET", "BFX_KEY"] | [] | ["BFX_SECRET", "BFX_KEY"] | go | 2 | 0 |
tests/simd_test.go | package tests
import (
"context"
. "github.com/modern-go/amd64"
"github.com/modern-go/test"
"github.com/modern-go/test/must"
"strconv"
"testing"
"os"
)
func Test_simd(t *testing.T) {
t.Run("end to end", test.Case(func(ctx context.Context) {
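		// Skip on Travis CI (TRAVIS_BUILD_DIR is set), where the AVX2 instructions assembled below may not be available.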
if os.Getenv("TRAVIS_BUILD_DIR") != "" {
return
}
asm := &Assembler{}
asm.Assemble(
MOV, RDI, QWORD(RSP, 8),
MOV, RSI, QWORD(RSP, 16),
MOVD, XMM0, EDI,
VPBROADCASTD, XMM0, XMM0,
VPCMPEQD, XMM1, XMM0, XMMWORD(RSI, 0),
VPCMPEQD, XMM2, XMM0, XMMWORD(RSI, 0x10),
VPCMPEQD, XMM3, XMM0, XMMWORD(RSI, 0x20),
VPCMPEQD, XMM4, XMM0, XMMWORD(RSI, 0x30),
VPACKSSDW, XMM1, XMM1, XMM2,
VPACKSSDW, XMM2, XMM3, XMM4,
VPACKSSWB, XMM1, XMM1, XMM2,
VPMOVMSKB, ECX, XMM1,
// vpcmpeqd xmm1,xmm0,XMMWORD PTR [rsi+0x40]
VPCMPEQD, XMM1, XMM0, XMMWORD(RSI, 0x40),
// vpcmpeqd xmm2,xmm0,XMMWORD PTR [rsi+0x50]
VPCMPEQD, XMM2, XMM0, XMMWORD(RSI, 0x50),
// vpcmpeqd xmm3,xmm0,XMMWORD PTR [rsi+0x60]
VPCMPEQD, XMM3, XMM0, XMMWORD(RSI, 0x60),
// vpcmpeqd xmm4,xmm0,XMMWORD PTR [rsi+0x70]
VPCMPEQD, XMM4, XMM0, XMMWORD(RSI, 0x70),
VPACKSSDW, XMM1, XMM1, XMM2,
VPACKSSDW, XMM2, XMM3, XMM4,
VPACKSSWB, XMM1, XMM1, XMM2,
// vpmovmskb eax,xmm1
VPMOVMSKB, EAX, XMM1,
// vpcmpeqd xmm1,xmm0,XMMWORD PTR [rsi+0x80]
VPCMPEQD, XMM1, XMM0, XMMWORD(RSI, 0x80),
// vpcmpeqd xmm2,xmm0,XMMWORD PTR [rsi+0x90]
VPCMPEQD, XMM2, XMM0, XMMWORD(RSI, 0x90),
// vpcmpeqd xmm3,xmm0,XMMWORD PTR [rsi+0xa0]
VPCMPEQD, XMM3, XMM0, XMMWORD(RSI, 0xa0),
// vpcmpeqd xmm4,xmm0,XMMWORD PTR [rsi+0xb0]
VPCMPEQD, XMM4, XMM0, XMMWORD(RSI, 0xb0),
VPACKSSDW, XMM1, XMM1, XMM2,
VPACKSSDW, XMM2, XMM3, XMM4,
VPACKSSWB, XMM1, XMM1, XMM2,
// vpmovmskb edx,xmm1
VPMOVMSKB, EDX, XMM1,
// vpcmpeqd xmm1,xmm0,XMMWORD PTR [rsi+0xc0]
VPCMPEQD, XMM1, XMM0, XMMWORD(RSI, 0xc0),
// vpcmpeqd xmm2,xmm0,XMMWORD PTR [rsi+0xd0]
VPCMPEQD, XMM2, XMM0, XMMWORD(RSI, 0xd0),
// vpcmpeqd xmm3,xmm0,XMMWORD PTR [rsi+0xe0]
VPCMPEQD, XMM3, XMM0, XMMWORD(RSI, 0xe0),
// vpcmpeqd xmm0,xmm0,XMMWORD PTR [rsi+0xf0]
VPCMPEQD, XMM0, XMM0, XMMWORD(RSI, 0xf0),
// vpackssdw xmm1,xmm1,xmm2
VPACKSSDW, XMM1, XMM1, XMM2,
// vpackssdw xmm0,xmm3,xmm0
VPACKSSDW, XMM0, XMM3, XMM0,
// vpacksswb xmm0,xmm1,xmm0
VPACKSSWB, XMM0, XMM1, XMM0,
// vpmovmskb esi,xmm0
VPMOVMSKB, ESI, XMM0,
SHL, RSI, IMM(0x30),
SHL, RDX, IMM(0x20),
SHL, RAX, IMM(0x10),
OR, RAX, RCX,
OR, RAX, RDX,
OR, RAX, RSI,
// mov QWORD PTR [rsp+0x18],rax
MOV, QWORD(RSP, 0x18), RAX,
RET,
)
must.Nil(asm.Error)
must.Equal([]byte{
0x48, 0x8B, 0x7c, 0x24, 0x08, // mov rdi,QWORD PTR [rsp+0x8]
0x48, 0x8B, 0x74, 0x24, 0x10, // mov rsi,QWORD PTR [rsp+0x10]
0xc5, 0xf9, 0x6e, 0xc7, // vmovd xmm0,edi
0xc4, 0xe2, 0x79, 0x58, 0xc0, // vpbroadcastd xmm0,xmm0
0xc5, 0xf9, 0x76, 0x0e, // vpcmpeqd xmm1, xmm0, xmmword ptr [rsi]
0xc5, 0xf9, 0x76, 0x56, 0x10, // vpcmpeqd xmm2,xmm0,XMMWORD PTR [rsi+0x10]
0xc5, 0xf9, 0x76, 0x5e, 0x20, // vpcmpeqd xmm3,xmm0,XMMWORD PTR [rsi+0x20]
0xc5, 0xf9, 0x76, 0x66, 0x30, // vpcmpeqd xmm4,xmm0,XMMWORD PTR [rsi+0x30]
0xc5, 0xf1, 0x6b, 0xca, // vpackssdw xmm1, xmm1, xmm2
0xc5, 0xe1, 0x6b, 0xd4, // vpackssdw xmm2, xmm3, xmm4
0xc5, 0xf1, 0x63, 0xca, // vpacksswb xmm1, xmm1, xmm2
0xc5, 0xf9, 0xd7, 0xc9, // vpmovmskb ecx, xmm1
0xc5, 0xf9, 0x76, 0x4e, 0x40, // vpcmpeqd xmm1,xmm0,XMMWORD PTR [rsi+0x40]
0xc5, 0xf9, 0x76, 0x56, 0x50, // vpcmpeqd xmm2,xmm0,XMMWORD PTR [rsi+0x50]
0xc5, 0xf9, 0x76, 0x5e, 0x60, // vpcmpeqd xmm3,xmm0,XMMWORD PTR [rsi+0x60]
0xc5, 0xf9, 0x76, 0x66, 0x70, // vpcmpeqd xmm4,xmm0,XMMWORD PTR [rsi+0x70]
0xc5, 0xf1, 0x6b, 0xca, // vpackssdw xmm1, xmm1, xmm2
0xc5, 0xe1, 0x6b, 0xd4, // vpackssdw xmm2, xmm3, xmm4
0xc5, 0xf1, 0x63, 0xca, // vpacksswb xmm1, xmm1, xmm2
0xc5, 0xf9, 0xd7, 0xc1, // vpmovmskb eax,xmm1
0xc5, 0xf9, 0x76, 0x8e,
0x80, 0x00, 0x00, 0x00, // vpcmpeqd xmm1,xmm0,XMMWORD PTR [rsi+0x80]
0xc5, 0xf9, 0x76, 0x96,
0x90, 0x00, 0x00, 0x00, // vpcmpeqd xmm2,xmm0,XMMWORD PTR [rsi+0x90]
0xc5, 0xf9, 0x76, 0x9e,
0xa0, 0x00, 0x00, 0x00, // vpcmpeqd xmm3,xmm0,XMMWORD PTR [rsi+0xa0]
0xc5, 0xf9, 0x76, 0xa6,
0xb0, 0x00, 0x00, 0x00, // vpcmpeqd xmm4,xmm0,XMMWORD PTR [rsi+0xb0]
0xc5, 0xf1, 0x6b, 0xca, // vpackssdw xmm1, xmm1, xmm2
0xc5, 0xe1, 0x6b, 0xd4, // vpackssdw xmm2, xmm3, xmm4
0xc5, 0xf1, 0x63, 0xca, // vpacksswb xmm1, xmm1, xmm2
0xc5, 0xf9, 0xd7, 0xd1, // vpmovmskb edx,xmm1
0xc5, 0xf9, 0x76, 0x8e,
0xc0, 0x00, 0x00, 0x00, // vpcmpeqd xmm1,xmm0,XMMWORD PTR [rsi+0xc0]
0xc5, 0xf9, 0x76, 0x96,
0xd0, 0x00, 0x00, 0x00, // vpcmpeqd xmm2,xmm0,XMMWORD PTR [rsi+0xd0]
0xc5, 0xf9, 0x76, 0x9e,
0xe0, 0x00, 0x00, 0x00, // vpcmpeqd xmm3,xmm0,XMMWORD PTR [rsi+0xe0]
0xc5, 0xf9, 0x76, 0x86,
0xf0, 0x00, 0x00, 0x00, // vpcmpeqd xmm0,xmm0,XMMWORD PTR [rsi+0xf0]
0xc5, 0xf1, 0x6b, 0xca, // vpackssdw xmm1,xmm1,xmm2
0xc5, 0xe1, 0x6b, 0xc0, // vpackssdw xmm0,xmm3,xmm0
0xc5, 0xf1, 0x63, 0xc0, // vpacksswb xmm0,xmm1,xmm0
0xc5, 0xf9, 0xd7, 0xf0, // vpmovmskb esi,xmm0
0x48, 0xc1, 0xe6, 0x30, // shl rsi, 0x30
0x48, 0xc1, 0xe2, 0x20, // shl rdx, 0x20
0x48, 0xc1, 0xe0, 0x10, // shl rax, 0x10
0x48, 0x09, 0xc8, // or rax, rcx
0x48, 0x09, 0xd0, // or rax, rdx,
0x48, 0x09, 0xf0, // or rax, rsi,
0x48, 0x89, 0x44, 0x24, 0x18, // mov QWORD PTR [rsp+0x18],rax
0xc3, //ret
},
asm.Buffer)
var compareEqual func(key uint32, elements *[64]uint32) (ret uint64)
asm.MakeFunc(&compareEqual)
must.Nil(asm.Error)
v1 := [64]uint32{
3, 0, 0, 3, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 3, 3}
ret := compareEqual(3, &v1)
if "1100000000000000000000000000000000000000000000000000000000001001" != strconv.FormatUint(uint64(ret), 2) {
t.Fail()
}
}))
}
| ["\"TRAVIS_BUILD_DIR\""] | [] | ["TRAVIS_BUILD_DIR"] | [] | ["TRAVIS_BUILD_DIR"] | go | 1 | 0 |
vips.go | package bimg
/*
#cgo pkg-config: vips
#include "vips.h"
*/
import "C"
import (
"errors"
"fmt"
"math"
"os"
"runtime"
"strings"
"sync"
"unsafe"
)
// VipsVersion exposes the current libvips semantic version
const VipsVersion = string(C.VIPS_VERSION)
// VipsMajorVersion exposes the current libvips major version number
const VipsMajorVersion = int(C.VIPS_MAJOR_VERSION)
// VipsMinorVersion exposes the current libvips minor version number
const VipsMinorVersion = int(C.VIPS_MINOR_VERSION)
const (
maxCacheMem = 100 * 1024 * 1024
maxCacheSize = 500
)
var (
m sync.Mutex
initialized bool
)
// VipsMemoryInfo represents the memory stats provided by libvips.
type VipsMemoryInfo struct {
Memory int64
MemoryHighwater int64
Allocations int64
}
// vipsSaveOptions represents the internal option used to talk with libvips.
type vipsSaveOptions struct {
Quality int
Compression int
Type ImageType
Interlace bool
NoProfile bool
StripMetadata bool
Lossless bool
OutputICC string // Absolute path to the output ICC profile
Interpretation Interpretation
}
type vipsWatermarkOptions struct {
Width C.int
DPI C.int
Margin C.int
NoReplicate C.int
Opacity C.float
Background [3]C.double
}
type vipsWatermarkImageOptions struct {
Left C.int
Top C.int
Opacity C.float
}
type vipsWatermarkTextOptions struct {
Text *C.char
Font *C.char
}
// Initialize is used to explicitly start libvips in thread-safe way.
// Only call this function if you have previously turned off libvips.
func Initialize() {
if C.VIPS_MAJOR_VERSION <= 7 && C.VIPS_MINOR_VERSION < 40 {
panic("unsupported libvips version!")
}
m.Lock()
runtime.LockOSThread()
defer m.Unlock()
defer runtime.UnlockOSThread()
err := C.vips_init(C.CString("bimg"))
if err != 0 {
panic("unable to start vips!")
}
// Set libvips cache params
C.vips_cache_set_max_mem(maxCacheMem)
C.vips_cache_set_max(maxCacheSize)
// Define a custom thread concurrency limit in libvips (this may generate thread-unsafe issues)
// See: https://github.com/jcupitt/libvips/issues/261#issuecomment-92850414
if os.Getenv("VIPS_CONCURRENCY") == "" {
C.vips_concurrency_set(1)
}
// Enable libvips cache tracing
if os.Getenv("VIPS_TRACE") != "" {
C.vips_enable_cache_set_trace()
}
initialized = true
}
// Shutdown is used to shutdown libvips in a thread-safe way.
// You can call this to drop caches as well.
// If libvips was already initialized, the function is no-op
func Shutdown() {
m.Lock()
defer m.Unlock()
if initialized {
C.vips_shutdown()
initialized = false
}
}
// VipsCacheSetMaxMem sets the maximum amount of tracked memory allowed before the vips operation cache
// begins to drop entries.
func VipsCacheSetMaxMem(maxCacheMem int) {
C.vips_cache_set_max_mem(C.size_t(maxCacheMem))
}
// VipsCacheSetMax sets the maximum number of operations to keep in the vips operation cache.
func VipsCacheSetMax(maxCacheSize int) {
C.vips_cache_set_max(C.int(maxCacheSize))
}
// VipsCacheDropAll drops the vips operation cache, freeing the allocated memory.
func VipsCacheDropAll() {
C.vips_cache_drop_all()
}
// VipsDebugInfo outputs to stdout libvips collected data. Useful for debugging.
func VipsDebugInfo() {
C.im__print_all()
}
// VipsMemory gets memory info stats from libvips (cache size, memory allocs...)
func VipsMemory() VipsMemoryInfo {
return VipsMemoryInfo{
Memory: int64(C.vips_tracked_get_mem()),
MemoryHighwater: int64(C.vips_tracked_get_mem_highwater()),
Allocations: int64(C.vips_tracked_get_allocs()),
}
}
// VipsIsTypeSupported returns true if the given image type
// is supported by the current libvips compilation.
func VipsIsTypeSupported(t ImageType) bool {
if t == JPEG {
return int(C.vips_type_find_bridge(C.JPEG)) != 0
}
if t == WEBP {
return int(C.vips_type_find_bridge(C.WEBP)) != 0
}
if t == PNG {
return int(C.vips_type_find_bridge(C.PNG)) != 0
}
if t == GIF {
return int(C.vips_type_find_bridge(C.GIF)) != 0
}
if t == PDF {
return int(C.vips_type_find_bridge(C.PDF)) != 0
}
if t == SVG {
return int(C.vips_type_find_bridge(C.SVG)) != 0
}
if t == TIFF {
return int(C.vips_type_find_bridge(C.TIFF)) != 0
}
if t == MAGICK {
return int(C.vips_type_find_bridge(C.MAGICK)) != 0
}
return false
}
// VipsIsTypeSupportedSave returns true if the given image type
// is supported by the current libvips compilation for the
// save operation.
func VipsIsTypeSupportedSave(t ImageType) bool {
if t == JPEG {
return int(C.vips_type_find_save_bridge(C.JPEG)) != 0
}
if t == WEBP {
return int(C.vips_type_find_save_bridge(C.WEBP)) != 0
}
if t == PNG {
return int(C.vips_type_find_save_bridge(C.PNG)) != 0
}
if t == TIFF {
return int(C.vips_type_find_save_bridge(C.TIFF)) != 0
}
return false
}
func vipsExifOrientation(image *C.VipsImage) int {
return int(C.vips_exif_orientation(image))
}
func vipsHasAlpha(image *C.VipsImage) bool {
return int(C.has_alpha_channel(image)) > 0
}
func vipsHasProfile(image *C.VipsImage) bool {
return int(C.has_profile_embed(image)) > 0
}
func vipsWindowSize(name string) float64 {
cname := C.CString(name)
defer C.free(unsafe.Pointer(cname))
return float64(C.interpolator_window_size(cname))
}
func vipsSpace(image *C.VipsImage) string {
return C.GoString(C.vips_enum_nick_bridge(image))
}
func vipsRotate(image *C.VipsImage, angle Angle) (*C.VipsImage, error) {
var out *C.VipsImage
defer C.g_object_unref(C.gpointer(image))
err := C.vips_rotate_bimg(image, &out, C.int(angle))
if err != 0 {
return nil, catchVipsError()
}
return out, nil
}
func vipsFlip(image *C.VipsImage, direction Direction) (*C.VipsImage, error) {
var out *C.VipsImage
defer C.g_object_unref(C.gpointer(image))
err := C.vips_flip_bridge(image, &out, C.int(direction))
if err != 0 {
return nil, catchVipsError()
}
return out, nil
}
func vipsZoom(image *C.VipsImage, zoom int) (*C.VipsImage, error) {
var out *C.VipsImage
defer C.g_object_unref(C.gpointer(image))
err := C.vips_zoom_bridge(image, &out, C.int(zoom), C.int(zoom))
if err != 0 {
return nil, catchVipsError()
}
return out, nil
}
func vipsWatermark(image *C.VipsImage, w Watermark) (*C.VipsImage, error) {
var out *C.VipsImage
// Defaults
noReplicate := 0
if w.NoReplicate {
noReplicate = 1
}
text := C.CString(w.Text)
font := C.CString(w.Font)
background := [3]C.double{C.double(w.Background.R), C.double(w.Background.G), C.double(w.Background.B)}
textOpts := vipsWatermarkTextOptions{text, font}
opts := vipsWatermarkOptions{C.int(w.Width), C.int(w.DPI), C.int(w.Margin), C.int(noReplicate), C.float(w.Opacity), background}
defer C.free(unsafe.Pointer(text))
defer C.free(unsafe.Pointer(font))
err := C.vips_watermark(image, &out, (*C.WatermarkTextOptions)(unsafe.Pointer(&textOpts)), (*C.WatermarkOptions)(unsafe.Pointer(&opts)))
if err != 0 {
return nil, catchVipsError()
}
return out, nil
}
func vipsRead(buf []byte) (*C.VipsImage, ImageType, error) {
var image *C.VipsImage
imageType := vipsImageType(buf)
if imageType == UNKNOWN {
return nil, UNKNOWN, errors.New("Unsupported image format")
}
length := C.size_t(len(buf))
imageBuf := unsafe.Pointer(&buf[0])
err := C.vips_init_image(imageBuf, length, C.int(imageType), &image)
if err != 0 {
return nil, UNKNOWN, catchVipsError()
}
return image, imageType, nil
}
func vipsColourspaceIsSupportedBuffer(buf []byte) (bool, error) {
image, _, err := vipsRead(buf)
if err != nil {
return false, err
}
C.g_object_unref(C.gpointer(image))
return vipsColourspaceIsSupported(image), nil
}
func vipsColourspaceIsSupported(image *C.VipsImage) bool {
return int(C.vips_colourspace_issupported_bridge(image)) == 1
}
func vipsInterpretationBuffer(buf []byte) (Interpretation, error) {
image, _, err := vipsRead(buf)
if err != nil {
return InterpretationError, err
}
C.g_object_unref(C.gpointer(image))
return vipsInterpretation(image), nil
}
func vipsInterpretation(image *C.VipsImage) Interpretation {
return Interpretation(C.vips_image_guess_interpretation_bridge(image))
}
func vipsFlattenBackground(image *C.VipsImage, background Color) (*C.VipsImage, error) {
var outImage *C.VipsImage
backgroundC := [3]C.double{
C.double(background.R),
C.double(background.G),
C.double(background.B),
}
if vipsHasAlpha(image) {
err := C.vips_flatten_background_brigde(image, &outImage,
backgroundC[0], backgroundC[1], backgroundC[2])
if int(err) != 0 {
return nil, catchVipsError()
}
C.g_object_unref(C.gpointer(image))
image = outImage
}
return image, nil
}
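// vipsPreSave optionally strips the ICC profile, converts the image to the requested colour space and, when an output ICC profile is set, applies an ICC transform before encoding.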
func vipsPreSave(image *C.VipsImage, o *vipsSaveOptions) (*C.VipsImage, error) {
var outImage *C.VipsImage
// Remove ICC profile metadata
if o.NoProfile {
C.remove_profile(image)
}
// Use a default interpretation and cast it to C type
if o.Interpretation == 0 {
o.Interpretation = InterpretationSRGB
}
interpretation := C.VipsInterpretation(o.Interpretation)
// Apply the proper colour space
if vipsColourspaceIsSupported(image) {
err := C.vips_colourspace_bridge(image, &outImage, interpretation)
if int(err) != 0 {
return nil, catchVipsError()
}
image = outImage
}
if o.OutputICC != "" && vipsHasProfile(image) {
outputIccPath := C.CString(o.OutputICC)
defer C.free(unsafe.Pointer(outputIccPath))
err := C.vips_icc_transform_bridge(image, &outImage, outputIccPath)
if int(err) != 0 {
return nil, catchVipsError()
}
C.g_object_unref(C.gpointer(image))
image = outImage
}
return image, nil
}
func vipsSave(image *C.VipsImage, o vipsSaveOptions) ([]byte, error) {
defer C.g_object_unref(C.gpointer(image))
tmpImage, err := vipsPreSave(image, &o)
if err != nil {
return nil, err
}
// When an image has an unsupported color space, vipsPreSave
// returns the pointer of the image passed to it unmodified.
// When this occurs, we must take care to not dereference the
// original image a second time; we may otherwise erroneously
// free the object twice.
if tmpImage != image {
defer C.g_object_unref(C.gpointer(tmpImage))
}
length := C.size_t(0)
saveErr := C.int(0)
interlace := C.int(boolToInt(o.Interlace))
quality := C.int(o.Quality)
strip := C.int(boolToInt(o.StripMetadata))
lossless := C.int(boolToInt(o.Lossless))
if o.Type != 0 && !IsTypeSupportedSave(o.Type) {
return nil, fmt.Errorf("VIPS cannot save to %#v", ImageTypes[o.Type])
}
var ptr unsafe.Pointer
switch o.Type {
case WEBP:
saveErr = C.vips_webpsave_bridge(tmpImage, &ptr, &length, strip, quality, lossless)
case PNG:
saveErr = C.vips_pngsave_bridge(tmpImage, &ptr, &length, strip, C.int(o.Compression), quality, interlace)
case TIFF:
saveErr = C.vips_tiffsave_bridge(tmpImage, &ptr, &length)
default:
saveErr = C.vips_jpegsave_bridge(tmpImage, &ptr, &length, strip, quality, interlace)
}
if int(saveErr) != 0 {
return nil, catchVipsError()
}
buf := C.GoBytes(ptr, C.int(length))
// Clean up
C.g_free(C.gpointer(ptr))
C.vips_error_clear()
return buf, nil
}
func getImageBuffer(image *C.VipsImage) ([]byte, error) {
var ptr unsafe.Pointer
length := C.size_t(0)
interlace := C.int(0)
quality := C.int(100)
err := C.int(0)
err = C.vips_jpegsave_bridge(image, &ptr, &length, 1, quality, interlace)
if int(err) != 0 {
return nil, catchVipsError()
}
defer C.g_free(C.gpointer(ptr))
defer C.vips_error_clear()
return C.GoBytes(ptr, C.int(length)), nil
}
func vipsExtract(image *C.VipsImage, left, top, width, height int) (*C.VipsImage, error) {
var buf *C.VipsImage
defer C.g_object_unref(C.gpointer(image))
if width > MaxSize || height > MaxSize {
return nil, errors.New("Maximum image size exceeded")
}
top, left = max(top), max(left)
err := C.vips_extract_area_bridge(image, &buf, C.int(left), C.int(top), C.int(width), C.int(height))
if err != 0 {
return nil, catchVipsError()
}
return buf, nil
}
func vipsSmartCrop(image *C.VipsImage, width, height int) (*C.VipsImage, error) {
var buf *C.VipsImage
defer C.g_object_unref(C.gpointer(image))
if width > MaxSize || height > MaxSize {
return nil, errors.New("Maximum image size exceeded")
}
err := C.vips_smartcrop_bridge(image, &buf, C.int(width), C.int(height))
if err != 0 {
return nil, catchVipsError()
}
return buf, nil
}
func vipsTrim(image *C.VipsImage, background Color, threshold float64) (int, int, int, int, error) {
defer C.g_object_unref(C.gpointer(image))
top := C.int(0)
left := C.int(0)
width := C.int(0)
height := C.int(0)
err := C.vips_find_trim_bridge(image,
&top, &left, &width, &height,
C.double(background.R), C.double(background.G), C.double(background.B),
C.double(threshold))
if err != 0 {
return 0, 0, 0, 0, catchVipsError()
}
return int(top), int(left), int(width), int(height), nil
}
func vipsShrinkJpeg(buf []byte, input *C.VipsImage, shrink int) (*C.VipsImage, error) {
var image *C.VipsImage
var ptr = unsafe.Pointer(&buf[0])
defer C.g_object_unref(C.gpointer(input))
err := C.vips_jpegload_buffer_shrink(ptr, C.size_t(len(buf)), &image, C.int(shrink))
if err != 0 {
return nil, catchVipsError()
}
return image, nil
}
func vipsShrinkWebp(buf []byte, input *C.VipsImage, shrink int) (*C.VipsImage, error) {
var image *C.VipsImage
var ptr = unsafe.Pointer(&buf[0])
defer C.g_object_unref(C.gpointer(input))
err := C.vips_webpload_buffer_shrink(ptr, C.size_t(len(buf)), &image, C.int(shrink))
if err != 0 {
return nil, catchVipsError()
}
return image, nil
}
func vipsShrink(input *C.VipsImage, shrink int) (*C.VipsImage, error) {
var image *C.VipsImage
defer C.g_object_unref(C.gpointer(input))
err := C.vips_shrink_bridge(input, &image, C.double(float64(shrink)), C.double(float64(shrink)))
if err != 0 {
return nil, catchVipsError()
}
return image, nil
}
func vipsReduce(input *C.VipsImage, xshrink float64, yshrink float64) (*C.VipsImage, error) {
var image *C.VipsImage
defer C.g_object_unref(C.gpointer(input))
err := C.vips_reduce_bridge(input, &image, C.double(xshrink), C.double(yshrink))
if err != 0 {
return nil, catchVipsError()
}
return image, nil
}
func vipsEmbed(input *C.VipsImage, left, top, width, height int, extend Extend, background Color) (*C.VipsImage, error) {
var image *C.VipsImage
// Max extend value, see: https://jcupitt.github.io/libvips/API/current/libvips-conversion.html#VipsExtend
if extend > 5 {
extend = ExtendBackground
}
defer C.g_object_unref(C.gpointer(input))
err := C.vips_embed_bridge(input, &image, C.int(left), C.int(top), C.int(width),
C.int(height), C.int(extend), C.double(background.R), C.double(background.G), C.double(background.B))
if err != 0 {
return nil, catchVipsError()
}
return image, nil
}
func vipsAffine(input *C.VipsImage, residualx, residualy float64, i Interpolator) (*C.VipsImage, error) {
var image *C.VipsImage
cstring := C.CString(i.String())
interpolator := C.vips_interpolate_new(cstring)
defer C.free(unsafe.Pointer(cstring))
defer C.g_object_unref(C.gpointer(input))
defer C.g_object_unref(C.gpointer(interpolator))
err := C.vips_affine_interpolator(input, &image, C.double(residualx), 0, 0, C.double(residualy), interpolator)
if err != 0 {
return nil, catchVipsError()
}
return image, nil
}
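// vipsImageType sniffs the image format from the buffer's magic bytes.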
func vipsImageType(buf []byte) ImageType {
if len(buf) < 12 {
return UNKNOWN
}
if buf[0] == 0xFF && buf[1] == 0xD8 && buf[2] == 0xFF {
return JPEG
}
if IsTypeSupported(GIF) && buf[0] == 0x47 && buf[1] == 0x49 && buf[2] == 0x46 {
return GIF
}
if buf[0] == 0x89 && buf[1] == 0x50 && buf[2] == 0x4E && buf[3] == 0x47 {
return PNG
}
if IsTypeSupported(TIFF) &&
((buf[0] == 0x49 && buf[1] == 0x49 && buf[2] == 0x2A && buf[3] == 0x0) ||
(buf[0] == 0x4D && buf[1] == 0x4D && buf[2] == 0x0 && buf[3] == 0x2A)) {
return TIFF
}
if IsTypeSupported(PDF) && buf[0] == 0x25 && buf[1] == 0x50 && buf[2] == 0x44 && buf[3] == 0x46 {
return PDF
}
if IsTypeSupported(WEBP) && buf[8] == 0x57 && buf[9] == 0x45 && buf[10] == 0x42 && buf[11] == 0x50 {
return WEBP
}
if IsTypeSupported(SVG) && IsSVGImage(buf) {
return SVG
}
if IsTypeSupported(MAGICK) && strings.HasSuffix(readImageType(buf), "MagickBuffer") {
return MAGICK
}
return UNKNOWN
}
func readImageType(buf []byte) string {
length := C.size_t(len(buf))
imageBuf := unsafe.Pointer(&buf[0])
load := C.vips_foreign_find_load_buffer(imageBuf, length)
return C.GoString(load)
}
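// catchVipsError copies the libvips error buffer into a Go error and clears it.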
func catchVipsError() error {
s := C.GoString(C.vips_error_buffer())
C.vips_error_clear()
C.vips_thread_shutdown()
return errors.New(s)
}
func boolToInt(b bool) int {
if b {
return 1
}
return 0
}
func vipsGaussianBlur(image *C.VipsImage, o GaussianBlur) (*C.VipsImage, error) {
var out *C.VipsImage
defer C.g_object_unref(C.gpointer(image))
err := C.vips_gaussblur_bridge(image, &out, C.double(o.Sigma), C.double(o.MinAmpl))
if err != 0 {
return nil, catchVipsError()
}
return out, nil
}
func vipsSharpen(image *C.VipsImage, o Sharpen) (*C.VipsImage, error) {
var out *C.VipsImage
defer C.g_object_unref(C.gpointer(image))
err := C.vips_sharpen_bridge(image, &out, C.int(o.Radius), C.double(o.X1), C.double(o.Y2), C.double(o.Y3), C.double(o.M1), C.double(o.M2))
if err != 0 {
return nil, catchVipsError()
}
return out, nil
}
func max(x int) int {
return int(math.Max(float64(x), 0))
}
func vipsDrawWatermark(image *C.VipsImage, o WatermarkImage) (*C.VipsImage, error) {
var out *C.VipsImage
watermark, _, e := vipsRead(o.Buf)
if e != nil {
return nil, e
}
opts := vipsWatermarkImageOptions{C.int(o.Left), C.int(o.Top), C.float(o.Opacity)}
err := C.vips_watermark_image(image, watermark, &out, (*C.WatermarkImageOptions)(unsafe.Pointer(&opts)))
if err != 0 {
return nil, catchVipsError()
}
return out, nil
}
| ["\"VIPS_CONCURRENCY\"", "\"VIPS_TRACE\""] | [] | ["VIPS_TRACE", "VIPS_CONCURRENCY"] | [] | ["VIPS_TRACE", "VIPS_CONCURRENCY"] | go | 2 | 0 |
python/helpers/pydev/pydevconsole.py | '''
Entry point module to start the interactive console.
'''
from _pydev_bundle._pydev_getopt import gnu_getopt
from _pydev_comm.pydev_rpc import make_rpc_client, start_rpc_server, start_rpc_server_and_make_client
from _pydev_imps._pydev_saved_modules import thread
start_new_thread = thread.start_new_thread
try:
from code import InteractiveConsole
except ImportError:
from _pydevd_bundle.pydevconsole_code_for_ironpython import InteractiveConsole
import os
import sys
from _pydev_imps._pydev_saved_modules import threading
from _pydevd_bundle.pydevd_constants import INTERACTIVE_MODE_AVAILABLE, dict_keys
from _pydevd_bundle.pydevd_utils import save_main_module
from _pydev_bundle import fix_getpass
fix_getpass.fix_getpass()
from _pydev_bundle.pydev_imports import _queue
try:
import __builtin__
except:
import builtins as __builtin__ # @UnresolvedImport
from _pydev_bundle.pydev_stdin import BaseStdIn
from _pydev_bundle.pydev_console_utils import BaseInterpreterInterface
from _pydev_bundle.pydev_console_types import Command
IS_PYTHON_3_ONWARDS = sys.version_info[0] >= 3
IS_PY24 = sys.version_info[0] == 2 and sys.version_info[1] == 4
try:
try:
execfile #Not in Py3k
except NameError:
from _pydev_bundle.pydev_imports import execfile
__builtin__.execfile = execfile
except:
pass
# Pull in runfile, the interface to UMD that wraps execfile
from _pydev_bundle.pydev_umd import runfile, _set_globals_function
if sys.version_info[0] >= 3:
import builtins # @UnresolvedImport
builtins.runfile = runfile
else:
import __builtin__
__builtin__.runfile = runfile
#=======================================================================================================================
# InterpreterInterface
#=======================================================================================================================
class InterpreterInterface(BaseInterpreterInterface):
'''
The methods in this class should be registered in the xml-rpc server.
'''
def __init__(self, mainThread, connect_status_queue=None, rpc_client=None):
BaseInterpreterInterface.__init__(self, mainThread, connect_status_queue, rpc_client)
self.namespace = {}
self.save_main()
self.interpreter = InteractiveConsole(self.namespace)
self._input_error_printed = False
def save_main(self):
m = save_main_module('<input>', 'pydevconsole')
self.namespace = m.__dict__
try:
self.namespace['__builtins__'] = __builtins__
except NameError:
pass # Not there on Jython...
def do_add_exec(self, codeFragment):
command = Command(self.interpreter, codeFragment)
command.run()
return command.more
def get_namespace(self):
return self.namespace
def close(self):
sys.exit(0)
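# Holds the debugger hook plus a flip-flop flag used by return_control() to periodically hand control back to the GUI inputhook.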
class _ProcessExecQueueHelper:
_debug_hook = None
_return_control_osc = False
def set_debug_hook(debug_hook):
_ProcessExecQueueHelper._debug_hook = debug_hook
def activate_mpl_if_already_imported(interpreter):
if interpreter.mpl_modules_for_patching:
for module in dict_keys(interpreter.mpl_modules_for_patching):
if module in sys.modules:
activate_function = interpreter.mpl_modules_for_patching.pop(module)
activate_function()
def init_set_return_control_back(interpreter):
from pydev_ipython.inputhook import set_return_control_callback
def return_control():
''' A function that the inputhooks can call (via inputhook.stdin_ready()) to find
out if they should cede control and return '''
if _ProcessExecQueueHelper._debug_hook:
# Some of the input hooks check return control without doing
# a single operation, so we don't return True on every
# call when the debug hook is in place to allow the GUI to run
# XXX: Eventually the inputhook code will have diverged enough
# from the IPython source that it will be worthwhile rewriting
# it rather than pretending to maintain the old API
_ProcessExecQueueHelper._return_control_osc = not _ProcessExecQueueHelper._return_control_osc
if _ProcessExecQueueHelper._return_control_osc:
return True
if not interpreter.exec_queue.empty():
return True
return False
set_return_control_callback(return_control)
def init_mpl_in_console(interpreter):
init_set_return_control_back(interpreter)
if not INTERACTIVE_MODE_AVAILABLE:
return
activate_mpl_if_already_imported(interpreter)
from _pydev_bundle.pydev_import_hook import import_hook_manager
for mod in dict_keys(interpreter.mpl_modules_for_patching):
import_hook_manager.add_module_name(mod, interpreter.mpl_modules_for_patching.pop(mod))
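# pid_exists: platform-specific check for whether the IDE process that launched the console is still alive (kill(pid, 0) on POSIX, OpenProcess/GetExitCodeProcess on Windows).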
if sys.platform != 'win32':
def pid_exists(pid):
# Note that this function in the face of errors will conservatively consider that
# the pid is still running (because we'll exit the current process when it's
# no longer running, so, we need to be 100% sure it actually exited).
import errno
if pid == 0:
# According to "man 2 kill" PID 0 has a special meaning:
# it refers to <<every process in the process group of the
# calling process>> so we don't want to go any further.
# If we get here it means this UNIX platform *does* have
# a process with id 0.
return True
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
# ESRCH == No such process
return False
elif err.errno == errno.EPERM:
# EPERM clearly means there's a process to deny access to
return True
else:
# According to "man 2 kill" possible error values are
# (EINVAL, EPERM, ESRCH) therefore we should never get
# here. If we do, although it's an error, consider it
# exists (see first comment in this function).
return True
else:
return True
else:
def pid_exists(pid):
# Note that this function in the face of errors will conservatively consider that
# the pid is still running (because we'll exit the current process when it's
# no longer running, so, we need to be 100% sure it actually exited).
import ctypes
kernel32 = ctypes.windll.kernel32
PROCESS_QUERY_INFORMATION = 0x0400
PROCESS_QUERY_LIMITED_INFORMATION = 0x1000
ERROR_INVALID_PARAMETER = 0x57
STILL_ACTIVE = 259
process = kernel32.OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_QUERY_LIMITED_INFORMATION, 0, pid)
if not process:
err = kernel32.GetLastError()
if err == ERROR_INVALID_PARAMETER:
# Means it doesn't exist (pid parameter is wrong).
return False
# There was some unexpected error (such as access denied), so
# consider it exists (although it could be something else, but we don't want
# to raise any errors -- so, just consider it exists).
return True
try:
zero = ctypes.c_int(0)
exit_code = ctypes.pointer(zero)
exit_code_suceeded = kernel32.GetExitCodeProcess(process, exit_code)
if not exit_code_suceeded:
# There was some unexpected error (such as access denied), so
# consider it exists (although it could be something else, but we don't want
# to raise any errors -- so, just consider it exists).
return True
elif bool(exit_code.contents.value) and int(exit_code.contents.value) != STILL_ACTIVE:
return False
finally:
kernel32.CloseHandle(process)
return True
def process_exec_queue(interpreter):
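    # Main console loop: services the GUI inputhook, watches the IDE pid, and executes queued code fragments on this thread.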
init_mpl_in_console(interpreter)
from pydev_ipython.inputhook import get_inputhook
try:
kill_if_pid_not_alive = int(os.environ.get('PYDEV_ECLIPSE_PID', '-1'))
except:
kill_if_pid_not_alive = -1
while 1:
if kill_if_pid_not_alive != -1:
if not pid_exists(kill_if_pid_not_alive):
exit()
# Running the request may have changed the inputhook in use
inputhook = get_inputhook()
if _ProcessExecQueueHelper._debug_hook:
_ProcessExecQueueHelper._debug_hook()
if inputhook:
try:
# Note: it'll block here until return_control returns True.
inputhook()
except:
                import traceback
                traceback.print_exc()
try:
try:
code_fragment = interpreter.exec_queue.get(block=True, timeout=1/20.) # 20 calls/second
except _queue.Empty:
continue
with interpreter.vars_lock:
if hasattr(code_fragment, '__call__'):
# It can be a callable (i.e.: something that must run in the main
# thread can be put in the queue for later execution).
code_fragment()
else:
interpreter.add_exec(code_fragment)
except KeyboardInterrupt:
interpreter.buffer = None
continue
except SystemExit:
raise
        except:
            import traceback
            type, value, tb = sys.exc_info()
            traceback.print_exception(type, value, tb, file=sys.__stderr__)
exit()
if 'IPYTHONENABLE' in os.environ:
IPYTHON = os.environ['IPYTHONENABLE'] == 'True'
else:
IPYTHON = True
try:
try:
exitfunc = sys.exitfunc
except AttributeError:
exitfunc = None
if IPYTHON:
from _pydev_bundle.pydev_ipython_console import InterpreterInterface
if exitfunc is not None:
sys.exitfunc = exitfunc
else:
try:
delattr(sys, 'exitfunc')
except:
pass
except:
IPYTHON = False
pass
#=======================================================================================================================
# _DoExit
#=======================================================================================================================
def do_exit(*args):
'''
We have to override the exit because calling sys.exit will only actually exit the main thread,
and as we're in a Xml-rpc server, that won't work.
'''
try:
import java.lang.System
java.lang.System.exit(1)
except ImportError:
if len(args) == 1:
os._exit(args[0])
else:
os._exit(0)
def enable_thrift_logging():
"""Sets up `thriftpy` logger
The logger is used in `thriftpy/server.py` for logging exceptions.
"""
import logging
# create logger
logger = logging.getLogger('_shaded_thriftpy')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
def create_server_handler_factory(interpreter):
def server_handler_factory(rpc_client):
interpreter.rpc_client = rpc_client
return interpreter
return server_handler_factory
def start_server(port):
if port is None:
port = 0
# 0. General stuff
#replace exit (see comments on method)
#note that this does not work in jython!!! (sys method can't be replaced).
sys.exit = do_exit
from pydev_console.pydev_protocol import PythonConsoleBackendService, PythonConsoleFrontendService
enable_thrift_logging()
server_service = PythonConsoleBackendService
client_service = PythonConsoleFrontendService
# 1. Start Python console server
# `InterpreterInterface` implements all methods required for `server_handler`
interpreter = InterpreterInterface(threading.currentThread())
# Tell UMD the proper default namespace
_set_globals_function(interpreter.get_namespace)
server_socket = start_rpc_server_and_make_client('', port, server_service, client_service, create_server_handler_factory(interpreter))
# 2. Print server port for the IDE
_, server_port = server_socket.getsockname()
print(server_port)
# 3. Wait for IDE to connect to the server
process_exec_queue(interpreter)
def start_client(host, port):
#replace exit (see comments on method)
#note that this does not work in jython!!! (sys method can't be replaced).
sys.exit = do_exit
from pydev_console.pydev_protocol import PythonConsoleBackendService, PythonConsoleFrontendService
enable_thrift_logging()
client_service = PythonConsoleFrontendService
client, server_transport = make_rpc_client(client_service, host, port)
interpreter = InterpreterInterface(threading.currentThread(), rpc_client=client)
# we do not need to start the server in a new thread because it does not need to accept a client connection, it already has it
# Tell UMD the proper default namespace
_set_globals_function(interpreter.get_namespace)
server_service = PythonConsoleBackendService
# `InterpreterInterface` implements all methods required for the handler
server_handler = interpreter
start_rpc_server(server_transport, server_service, server_handler)
process_exec_queue(interpreter)
def get_ipython_hidden_vars():
if IPYTHON and hasattr(__builtin__, 'interpreter'):
interpreter = get_interpreter()
return interpreter.get_ipython_hidden_vars_dict()
else:
try:
ipython_shell = get_ipython()
from _pydev_bundle.pydev_ipython_console_011 import get_ipython_hidden_vars
return get_ipython_hidden_vars(ipython_shell)
except:
pass
def get_interpreter():
try:
interpreterInterface = getattr(__builtin__, 'interpreter')
except AttributeError:
interpreterInterface = InterpreterInterface(None, None, threading.currentThread())
__builtin__.interpreter = interpreterInterface
print(interpreterInterface.get_greeting_msg())
return interpreterInterface
def get_completions(text, token, globals, locals):
interpreterInterface = get_interpreter()
interpreterInterface.interpreter.update(globals, locals)
return interpreterInterface.getCompletions(text, token)
#=======================================================================================================================
# main
#=======================================================================================================================
if __name__ == '__main__':
#Important: don't use this module directly as the __main__ module, rather, import itself as pydevconsole
#so that we don't get multiple pydevconsole modules if it's executed directly (otherwise we'd have multiple
#representations of its classes).
#See: https://sw-brainwy.rhcloud.com/tracker/PyDev/446:
#'Variables' and 'Expressions' views stopped working when debugging interactive console
import pydevconsole
sys.stdin = pydevconsole.BaseStdIn(sys.stdin)
# parse command-line arguments
optlist, _ = gnu_getopt(sys.argv, 'm:h:p', ['mode=', 'host=', 'port='])
mode = None
host = None
port = None
for opt, arg in optlist:
if opt in ('-m', '--mode'):
mode = arg
elif opt in ('-h', '--host'):
host = arg
elif opt in ('-p', '--port'):
port = int(arg)
if mode not in ('client', 'server'):
sys.exit(-1)
if mode == 'client':
if not port:
# port must be set for client
sys.exit(-1)
if not host:
from _pydev_bundle import pydev_localhost
host = client_host = pydev_localhost.get_localhost()
pydevconsole.start_client(host, port)
elif mode == 'server':
pydevconsole.start_server(port)
| [] | [] | [
"PYDEV_ECLIPSE_PID",
"IPYTHONENABLE"
] | [] | ["PYDEV_ECLIPSE_PID", "IPYTHONENABLE"] | python | 2 | 0 | |
service.go | package main
import (
"fmt"
"github.com/google/cadvisor/client"
"github.com/google/cadvisor/info/v1"
"github.com/rancher/go-rancher-metadata/metadata"
rclient "github.com/rancher/go-rancher/client"
"github.com/urfave/cli"
"log"
"os"
"strings"
"time"
)
const (
// Rancher metadata endpoint URL
metadataUrl = "http://rancher-metadata.rancher.internal/2015-12-19"
// interval at which each goroutine polls cAdvisor for metrics
pollCadvisorInterval = 2 * time.Second
// interval at which to poll metadata
pollMetadataInterval = 10 * time.Second
// interval at which to print statistics, should be divisible by pollCadvisorInterval
printStatisticsInterval = 10 * time.Second
// interval at which to analyze metrics, should be divisible by pollCadvisorInterval
analyzeMetricsInterval = 2 * time.Second
)
func ServiceCommand() cli.Command {
return cli.Command{
Name: "service",
Usage: "Autoscale a service",
ArgsUsage: "<stack/service>",
Action: ScaleService,
Flags: []cli.Flag{
cli.Float64Flag{
Name: "min-cpu",
Usage: "Minimum CPU usage threshold in percent",
Value: 0,
},
cli.Float64Flag{
Name: "max-cpu",
Usage: "Maximum CPU usage threshold in percent",
Value: 100,
},
cli.Float64Flag{
Name: "min-mem",
Usage: "Minimum Memory usage threshold in MiB",
Value: 0,
},
cli.Float64Flag{
Name: "max-mem",
Usage: "Memory Usage threshold in percent",
Value: 4096,
},
cli.StringFlag{
Name: "and",
Usage: "Both CPU and Memory minimum or maximum thresholds must be met",
},
cli.DurationFlag{
Name: "period",
Usage: "",
Value: 60 * time.Second,
},
cli.DurationFlag{
Name: "warmup",
Usage: "",
Value: 60 * time.Second,
},
cli.DurationFlag{
Name: "cooldown",
Usage: "",
Value: 60 * time.Second,
},
cli.StringFlag{
Name: "verbose, v",
Usage: "Enable verbose logging output",
},
cli.StringFlag{
Name: "url",
Usage: "Rancher API URL",
Value: os.Getenv("CATTLE_URL"),
},
cli.StringFlag{
Name: "access-key",
Usage: "Rancher Access Key",
Value: os.Getenv("CATTLE_ACCESS_KEY"),
},
cli.StringFlag{
Name: "secret-key",
Usage: "Rancher Secret Key",
Value: os.Getenv("CATTLE_SECRET_KEY"),
},
},
}
}
type AutoscaleContext struct {
// configuration argument
StackName string
Service metadata.Service
RClient *rclient.RancherClient
RService *rclient.Service
// configuration parameters
MinCpuThreshold float64
MaxCpuThreshold float64
MinMemThreshold float64
MaxMemThreshold float64
And bool
Period time.Duration
Warmup time.Duration
Cooldown time.Duration
Verbose bool
mClient *metadata.Client
mContainers []metadata.Container
mHosts []metadata.Host
CContainers []v1.ContainerInfo
cInfoMap map[string]*v1.ContainerInfo
requestCount int
addedCount int
deletedCount int
metrics chan v1.ContainerInfo
done chan bool
}
func NewAutoscaleContext(c *cli.Context) *AutoscaleContext {
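	// Resolve the target stack/service, its containers and hosts from Rancher metadata, then build a Rancher API client for scaling.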
stackservice := c.Args().First()
if stackservice == "" {
cli.ShowCommandHelp(c, "service")
os.Exit(1)
}
tokens := strings.Split(stackservice, "/")
stackName := tokens[0]
serviceName := tokens[1]
mclient := metadata.NewClient(metadataUrl)
service, err := mclient.GetSelfServiceByName(serviceName)
if err != nil {
log.Fatalln(err)
}
rcontainers, err := mclient.GetServiceContainers(serviceName, stackName)
if err != nil {
log.Fatalln(err)
}
// get rancher hosts
rhosts, err := mclient.GetHosts()
if err != nil {
log.Fatalln(err)
}
rcli, err := rclient.NewRancherClient(&rclient.ClientOpts{
Url: c.String("url"),
AccessKey: c.String("access-key"),
SecretKey: c.String("secret-key"),
})
if err != nil {
log.Fatalln(err)
}
services, err := rcli.Service.List(&rclient.ListOpts{
Filters: map[string]interface{}{
"uuid": service.UUID,
},
})
if err != nil {
log.Fatalln(err)
}
if len(services.Data) > 1 {
log.Fatalln("Multiple services returned with UUID", service.UUID)
}
client := &AutoscaleContext{
StackName: stackName,
Service: service,
RClient: rcli,
RService: &services.Data[0],
MinCpuThreshold: c.Float64("min-cpu"),
MaxCpuThreshold: c.Float64("max-cpu"),
MinMemThreshold: c.Float64("min-mem"),
MaxMemThreshold: c.Float64("max-mem"),
And: c.String("and") == "true",
Period: c.Duration("period"),
Warmup: c.Duration("warmup"),
Cooldown: c.Duration("cooldown"),
Verbose: c.String("verbose") == "true",
mClient: &mclient,
mContainers: rcontainers,
mHosts: rhosts,
cInfoMap: make(map[string]*v1.ContainerInfo),
metrics: make(chan v1.ContainerInfo),
done: make(chan bool),
}
fmt.Printf("Monitoring '%s' service in '%s' stack, %d containers across %d hosts\n",
serviceName, stackName, len(rcontainers), len(rhosts))
if client.Verbose {
fmt.Println("Container Information:")
for _, container := range rcontainers {
fmt.Printf("\t(%s) %v\n", container.Name, container)
}
fmt.Println("Host Information:")
for _, host := range rhosts {
fmt.Printf("\t(%s) %v\n", host.Name, host)
}
}
// get cadvisor containers
return client
}
func ScaleService(c *cli.Context) error {
ctx := NewAutoscaleContext(c)
if err := ctx.GetCadvisorContainers(); err != nil {
return err
}
go ctx.ProcessMetrics()
ctx.PollMetadataChanges()
return nil
}
func (c *AutoscaleContext) GetCadvisorContainers() error {
for _, host := range c.mHosts {
address := "http://" + host.AgentIP + ":9244/"
cli, err := client.NewClient(address)
if err != nil {
return err
}
containers, err := cli.AllDockerContainers(&v1.ContainerInfoRequest{NumStats: 0})
if err != nil {
return err
}
for _, container := range containers {
for _, rancherContainer := range c.mContainers {
if rancherContainer.Name == container.Labels["io.rancher.container.name"] {
c.CContainers = append(c.CContainers, container)
go c.PollContinuously(container.Id, host.AgentIP)
// spread out the requests evenly
time.Sleep(time.Duration(int(pollCadvisorInterval) / c.Service.Scale))
break
}
}
}
}
return nil
}
// indefinitely poll for service scale changes
func (c *AutoscaleContext) PollMetadataChanges() {
for {
time.Sleep(pollMetadataInterval)
service, err := (*c.mClient).GetSelfServiceByName(c.Service.Name)
if err != nil {
log.Println(err)
}
// if the service is scaled up/down, we accomplished our goal
if service.Scale != c.Service.Scale {
select {
case <-c.done:
// already shutting down, we caused the scale change
default:
fmt.Printf("Detected scale up: %d -> %d\n", c.Service.Scale, service.Scale)
}
c.done <- true
fmt.Printf("Exiting")
break
}
}
}
// process incoming metrics
func (c *AutoscaleContext) ProcessMetrics() {
fmt.Println("Started processing metrics")
for {
select {
case <-c.done:
c.done <- true
fmt.Println("Stopped processing metrics")
return
case metric := <-c.metrics:
if _, exists := c.cInfoMap[metric.Id]; !exists {
c.cInfoMap[metric.Id] = &metric
} else {
// append new metrics
c.addedCount += len(metric.Stats)
c.cInfoMap[metric.Id].Stats = append(c.cInfoMap[metric.Id].Stats, metric.Stats...)
if len(c.cInfoMap[metric.Id].Stats) >= 2 {
c.DeleteOldMetrics(c.cInfoMap[metric.Id])
c.AnalyzeMetrics()
}
}
c.PrintStatistics()
}
}
}
func (c *AutoscaleContext) PrintStatistics() {
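	// Throttle output: each container issues one cAdvisor request per poll interval, so requestCount grows by Scale per interval.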
if c.requestCount%(int(printStatisticsInterval/pollCadvisorInterval)*c.Service.Scale) == 0 {
fmt.Printf("added: %6d, deleted: %6d, in-memory: %5d, requests: %6d\n",
c.addedCount, c.deletedCount, c.addedCount-c.deletedCount, c.requestCount)
if c.Verbose {
for _, info := range c.cInfoMap {
metrics := len(info.Stats)
window := StatsWindow(info.Stats, 0, 10*time.Millisecond)
fmt.Printf("\t(%s) metrics: %d, window: %v, rate: %f/sec\n", info.Labels["io.rancher.container.name"],
metrics, window, float64(metrics)/float64(window/time.Second))
}
}
}
}
// analyze metric window and trigger scale operations
func (c *AutoscaleContext) AnalyzeMetrics() {
if c.requestCount%(int(analyzeMetricsInterval/pollCadvisorInterval)*c.Service.Scale) != 0 {
return
}
averageCpu := float64(0) // average CPU usage (over configured period)
averageMem := float64(0) // average RAM usage (instantaneous)
	averageRxBytes := float64(0) // inbound network traffic rate (bytes/sec)
	averageTxBytes := float64(0) // outbound network traffic rate (bytes/sec)
fullWindow := true
for _, cinfo := range c.cInfoMap {
stats := cinfo.Stats
// we absolutely need two or more metrics to look at a time window
if len(stats) < 2 {
return
}
begin := stats[0]
end := stats[len(stats)-1]
duration := end.Timestamp.Sub(begin.Timestamp)
fullWindow = fullWindow && (duration >= c.Period)
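		// CPU usage: delta of cumulative CPU time over the window, divided by wall-clock duration and core count, in percent.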
averageCpu += float64(end.Cpu.Usage.Total-begin.Cpu.Usage.Total) /
float64(duration) / float64(len(begin.Cpu.Usage.PerCpu)) * 100
// TODO (llparse) determine if we should do averages across the window
// as this is an instantaneous measurement
averageMem += float64(end.Memory.Usage)
averageRxBytes += float64(end.Network.InterfaceStats.RxBytes-begin.Network.InterfaceStats.RxBytes) / float64(duration/time.Second)
averageTxBytes += float64(end.Network.InterfaceStats.TxBytes-begin.Network.InterfaceStats.TxBytes) / float64(duration/time.Second)
// fmt.Printf("%s %v %+v\n", cinfo.Name, end.Timestamp, end.DiskIo)
}
averageCpu /= float64(c.Service.Scale)
averageCpu = float64(int64(averageCpu*10)) / 10
averageMem = averageMem / float64(c.Service.Scale) / 1024 / 1024
averageRx := averageRxBytes / float64(c.Service.Scale) / 1024
averageTx := averageTxBytes / float64(c.Service.Scale) / 1024
fmt.Printf("avg cpu: %5.1f%%, avg mem: %7.1fMiB, avg rx: %5.1fKiB/s, avg tx: %5.1fKiB/s\n",
averageCpu, averageMem, averageRx, averageTx)
// we absolutely need a full time window across all containers to make decisions
if !fullWindow {
return
}
// all conditions must be met
if c.And {
if averageCpu >= c.MaxCpuThreshold && averageMem >= c.MaxMemThreshold {
c.ScaleUp()
}
if averageCpu <= c.MinCpuThreshold && averageMem <= c.MinMemThreshold {
c.ScaleDown()
}
// any condition must be met
} else {
if averageCpu >= c.MaxCpuThreshold || averageMem >= c.MaxMemThreshold {
c.ScaleUp()
}
if averageCpu <= c.MinCpuThreshold || averageMem <= c.MinMemThreshold {
c.ScaleDown()
}
}
}
func (c *AutoscaleContext) ScaleUp() {
c.Scale(1)
}
func (c *AutoscaleContext) ScaleDown() {
c.Scale(-1)
}
func (c *AutoscaleContext) Scale(offset int64) {
var adjective string
var delay time.Duration
if offset > 0 {
adjective = "up"
delay = c.Warmup
} else {
adjective = "down"
delay = c.Cooldown
}
newScale := c.RService.Scale + offset
if newScale <= 0 {
fmt.Printf("Ignoring scale %s: %d -> %d\n", adjective, c.RService.Scale, newScale)
return
} else {
fmt.Printf("Triggered scale %s: %d -> %d\n", adjective, c.RService.Scale, newScale)
}
// sometimes Rancher takes ages to respond so do this async
go func() {
_, err := c.RClient.Service.Update(c.RService, map[string]interface{}{
"scale": newScale,
})
if err != nil {
log.Fatalln(err)
}
}()
// process completes when we scale
c.done <- true
// warmup or cooldown
if offset < 0 {
fmt.Printf("Cooling down for %v\n", delay)
} else {
fmt.Printf("Warming up for %v\n", delay)
}
time.Sleep(delay)
fmt.Println("Exiting")
}
// delete metrics outside of the time window
func (c *AutoscaleContext) DeleteOldMetrics(cinfo *v1.ContainerInfo) {
precision := 100 * time.Millisecond
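	// Drop the oldest sample while the window measured without it still spans the configured period.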
for ; StatsWindow(cinfo.Stats, 1, precision) >= c.Period; c.deletedCount += 1 {
//if !cinfo.Stats[0].Timestamp.Before(windowStart) || window > 0 && window < c.Period {
// fmt.Printf(" Deleting %v from %s\n", cinfo.Stats[0].Timestamp, cinfo.Labels["io.rancher.container.name"])
cinfo.Stats = append(cinfo.Stats[:0], cinfo.Stats[1:]...)
}
}
func StatsWindow(stats []*v1.ContainerStats, offset int, round time.Duration) time.Duration {
if len(stats) < 2 {
return time.Duration(0)
}
return stats[len(stats)-1].Timestamp.Round(round).Sub(stats[offset].Timestamp.Round(round))
}
// poll cAdvisor continuously for container metrics
func (c *AutoscaleContext) PollContinuously(containerId string, hostIp string) {
address := "http://" + hostIp + ":9244/"
cli, err := client.NewClient(address)
if err != nil {
log.Fatalln(err)
}
start := time.Now()
for {
select {
case <-c.done:
c.done <- true
fmt.Printf("Stopped collecting metrics for container %s", containerId)
return
default:
}
time.Sleep(pollCadvisorInterval)
newStart := time.Now()
info, err := cli.DockerContainer(containerId, &v1.ContainerInfoRequest{
Start: start,
})
if err != nil {
fmt.Println(err)
}
start = newStart
c.metrics <- info
c.requestCount += 1
}
}
| [
"\"CATTLE_URL\"",
"\"CATTLE_ACCESS_KEY\"",
"\"CATTLE_SECRET_KEY\""
] | [] | [
"CATTLE_SECRET_KEY",
"CATTLE_URL",
"CATTLE_ACCESS_KEY"
] | [] | ["CATTLE_SECRET_KEY", "CATTLE_URL", "CATTLE_ACCESS_KEY"] | go | 3 | 0 | |
object_tracker.py | import os
# comment out below line to enable tensorflow logging outputs
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import time
import tensorflow as tf
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
from absl import app, flags, logging
from absl.flags import FLAGS
import core.utils as utils
from core.yolov4 import filter_boxes
from tensorflow.python.saved_model import tag_constants
from core.config import cfg
from PIL import Image
import cv2
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
# deep sort imports
from deep_sort import preprocessing, nn_matching
from deep_sort.detection import Detection
from deep_sort.tracker import Tracker
from tools import generate_detections as gdet
# for tqdm and historical trajectory
from tqdm import tqdm
from collections import deque
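# one bounded point history per track id, used to draw each tracked object's motion trail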
pts = [deque(maxlen=30) for _ in range(9999)]
flags.DEFINE_string('framework', 'tf', '(tf, tflite, trt)')
flags.DEFINE_string('weights', './checkpoints/yolov4-416',
'path to weights file')
flags.DEFINE_integer('size', 416, 'resize images to')
flags.DEFINE_boolean('tiny', False, 'yolo or yolo-tiny')
flags.DEFINE_string('model', 'yolov4', 'yolov3 or yolov4')
flags.DEFINE_string('video', './data/video/test.mp4', 'path to input video or set to 0 for webcam')
flags.DEFINE_string('output', None, 'path to output video')
flags.DEFINE_string('output_format', 'XVID', 'codec used in VideoWriter when saving video to file')
flags.DEFINE_float('iou', 0.45, 'iou threshold')
flags.DEFINE_float('score', 0.50, 'score threshold')
flags.DEFINE_boolean('dont_show', False, 'dont show video output')
flags.DEFINE_boolean('info', False, 'show detailed info of tracked objects')
flags.DEFINE_boolean('count', False, 'count objects being tracked on screen')
def main(_argv):
# Definition of the parameters
max_cosine_distance = 0.4
nn_budget = None
nms_max_overlap = 1.0
# initialize deep sort
model_filename = 'model_data/mars-small128.pb'
encoder = gdet.create_box_encoder(model_filename, batch_size=1)
# calculate cosine distance metric
metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
# initialize tracker
tracker = Tracker(metric)
# load configuration for object detector
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)
input_size = FLAGS.size
video_path = FLAGS.video
# load tflite model if flag is set
if FLAGS.framework == 'tflite':
interpreter = tf.lite.Interpreter(model_path=FLAGS.weights)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
print(input_details)
print(output_details)
# otherwise load standard tensorflow saved model
else:
saved_model_loaded = tf.saved_model.load(FLAGS.weights, tags=[tag_constants.SERVING])
infer = saved_model_loaded.signatures['serving_default']
# begin video capture
try:
vid = cv2.VideoCapture(int(video_path))
except:
vid = cv2.VideoCapture(video_path)
out = None
# get video ready to save locally if flag is set
if FLAGS.output:
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
frame_count = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*FLAGS.output_format)
out = cv2.VideoWriter(FLAGS.output, codec, fps, (width, height))
#frame_num = 0
    # read the frame count here as well so the progress bar works even when --output is not set
    frame_count = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
    print("Total number of frames: ", frame_count)
    pbar = tqdm(range(frame_count))
    # while video is running
while True:
return_value, frame = vid.read()
if return_value:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
image = Image.fromarray(frame)
else:
print('Video has ended or failed, try a different video format!')
break
#frame_num +=1
#print('Frame #: ', frame_num)
pbar.update(1)
frame_size = frame.shape[:2]
image_data = cv2.resize(frame, (input_size, input_size))
image_data = image_data / 255.
image_data = image_data[np.newaxis, ...].astype(np.float32)
start_time = time.time()
# run detections on tflite if flag is set
if FLAGS.framework == 'tflite':
interpreter.set_tensor(input_details[0]['index'], image_data)
interpreter.invoke()
pred = [interpreter.get_tensor(output_details[i]['index']) for i in range(len(output_details))]
# run detections using yolov3 if flag is set
if FLAGS.model == 'yolov3' and FLAGS.tiny == True:
boxes, pred_conf = filter_boxes(pred[1], pred[0], score_threshold=0.25,
input_shape=tf.constant([input_size, input_size]))
else:
boxes, pred_conf = filter_boxes(pred[0], pred[1], score_threshold=0.25,
input_shape=tf.constant([input_size, input_size]))
else:
batch_data = tf.constant(image_data)
pred_bbox = infer(batch_data)
for key, value in pred_bbox.items():
boxes = value[:, :, 0:4]
pred_conf = value[:, :, 4:]
boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
scores=tf.reshape(
pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
max_output_size_per_class=50,
max_total_size=50,
iou_threshold=FLAGS.iou,
score_threshold=FLAGS.score
)
# convert data to numpy arrays and slice out unused elements
num_objects = valid_detections.numpy()[0]
bboxes = boxes.numpy()[0]
bboxes = bboxes[0:int(num_objects)]
scores = scores.numpy()[0]
scores = scores[0:int(num_objects)]
classes = classes.numpy()[0]
classes = classes[0:int(num_objects)]
# format bounding boxes from normalized ymin, xmin, ymax, xmax ---> xmin, ymin, width, height
original_h, original_w, _ = frame.shape
bboxes = utils.format_boxes(bboxes, original_h, original_w)
# store all predictions in one parameter for simplicity when calling functions
pred_bbox = [bboxes, scores, classes, num_objects]
# read in all class names from config
class_names = utils.read_class_names(cfg.YOLO.CLASSES)
# by default allow all classes in .names file
#allowed_classes = list(class_names.values())
        # custom allowed classes: only these classes are detected and tracked
allowed_classes = ['person','bicycle']
# loop through objects and use class index to get class name, allow only classes in allowed_classes list
names = []
deleted_indx = []
for i in range(num_objects):
class_indx = int(classes[i])
class_name = class_names[class_indx]
if class_name not in allowed_classes:
deleted_indx.append(i)
else:
names.append(class_name)
names = np.array(names)
count = len(names)
if FLAGS.count:
cv2.putText(frame, "Objects being tracked: {}".format(count), (5, 35), cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (0, 255, 0), 2)
#print("Objects being tracked: {}".format(count))
# delete detections that are not in allowed_classes
bboxes = np.delete(bboxes, deleted_indx, axis=0)
scores = np.delete(scores, deleted_indx, axis=0)
# encode yolo detections and feed to tracker
features = encoder(frame, bboxes)
detections = [Detection(bbox, score, class_name, feature) for bbox, score, class_name, feature in zip(bboxes, scores, names, features)]
#initialize color map
cmap = plt.get_cmap('tab20b')
colors = [cmap(i)[:3] for i in np.linspace(0, 1, 20)]
# run non-maxima supression
boxs = np.array([d.tlwh for d in detections])
scores = np.array([d.confidence for d in detections])
classes = np.array([d.class_name for d in detections])
indices = preprocessing.non_max_suppression(boxs, classes, nms_max_overlap, scores)
detections = [detections[i] for i in indices]
# Call the tracker
tracker.predict()
tracker.update(detections)
# update tracks
for track in tracker.tracks:
if not track.is_confirmed() or track.time_since_update > 1:
continue
bbox = track.to_tlbr()
class_name = track.get_class()
# draw bbox on screen
color = colors[int(track.track_id) % len(colors)]
color = [i * 255 for i in color]
cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color, 2)
cv2.rectangle(frame, (int(bbox[0]), int(bbox[1]-30)), (int(bbox[0])+(len(class_name)+len(str(track.track_id)))*17, int(bbox[1])), color, -1)
cv2.putText(frame, class_name + "-" + str(track.track_id),(int(bbox[0]), int(bbox[1]-10)),0, 0.75, (255,255,255),2)
# Tracking with historical trajectory
center = (int(((bbox[0])+(bbox[2]))/2),int(((bbox[1])+(bbox[3]))/2))
pts[track.track_id].append(center)
thickness = 5
# center point
cv2.circle(frame, (center), 1, color, thickness)
# draw motion path
for j in range(1, len(pts[track.track_id])):
if pts[track.track_id][j - 1] is None or pts[track.track_id][j] is None:
continue
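                # thickness shrinks as j grows, so more recent trail segments are drawn thinner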
thickness = int(np.sqrt(64 / float(j + 1)) * 2)
cv2.line(frame,(pts[track.track_id][j-1]), (pts[track.track_id][j]),(color),thickness)
# if enable info flag then print details about each track
if FLAGS.info:
print("Tracker ID: {}, Class: {}, BBox Coords (xmin, ymin, xmax, ymax): {}".format(str(track.track_id), class_name, (int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]))))
# calculate frames per second of running detections
fps = 1.0 / (time.time() - start_time)
#print("FPS: %.2f" % fps)
cv2.putText(frame, "FPS: %f" %(fps), (5,100), 0, 5e-3 * 200, (0,0,0), 2)
result = np.asarray(frame)
result = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
if not FLAGS.dont_show:
cv2.imshow("Output Video", result)
# if output flag is set, save video file
if FLAGS.output:
out.write(result)
if cv2.waitKey(1) & 0xFF == ord('q'): break
cv2.destroyAllWindows()
if __name__ == '__main__':
try:
app.run(main)
except SystemExit:
pass
| [] | [] | [
"TF_CPP_MIN_LOG_LEVEL"
] | [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
slugbuilder/migrator/main.go | package main
import (
"errors"
"fmt"
"log"
"net/http"
"os"
ct "github.com/flynn/flynn/controller/types"
"github.com/flynn/flynn/pkg/exec"
"github.com/flynn/flynn/pkg/postgres"
"github.com/flynn/flynn/pkg/random"
)
var ErrNotFound = errors.New("slug not found")
func main() {
log.SetFlags(0)
log.SetOutput(os.Stdout)
log.Printf("running slug migrator")
if err := migrate(); err != nil {
log.Printf("error running slug migrator: %s", err)
os.Exit(1)
}
}
func migrate() error {
db := postgres.Wait(nil, nil)
slugbuilder, err := getSlugbuilderArtifact(db)
if err != nil {
log.Printf("error getting slugbuilder artifact: %s", err)
return err
}
artifacts, err := getActiveSlugArtifacts(db)
if err != nil {
log.Printf("error getting active slug artifacts: %s", err)
return err
}
log.Printf("converting %d active slugs to Flynn images", len(artifacts))
for i, artifact := range artifacts {
log.Printf("converting slug %s (%d/%d)", artifact.ID, i+1, len(artifacts))
newID, err := convert(slugbuilder, artifact.URI)
if err != nil {
if err == ErrNotFound {
log.Printf("skipping slug %s (%d/%d): slug no longer exists", artifact.ID, i+1, len(artifacts))
continue
}
log.Printf("error converting slug %s (%d/%d): %s", artifact.ID, i+1, len(artifacts), err)
return err
}
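		// Point the release at the new image and soft-delete the old slug artifact in a single transaction.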
tx, err := db.Begin()
if err != nil {
return err
}
if err := tx.Exec(`UPDATE release_artifacts SET artifact_id = $1 WHERE artifact_id = $2`, newID, artifact.ID); err != nil {
tx.Rollback()
return err
}
if err := tx.Exec(`UPDATE artifacts SET deleted_at = now() WHERE artifact_id = $1`, artifact.ID); err != nil {
tx.Rollback()
return err
}
if err := tx.Commit(); err != nil {
return err
}
}
return nil
}
func getSlugbuilderArtifact(db *postgres.DB) (*ct.Artifact, error) {
sql := `
SELECT manifest, layer_url_template FROM artifacts
WHERE meta->>'flynn.component' = 'slugbuilder'
ORDER BY created_at DESC LIMIT 1
`
artifact := &ct.Artifact{
Type: ct.ArtifactTypeFlynn,
}
var layerURLTemplate *string
if err := db.QueryRow(sql).Scan(&artifact.RawManifest, &layerURLTemplate); err != nil {
return nil, err
}
if layerURLTemplate != nil {
artifact.LayerURLTemplate = *layerURLTemplate
}
return artifact, nil
}
func getActiveSlugArtifacts(db *postgres.DB) ([]*ct.Artifact, error) {
sql := `
SELECT artifact_id, uri FROM artifacts
WHERE type = 'file'
AND deleted_at IS NULL
AND artifact_id IN (
SELECT artifact_id FROM release_artifacts
WHERE release_id IN (
SELECT release_id FROM releases
WHERE meta->>'git' = 'true'
AND release_id IN (
SELECT release_id
FROM formations, json_each_text(formations.processes::json)
WHERE processes != 'null'
GROUP BY app_id, release_id
HAVING SUM(value::int) > 0
)
OR release_id IN (
SELECT release_id FROM apps
)
)
)
`
rows, err := db.Query(sql)
if err != nil {
return nil, err
}
defer rows.Close()
var artifacts []*ct.Artifact
for rows.Next() {
var artifact ct.Artifact
if err := rows.Scan(&artifact.ID, &artifact.URI); err != nil {
return nil, err
}
artifacts = append(artifacts, &artifact)
}
return artifacts, rows.Err()
}
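// convert downloads the legacy slug and pipes it into a slugbuilder job that rebuilds it as a Flynn image, returning the new image ID.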
func convert(slugbuilder *ct.Artifact, slugURL string) (string, error) {
res, err := http.Get(slugURL)
if err != nil {
return "", err
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return "", ErrNotFound
} else if res.StatusCode != http.StatusOK {
return "", fmt.Errorf("unexpected HTTP status: %s", res.Status)
}
id := random.UUID()
cmd := exec.Command(slugbuilder, "/bin/convert-legacy-slug.sh")
cmd.Env = map[string]string{
"CONTROLLER_KEY": os.Getenv("CONTROLLER_KEY"),
"SLUG_IMAGE_ID": id,
}
cmd.Stdin = res.Body
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Volumes = []*ct.VolumeReq{{Path: "/tmp", DeleteOnStop: true}}
return id, cmd.Run()
}
| [
"\"CONTROLLER_KEY\""
] | [] | [
"CONTROLLER_KEY"
] | [] | ["CONTROLLER_KEY"] | go | 1 | 0 | |
eventhandlers.go | package main
import (
"errors"
"log"
"os"
"github.com/cloudevents/sdk-go/pkg/cloudevents"
tower "github.com/keptn-sandbox/ansibletower-service/ansibletower-provider"
keptn "github.com/keptn/go-utils/pkg/lib"
)
/**
* Here are all the handler functions for the individual event
See https://github.com/keptn/spec/blob/0.1.3/cloudevents.md for details on the payload
-> "sh.keptn.event.configuration.change"
-> "sh.keptn.events.deployment-finished"
-> "sh.keptn.events.tests-finished"
-> "sh.keptn.event.start-evaluation"
-> "sh.keptn.events.evaluation-done"
-> "sh.keptn.event.problem.open"
-> "sh.keptn.events.problem"
-> "sh.keptn.event.action.triggered"
*/
//
// Handles ConfigurationChangeEventType = "sh.keptn.event.configuration.change"
// TODO: add in your handler code
//
func HandleConfigurationChangeEvent(myKeptn *keptn.Keptn, incomingEvent cloudevents.Event, data *keptn.ConfigurationChangeEventData) error {
log.Printf("Handling Configuration Changed Event: %s", incomingEvent.Context.GetID())
return nil
}
//
// Handles DeploymentFinishedEventType = "sh.keptn.events.deployment-finished"
// TODO: add in your handler code
//
func HandleDeploymentFinishedEvent(myKeptn *keptn.Keptn, incomingEvent cloudevents.Event, data *keptn.DeploymentFinishedEventData) error {
log.Printf("Handling Deployment Finished Event: %s", incomingEvent.Context.GetID())
// capture start time for tests
// startTime := time.Now()
// run tests
// ToDo: Implement your tests here
// Send Test Finished Event
// return myKeptn.SendTestsFinishedEvent(&incomingEvent, "", "", startTime, "pass", nil, "ansibletower-service")
return nil
}
//
// Handles TestsFinishedEventType = "sh.keptn.events.tests-finished"
// TODO: add in your handler code
//
func HandleTestsFinishedEvent(myKeptn *keptn.Keptn, incomingEvent cloudevents.Event, data *keptn.TestsFinishedEventData) error {
log.Printf("Handling Tests Finished Event: %s", incomingEvent.Context.GetID())
return nil
}
//
// Handles EvaluationDoneEventType = "sh.keptn.events.evaluation-done"
// TODO: add in your handler code
//
func HandleStartEvaluationEvent(myKeptn *keptn.Keptn, incomingEvent cloudevents.Event, data *keptn.StartEvaluationEventData) error {
log.Printf("Handling Start Evaluation Event: %s", incomingEvent.Context.GetID())
return nil
}
//
// Handles DeploymentFinishedEventType = "sh.keptn.events.deployment-finished"
// TODO: add in your handler code
//
func HandleEvaluationDoneEvent(myKeptn *keptn.Keptn, incomingEvent cloudevents.Event, data *keptn.EvaluationDoneEventData) error {
log.Printf("Handling Evaluation Done Event: %s", incomingEvent.Context.GetID())
return nil
}
//
// Handles ProblemOpenEventType = "sh.keptn.event.problem.open"
// Handles ProblemEventType = "sh.keptn.events.problem"
// TODO: add in your handler code
//
func HandleProblemEvent(myKeptn *keptn.Keptn, incomingEvent cloudevents.Event, data *keptn.ProblemEventData) error {
log.Printf("Handling Problem Event: %s", incomingEvent.Context.GetID())
// Deprecated since Keptn 0.7.0 - use the HandleActionTriggeredEvent instead
return nil
}
//
// Handles ActionTriggeredEventType = "sh.keptn.event.action.triggered"
// TODO: add in your handler code
//
func HandleActionTriggeredEvent(myKeptn *keptn.Keptn, incomingEvent cloudevents.Event, data *keptn.ActionTriggeredEventData) error {
log.Printf("Handling Action Triggered Event: %s", incomingEvent.Context.GetID())
// if the required input is not present, no action should be executed
if os.Getenv("ANSIBLETOWER_HOST") == "" {
return errors.New("Stopping execution of remediation action: ANSIBLETOWER_HOST is empty")
}
if os.Getenv("ANSIBLETOWER_TOKEN") == "" {
return errors.New("Stopping execution of remediation action: ANSIBLETOWER_TOKEN is empty")
}
// check if action is supported
if data.Action.Action == "job_template_launch" {
log.Printf("Supported action: %s", data.Action.Action)
// populate action started event from incoming event
actionStartedEventData := &keptn.ActionStartedEventData{}
err := incomingEvent.DataAs(actionStartedEventData)
if err != nil {
log.Printf("Got Data Error: %s", err.Error())
return err
// TODO should this send any event?
}
err = myKeptn.SendActionStartedEvent(&incomingEvent, data.Labels, "ansibletower-service")
if err != nil {
log.Printf("Got Error From SendActionStartedEvent: %s", err.Error())
return err
}
// launch job template
var jobURL string
jobURL, err = tower.LaunchJobTemplate(data)
if err != nil {
log.Printf("Error launching the template: %s", err.Error())
return err
}
// wait for job to finish
tower.WaitJobEnd(jobURL)
log.Println("Job finished.")
var actionResult keptn.ActionResult
actionResult.Result = "pass"
actionResult.Status = "succeeded"
		err = myKeptn.SendActionFinishedEvent(&incomingEvent, actionResult, data.Labels, "ansibletower-service")
if err != nil {
log.Printf("Got Error From SendActionFinishedEvent: %s", err.Error())
return err
}
}
return nil
}
| [
"\"ANSIBLETOWER_HOST\"",
"\"ANSIBLETOWER_TOKEN\""
] | [] | [
"ANSIBLETOWER_TOKEN",
"ANSIBLETOWER_HOST"
] | [] | ["ANSIBLETOWER_TOKEN", "ANSIBLETOWER_HOST"] | go | 2 | 0 | |
oneflow/python/test/ops/test_sort.py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import os
import numpy as np
import oneflow as flow
import tensorflow as tf
from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type
import oneflow.typing as oft
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
def compare_with_tensorflow(device_type, in_shape, axis, direction, data_type):
assert device_type in ["gpu", "cpu"]
assert data_type in ["float32", "double", "int8", "int32", "int64"]
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_logical_view(flow.scope.mirrored_view())
func_config.default_data_type(flow.float)
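    # the placeholder is declared larger than the actual input so the mirrored (dynamic-shape) view is exercised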
@flow.global_function(function_config=func_config)
def SortJob(
input: oft.ListNumpy.Placeholder(
tuple([dim + 10 for dim in in_shape]),
dtype=type_name_to_flow_type[data_type],
)
):
with flow.scope.placement(device_type, "0:0"):
return flow.sort(input, axis, direction)
input = (np.random.random(in_shape) * 100).astype(type_name_to_np_type[data_type])
# OneFlow
of_out = SortJob([input]).get().numpy_list()[0]
# TensorFlow
tf_out = tf.sort(input, axis, direction)
assert np.array_equal(of_out, tf_out.numpy())
def gen_arg_list():
arg_dict = OrderedDict()
arg_dict["device_type"] = ["cpu", "gpu"]
arg_dict["in_shape"] = [(10,), (10, 10, 20)]
arg_dict["axis"] = [-1]
arg_dict["direction"] = ["ASCENDING", "DESCENDING"]
arg_dict["data_type"] = ["float32", "double"]
return GenArgList(arg_dict)
def gen_arg_list_for_test_axis():
arg_dict = OrderedDict()
arg_dict["device_type"] = ["cpu", "gpu"]
arg_dict["in_shape"] = [(10, 10, 20)]
arg_dict["axis"] = [-2, 0, 2]
arg_dict["direction"] = ["ASCENDING", "DESCENDING"]
arg_dict["data_type"] = ["int32", "int64"]
return GenArgList(arg_dict)
@flow.unittest.skip_unless_1n1d()
class TestSort(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_sort(test_case):
for arg in gen_arg_list():
compare_with_tensorflow(*arg)
for arg in gen_arg_list_for_test_axis():
compare_with_tensorflow(*arg)
if __name__ == "__main__":
unittest.main()
| [] | [] | [
"ONEFLOW_TEST_CPU_ONLY"
] | [] | ["ONEFLOW_TEST_CPU_ONLY"] | python | 1 | 0 | |
example/exampleapp/settings.py | import os
import dj_database_url
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = os.getenv('SECRET_KEY', 'SECRET')
DEBUG = True
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'geography',
'demography',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'exampleapp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'exampleapp.wsgi.application'
DATABASES = {}
if 'DATABASE_URL' in os.environ:
DATABASES['default'] = dj_database_url.config()
else:
DATABASES['default'] = {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
#########################
# demography settings
CENSUS_API_KEY = os.getenv('CENSUS_API_KEY')
DEMOGRAPHY_AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
DEMOGRAPHY_AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')
DEMOGRAPHY_AWS_REGION = 'us-east-1'
DEMOGRAPHY_AWS_S3_BUCKET = os.getenv('AWS_S3_BUCKET')
DEMOGRAPHY_AWS_S3_UPLOAD_ROOT = 'election-results/data/us-census'
DEMOGRAPHY_AWS_ACL = 'public-read'
DEMOGRAPHY_AWS_CACHE_HEADER = 'max-age=31536000'
| [] | [] | [
"AWS_SECRET_ACCESS_KEY",
"SECRET_KEY",
"AWS_ACCESS_KEY_ID",
"CENSUS_API_KEY",
"AWS_S3_BUCKET"
] | [] | ["AWS_SECRET_ACCESS_KEY", "SECRET_KEY", "AWS_ACCESS_KEY_ID", "CENSUS_API_KEY", "AWS_S3_BUCKET"] | python | 5 | 0 | |
example/task_project/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'task_project.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
examples/service/conversations/conversation_webhook/update/conversation_webhook_update_example.go | package main
import (
"log"
"os"
"github.com/RJPearson94/twilio-sdk-go"
v1 "github.com/RJPearson94/twilio-sdk-go/service/conversations/v1"
"github.com/RJPearson94/twilio-sdk-go/service/conversations/v1/conversation/webhook"
"github.com/RJPearson94/twilio-sdk-go/session/credentials"
"github.com/RJPearson94/twilio-sdk-go/utils"
)
var conversationClient *v1.Conversations
func init() {
creds, err := credentials.New(credentials.Account{
Sid: os.Getenv("TWILIO_ACCOUNT_SID"),
AuthToken: os.Getenv("TWILIO_AUTH_TOKEN"),
})
if err != nil {
log.Panicf("%s", err.Error())
}
conversationClient = twilio.NewWithCredentials(creds).Conversations.V1
}
func main() {
resp, err := conversationClient.
Conversation("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").
Webhook("WHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").
Update(&webhook.UpdateWebhookInput{
Configuration: &webhook.UpdateWebhookConfigurationInput{
URL: utils.String("https://localhost.com/webhook"),
},
})
if err != nil {
log.Panicf("%s", err.Error())
}
log.Printf("SID: %s", resp.Sid)
}
| [
"\"TWILIO_ACCOUNT_SID\"",
"\"TWILIO_AUTH_TOKEN\""
] | [] | [
"TWILIO_AUTH_TOKEN",
"TWILIO_ACCOUNT_SID"
] | [] | ["TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID"] | go | 2 | 0 | |
jina/orchestrate/deployments/config/k8s.py | import copy
from argparse import Namespace
from typing import Dict, Union, List, Optional, Tuple
from jina import __default_executor__
from jina.enums import PodRoleType
from jina.excepts import NoContainerizedError
from jina.orchestrate.deployments.config.k8slib import kubernetes_deployment
from jina.orchestrate.deployments.config.helper import (
get_image_name,
to_compatible_name,
get_base_executor_version,
construct_runtime_container_args,
validate_uses,
)
from jina.serve.networking import K8sGrpcConnectionPool
from jina.orchestrate.deployments import BaseDeployment
class K8sDeploymentConfig:
"""
    Class that generates the Kubernetes configuration files for a given Deployment.
"""
class _K8sDeployment:
def __init__(
self,
name: str,
version: str,
pod_type: PodRoleType,
jina_deployment_name: str,
shard_id: Optional[int],
common_args: Union['Namespace', Dict],
deployment_args: Union['Namespace', Dict],
k8s_namespace: str,
k8s_connection_pool: bool = True,
k8s_deployments_addresses: Optional[Dict[str, List[str]]] = None,
):
self.name = name
self.dns_name = to_compatible_name(name)
self.version = version
self.pod_type = pod_type
self.jina_deployment_name = jina_deployment_name
self.shard_id = shard_id
self.common_args = common_args
self.deployment_args = deployment_args
self.num_replicas = getattr(self.deployment_args, 'replicas', 1)
self.k8s_namespace = k8s_namespace
self.k8s_connection_pool = k8s_connection_pool
self.k8s_deployments_addresses = k8s_deployments_addresses
def get_gateway_yamls(
self,
) -> List[Dict]:
import os
test_pip = os.getenv('JINA_K8S_USE_TEST_PIP') is not None
image_name = (
'jinaai/jina:test-pip'
if test_pip
else f'jinaai/jina:{self.version}-py38-standard'
)
cargs = copy.copy(self.deployment_args)
cargs.env = None
cargs.deployments_addresses = self.k8s_deployments_addresses
from jina.helper import ArgNamespace
from jina.parsers import set_gateway_parser
taboo = {
'uses_with',
'uses_metas',
'volumes',
'uses_before',
'uses_after',
'workspace',
'workspace_id',
'upload_files',
'noblock_on_start',
}
non_defaults = ArgNamespace.get_non_defaults_args(
cargs, set_gateway_parser(), taboo=taboo
)
_args = ArgNamespace.kwargs2list(non_defaults)
container_args = ['gateway'] + _args
if not cargs.k8s_connection_pool:
container_args.append('--k8s-disable-connection-pool')
return kubernetes_deployment.get_deployment_yamls(
self.dns_name,
namespace=self.k8s_namespace,
image_name=image_name,
container_cmd='["jina"]',
container_args=f'{container_args}',
replicas=1,
pull_policy='IfNotPresent',
jina_deployment_name='gateway',
pod_type=self.pod_type,
port=self.common_args.port,
env=cargs.env,
)
def _get_image_name(self, uses: Optional[str]):
import os
test_pip = os.getenv('JINA_K8S_USE_TEST_PIP') is not None
image_name = (
'jinaai/jina:test-pip'
if test_pip
else f'jinaai/jina:{self.version}-py38-perf'
)
if uses is not None and uses != __default_executor__:
image_name = get_image_name(uses)
return image_name
def _get_container_args(self, cargs, pod_type):
uses_metas = cargs.uses_metas or {}
uses_with = self.deployment_args.uses_with
if cargs.uses != __default_executor__:
cargs.uses = 'config.yml'
return construct_runtime_container_args(
cargs, uses_metas, uses_with, pod_type
)
def get_runtime_yamls(
self,
) -> List[Dict]:
cargs = copy.copy(self.deployment_args)
image_name = self._get_image_name(cargs.uses)
image_name_uses_before = (
self._get_image_name(cargs.uses_before)
if hasattr(cargs, 'uses_before') and cargs.uses_before
else None
)
image_name_uses_after = (
self._get_image_name(cargs.uses_after)
if hasattr(cargs, 'uses_after') and cargs.uses_after
else None
)
container_args = self._get_container_args(cargs, pod_type=self.pod_type)
container_args_uses_before = None
if getattr(cargs, 'uses_before', False):
uses_before_cargs = copy.copy(cargs)
uses_before_cargs.uses = cargs.uses_before
uses_before_cargs.name = f'{self.common_args.name}/uses-before'
uses_before_cargs.port = K8sGrpcConnectionPool.K8S_PORT_USES_BEFORE
uses_before_cargs.uses_before_address = None
uses_before_cargs.uses_after_address = None
uses_before_cargs.uses_before = None
uses_before_cargs.uses_after = None
uses_before_cargs.uses_with = None
uses_before_cargs.uses_metas = None
uses_before_cargs.env = None
uses_before_cargs.connection_list = None
uses_before_cargs.runtime_cls = 'WorkerRuntime'
uses_before_cargs.pod_role = PodRoleType.WORKER
uses_before_cargs.polling = None
container_args_uses_before = self._get_container_args(
uses_before_cargs, PodRoleType.WORKER
)
container_args_uses_after = None
if getattr(cargs, 'uses_after', False):
uses_after_cargs = copy.copy(cargs)
uses_after_cargs.uses = cargs.uses_after
uses_after_cargs.name = f'{self.common_args.name}/uses-after'
uses_after_cargs.port = K8sGrpcConnectionPool.K8S_PORT_USES_AFTER
uses_after_cargs.uses_before_address = None
uses_after_cargs.uses_after_address = None
uses_after_cargs.uses_before = None
uses_after_cargs.uses_after = None
uses_after_cargs.uses_with = None
uses_after_cargs.uses_metas = None
uses_after_cargs.env = None
uses_after_cargs.connection_list = None
uses_after_cargs.runtime_cls = 'WorkerRuntime'
uses_after_cargs.pod_role = PodRoleType.WORKER
uses_after_cargs.polling = None
container_args_uses_after = self._get_container_args(
uses_after_cargs, PodRoleType.WORKER
)
return kubernetes_deployment.get_deployment_yamls(
self.dns_name,
namespace=self.k8s_namespace,
image_name=image_name,
image_name_uses_after=image_name_uses_after,
image_name_uses_before=image_name_uses_before,
container_cmd='["jina"]',
container_cmd_uses_before='["jina"]',
container_cmd_uses_after='["jina"]',
container_args=f'{container_args}',
container_args_uses_before=container_args_uses_before,
container_args_uses_after=container_args_uses_after,
replicas=self.num_replicas,
pull_policy='IfNotPresent',
jina_deployment_name=self.jina_deployment_name,
pod_type=self.pod_type,
shard_id=self.shard_id,
env=cargs.env,
gpus=cargs.gpus if hasattr(cargs, 'gpus') else None,
)
def __init__(
self,
args: Union['Namespace', Dict],
k8s_namespace: Optional[str] = None,
k8s_connection_pool: bool = True,
k8s_deployments_addresses: Optional[Dict[str, List[str]]] = None,
):
# External Deployments should be ignored in a K8s based Flow
assert not (hasattr(args, 'external') and args.external)
if not validate_uses(args.uses):
raise NoContainerizedError(
f'Executor "{args.uses}" is not valid to be used in K8s. '
'You need to use a containerized Executor. You may check `jina hub --help` to see how Jina Hub can help you building containerized Executors.'
)
self.k8s_namespace = k8s_namespace
self.k8s_connection_pool = k8s_connection_pool
self.k8s_deployments_addresses = k8s_deployments_addresses
self.head_deployment = None
self.args = copy.copy(args)
if k8s_namespace is not None:
# otherwise it will remain with the one from the original Deployment
self.args.k8s_namespace = k8s_namespace
self.args.k8s_connection_pool = k8s_connection_pool
self.name = self.args.name
self.deployment_args = self._get_deployment_args(self.args)
if self.deployment_args['head_deployment'] is not None:
self.head_deployment = self._K8sDeployment(
name=self.deployment_args['head_deployment'].name,
version=get_base_executor_version(),
shard_id=None,
jina_deployment_name=self.name,
common_args=self.args,
deployment_args=self.deployment_args['head_deployment'],
pod_type=PodRoleType.HEAD,
k8s_namespace=self.k8s_namespace,
k8s_connection_pool=self.k8s_connection_pool,
k8s_deployments_addresses=self.k8s_deployments_addresses,
)
self.worker_deployments = []
deployment_args = self.deployment_args['deployments']
for i, args in enumerate(deployment_args):
name = f'{self.name}-{i}' if len(deployment_args) > 1 else f'{self.name}'
self.worker_deployments.append(
self._K8sDeployment(
name=name,
version=get_base_executor_version(),
shard_id=i,
common_args=self.args,
deployment_args=args,
pod_type=PodRoleType.WORKER
if name != 'gateway'
else PodRoleType.GATEWAY,
jina_deployment_name=self.name,
k8s_namespace=self.k8s_namespace,
k8s_connection_pool=self.k8s_connection_pool,
k8s_deployments_addresses=self.k8s_deployments_addresses
if name == 'gateway'
else None,
)
)
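# Helper below: splits the original args into head-deployment args (for non-gateway
# deployments) plus one per-shard copy of the args for the worker deployments.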
def _get_deployment_args(self, args):
parsed_args = {
'head_deployment': None,
'deployments': [],
}
shards = getattr(args, 'shards', 1)
uses_before = getattr(args, 'uses_before', None)
uses_after = getattr(args, 'uses_after', None)
if args.name != 'gateway':
parsed_args['head_deployment'] = BaseDeployment._copy_to_head_args(
self.args
)
parsed_args['head_deployment'].gpus = None
parsed_args['head_deployment'].port = K8sGrpcConnectionPool.K8S_PORT
parsed_args['head_deployment'].uses = None
parsed_args['head_deployment'].uses_metas = None
parsed_args['head_deployment'].uses_with = None
parsed_args['head_deployment'].env = None
# if the k8s connection pool is disabled, the head's connections are configured manually via an explicit connection list
if not self.k8s_connection_pool:
import json
connection_list = {}
for i in range(shards):
name = (
f'{to_compatible_name(self.name)}-{i}'
if shards > 1
else f'{to_compatible_name(self.name)}'
)
connection_list[
str(i)
] = f'{name}.{self.k8s_namespace}.svc:{K8sGrpcConnectionPool.K8S_PORT}'
parsed_args['head_deployment'].connection_list = json.dumps(
connection_list
)
if uses_before:
parsed_args[
'head_deployment'
].uses_before_address = (
f'127.0.0.1:{K8sGrpcConnectionPool.K8S_PORT_USES_BEFORE}'
)
if uses_after:
parsed_args[
'head_deployment'
].uses_after_address = (
f'127.0.0.1:{K8sGrpcConnectionPool.K8S_PORT_USES_AFTER}'
)
for i in range(shards):
cargs = copy.deepcopy(args)
cargs.shard_id = i
cargs.uses_before = None
cargs.uses_after = None
if args.name != 'gateway':
cargs.port = K8sGrpcConnectionPool.K8S_PORT
cargs.uses_before_address = None
cargs.uses_after_address = None
if shards > 1:
cargs.name = f'{cargs.name}-{i}'
if args.name == 'gateway':
cargs.pod_role = PodRoleType.GATEWAY
# the worker runtimes do not use the k8s connection pool, so disable it
else:
cargs.k8s_connection_pool = False
parsed_args['deployments'].append(cargs)
return parsed_args
def to_k8s_yaml(
self,
) -> List[Tuple[str, List[Dict]]]:
"""
Return a list of (name, configuration list) tuples, one entry for each deployment in this Deployment
.. # noqa: DAR201
.. # noqa: DAR101
"""
if self.name == 'gateway':
return [
(
'gateway',
self.worker_deployments[0].get_gateway_yamls(),
)
]
else:
deployments = [self.head_deployment]
deployments.extend(self.worker_deployments)
return [
(
deployment.dns_name,
deployment.get_runtime_yamls(),
)
for deployment in deployments
]
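# --- Editor's illustrative sketch (not part of the original file) ---
# A minimal example of how the output of `to_k8s_yaml()` could be written to disk,
# assuming `deployment_config` is an instance of the config class above and that
# PyYAML is installed; the file layout chosen here is only an assumption.
#
#   import os
#   import yaml
#
#   for name, configs in deployment_config.to_k8s_yaml():
#       os.makedirs(name, exist_ok=True)
#       with open(os.path.join(name, 'deployment.yml'), 'w') as f:
#           yaml.dump_all(configs, f)   # each dict becomes one YAML document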
| [] | [] | ["JINA_K8S_USE_TEST_PIP"] | [] | ["JINA_K8S_USE_TEST_PIP"] | python | 1 | 0 | |
sc2/paths.py | import logging
import os
import platform
import re
import subprocess
from pathlib import Path
logger = logging.getLogger(__name__)
BASEDIR = {
"Windows": "C:/Program Files (x86)/StarCraft II",
"Darwin": "/Applications/StarCraft II",
"Linux": "~/StarCraftII",
"WineLinux": "~/.wine/drive_c/Program Files (x86)/StarCraft II",
}
USERPATH = {
"Windows": "\\Documents\\StarCraft II\\ExecuteInfo.txt",
"Darwin": "/Library/Application Support/Blizzard/StarCraft II/ExecuteInfo.txt",
"Linux": None,
"WineLinux": None,
}
BINPATH = {
"Windows": "SC2_x64.exe",
"Darwin": "SC2.app/Contents/MacOS/SC2",
"Linux": "SC2_x64",
"WineLinux": "SC2_x64.exe",
}
CWD = {"Windows": "Support64", "Darwin": None, "Linux": None, "WineLinux": "Support64"}
PF = os.environ.get("SC2PF", platform.system())
def get_env():
# TODO: Linux env conf from: https://github.com/deepmind/pysc2/blob/master/pysc2/run_configs/platforms.py
return None
def get_runner_args(cwd):
if "WINE" in os.environ:
runner_dir = os.path.dirname(os.environ.get("WINE"))
# translate cwd from Unix to Windows path
win_cwd = subprocess.run(
[os.path.join(runner_dir, "winepath"), "-w", cwd],
capture_output=True,
text=True
).stdout.rstrip()
return [
os.environ.get("WINE"),
"start",
"/d",
win_cwd,
"/unix"
]
return []
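# Picks the newest Base<build> folder under the SC2 "Versions" directory (or the one
# matching `base_build`) and returns the path to the game binary inside it.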
def latest_executeble(versions_dir, base_build=None):
if base_build is None:
latest = max((int(p.name[4:]), p) for p in versions_dir.iterdir() if p.is_dir() and p.name.startswith("Base"))
else:
latest = (int(base_build[4:]), max(p for p in versions_dir.iterdir() if p.is_dir() and
p.name.startswith(str(base_build))))
version, path = latest
if version < 55958:
logger.critical(f"Your SC2 binary is too old. Upgrade to 3.16.1 or newer.")
exit(1)
return path / BINPATH[PF]
class _MetaPaths(type):
""""Lazily loads paths to allow importing the library even if SC2 isn't installed."""
def __setup(self):
if PF not in BASEDIR:
logger.critical(f"Unsupported platform '{PF}'")
exit(1)
try:
base = os.environ.get("SC2PATH")
if base is None and USERPATH[PF] is not None:
einfo = str(Path.home().expanduser()) + USERPATH[PF]
if os.path.isfile(einfo):
with open(einfo) as f:
content = f.read()
if content:
base = re.search(r" = (.*)Versions", content).group(1)
if not os.path.exists(base):
base = None
if base is None:
base = BASEDIR[PF]
self.BASE = Path(base).expanduser()
self.EXECUTABLE = latest_executeble(self.BASE / "Versions")
self.CWD = self.BASE / CWD[PF] if CWD[PF] else None
self.REPLAYS = self.BASE / "Replays"
if (self.BASE / "maps").exists():
self.MAPS = self.BASE / "maps"
else:
self.MAPS = self.BASE / "Maps"
except FileNotFoundError as e:
logger.critical(f"SC2 installation not found: File '{e.filename}' does not exist.")
exit(1)
def __getattr__(self, attr):
self.__setup()
return getattr(self, attr)
class Paths(metaclass=_MetaPaths):
"""Paths for SC2 folders, lazily loaded using the above metaclass."""
| [] | [] | ["SC2PF", "WINE", "SC2PATH"] | [] | ["SC2PF", "WINE", "SC2PATH"] | python | 3 | 0 | |
stdlib/zstdlib.go | // AUTO-GENERATED BY mkstdlib.go
package stdlib
var Symbols = map[string]string{
"adler32.Checksum": "hash/adler32",
"adler32.New": "hash/adler32",
"adler32.Size": "hash/adler32",
"aes.BlockSize": "crypto/aes",
"aes.KeySizeError": "crypto/aes",
"aes.NewCipher": "crypto/aes",
"ascii85.CorruptInputError": "encoding/ascii85",
"ascii85.Decode": "encoding/ascii85",
"ascii85.Encode": "encoding/ascii85",
"ascii85.MaxEncodedLen": "encoding/ascii85",
"ascii85.NewDecoder": "encoding/ascii85",
"ascii85.NewEncoder": "encoding/ascii85",
"asn1.BitString": "encoding/asn1",
"asn1.Enumerated": "encoding/asn1",
"asn1.Flag": "encoding/asn1",
"asn1.Marshal": "encoding/asn1",
"asn1.ObjectIdentifier": "encoding/asn1",
"asn1.RawContent": "encoding/asn1",
"asn1.RawValue": "encoding/asn1",
"asn1.StructuralError": "encoding/asn1",
"asn1.SyntaxError": "encoding/asn1",
"asn1.Unmarshal": "encoding/asn1",
"asn1.UnmarshalWithParams": "encoding/asn1",
"ast.ArrayType": "go/ast",
"ast.AssignStmt": "go/ast",
"ast.Bad": "go/ast",
"ast.BadDecl": "go/ast",
"ast.BadExpr": "go/ast",
"ast.BadStmt": "go/ast",
"ast.BasicLit": "go/ast",
"ast.BinaryExpr": "go/ast",
"ast.BlockStmt": "go/ast",
"ast.BranchStmt": "go/ast",
"ast.CallExpr": "go/ast",
"ast.CaseClause": "go/ast",
"ast.ChanDir": "go/ast",
"ast.ChanType": "go/ast",
"ast.CommClause": "go/ast",
"ast.Comment": "go/ast",
"ast.CommentGroup": "go/ast",
"ast.CommentMap": "go/ast",
"ast.CompositeLit": "go/ast",
"ast.Con": "go/ast",
"ast.DeclStmt": "go/ast",
"ast.DeferStmt": "go/ast",
"ast.Ellipsis": "go/ast",
"ast.EmptyStmt": "go/ast",
"ast.ExprStmt": "go/ast",
"ast.Field": "go/ast",
"ast.FieldFilter": "go/ast",
"ast.FieldList": "go/ast",
"ast.File": "go/ast",
"ast.FileExports": "go/ast",
"ast.Filter": "go/ast",
"ast.FilterDecl": "go/ast",
"ast.FilterFile": "go/ast",
"ast.FilterFuncDuplicates": "go/ast",
"ast.FilterImportDuplicates": "go/ast",
"ast.FilterPackage": "go/ast",
"ast.FilterUnassociatedComments": "go/ast",
"ast.ForStmt": "go/ast",
"ast.Fprint": "go/ast",
"ast.Fun": "go/ast",
"ast.FuncDecl": "go/ast",
"ast.FuncLit": "go/ast",
"ast.FuncType": "go/ast",
"ast.GenDecl": "go/ast",
"ast.GoStmt": "go/ast",
"ast.Ident": "go/ast",
"ast.IfStmt": "go/ast",
"ast.ImportSpec": "go/ast",
"ast.Importer": "go/ast",
"ast.IncDecStmt": "go/ast",
"ast.IndexExpr": "go/ast",
"ast.Inspect": "go/ast",
"ast.InterfaceType": "go/ast",
"ast.IsExported": "go/ast",
"ast.KeyValueExpr": "go/ast",
"ast.LabeledStmt": "go/ast",
"ast.Lbl": "go/ast",
"ast.MapType": "go/ast",
"ast.MergeMode": "go/ast",
"ast.MergePackageFiles": "go/ast",
"ast.NewCommentMap": "go/ast",
"ast.NewIdent": "go/ast",
"ast.NewObj": "go/ast",
"ast.NewPackage": "go/ast",
"ast.NewScope": "go/ast",
"ast.Node": "go/ast",
"ast.NotNilFilter": "go/ast",
"ast.ObjKind": "go/ast",
"ast.Object": "go/ast",
"ast.Package": "go/ast",
"ast.PackageExports": "go/ast",
"ast.ParenExpr": "go/ast",
"ast.Pkg": "go/ast",
"ast.Print": "go/ast",
"ast.RECV": "go/ast",
"ast.RangeStmt": "go/ast",
"ast.ReturnStmt": "go/ast",
"ast.SEND": "go/ast",
"ast.Scope": "go/ast",
"ast.SelectStmt": "go/ast",
"ast.SelectorExpr": "go/ast",
"ast.SendStmt": "go/ast",
"ast.SliceExpr": "go/ast",
"ast.SortImports": "go/ast",
"ast.StarExpr": "go/ast",
"ast.StructType": "go/ast",
"ast.SwitchStmt": "go/ast",
"ast.Typ": "go/ast",
"ast.TypeAssertExpr": "go/ast",
"ast.TypeSpec": "go/ast",
"ast.TypeSwitchStmt": "go/ast",
"ast.UnaryExpr": "go/ast",
"ast.ValueSpec": "go/ast",
"ast.Var": "go/ast",
"ast.Visitor": "go/ast",
"ast.Walk": "go/ast",
"atomic.AddInt32": "sync/atomic",
"atomic.AddInt64": "sync/atomic",
"atomic.AddUint32": "sync/atomic",
"atomic.AddUint64": "sync/atomic",
"atomic.AddUintptr": "sync/atomic",
"atomic.CompareAndSwapInt32": "sync/atomic",
"atomic.CompareAndSwapInt64": "sync/atomic",
"atomic.CompareAndSwapPointer": "sync/atomic",
"atomic.CompareAndSwapUint32": "sync/atomic",
"atomic.CompareAndSwapUint64": "sync/atomic",
"atomic.CompareAndSwapUintptr": "sync/atomic",
"atomic.LoadInt32": "sync/atomic",
"atomic.LoadInt64": "sync/atomic",
"atomic.LoadPointer": "sync/atomic",
"atomic.LoadUint32": "sync/atomic",
"atomic.LoadUint64": "sync/atomic",
"atomic.LoadUintptr": "sync/atomic",
"atomic.StoreInt32": "sync/atomic",
"atomic.StoreInt64": "sync/atomic",
"atomic.StorePointer": "sync/atomic",
"atomic.StoreUint32": "sync/atomic",
"atomic.StoreUint64": "sync/atomic",
"atomic.StoreUintptr": "sync/atomic",
"atomic.SwapInt32": "sync/atomic",
"atomic.SwapInt64": "sync/atomic",
"atomic.SwapPointer": "sync/atomic",
"atomic.SwapUint32": "sync/atomic",
"atomic.SwapUint64": "sync/atomic",
"atomic.SwapUintptr": "sync/atomic",
"atomic.Value": "sync/atomic",
"base32.CorruptInputError": "encoding/base32",
"base32.Encoding": "encoding/base32",
"base32.HexEncoding": "encoding/base32",
"base32.NewDecoder": "encoding/base32",
"base32.NewEncoder": "encoding/base32",
"base32.NewEncoding": "encoding/base32",
"base32.StdEncoding": "encoding/base32",
"base64.CorruptInputError": "encoding/base64",
"base64.Encoding": "encoding/base64",
"base64.NewDecoder": "encoding/base64",
"base64.NewEncoder": "encoding/base64",
"base64.NewEncoding": "encoding/base64",
"base64.NoPadding": "encoding/base64",
"base64.RawStdEncoding": "encoding/base64",
"base64.RawURLEncoding": "encoding/base64",
"base64.StdEncoding": "encoding/base64",
"base64.StdPadding": "encoding/base64",
"base64.URLEncoding": "encoding/base64",
"big.Above": "math/big",
"big.Accuracy": "math/big",
"big.AwayFromZero": "math/big",
"big.Below": "math/big",
"big.ErrNaN": "math/big",
"big.Exact": "math/big",
"big.Float": "math/big",
"big.Int": "math/big",
"big.Jacobi": "math/big",
"big.MaxBase": "math/big",
"big.MaxExp": "math/big",
"big.MaxPrec": "math/big",
"big.MinExp": "math/big",
"big.NewFloat": "math/big",
"big.NewInt": "math/big",
"big.NewRat": "math/big",
"big.ParseFloat": "math/big",
"big.Rat": "math/big",
"big.RoundingMode": "math/big",
"big.ToNearestAway": "math/big",
"big.ToNearestEven": "math/big",
"big.ToNegativeInf": "math/big",
"big.ToPositiveInf": "math/big",
"big.ToZero": "math/big",
"big.Word": "math/big",
"binary.BigEndian": "encoding/binary",
"binary.ByteOrder": "encoding/binary",
"binary.LittleEndian": "encoding/binary",
"binary.MaxVarintLen16": "encoding/binary",
"binary.MaxVarintLen32": "encoding/binary",
"binary.MaxVarintLen64": "encoding/binary",
"binary.PutUvarint": "encoding/binary",
"binary.PutVarint": "encoding/binary",
"binary.Read": "encoding/binary",
"binary.ReadUvarint": "encoding/binary",
"binary.ReadVarint": "encoding/binary",
"binary.Size": "encoding/binary",
"binary.Uvarint": "encoding/binary",
"binary.Varint": "encoding/binary",
"binary.Write": "encoding/binary",
"bufio.ErrAdvanceTooFar": "bufio",
"bufio.ErrBufferFull": "bufio",
"bufio.ErrInvalidUnreadByte": "bufio",
"bufio.ErrInvalidUnreadRune": "bufio",
"bufio.ErrNegativeAdvance": "bufio",
"bufio.ErrNegativeCount": "bufio",
"bufio.ErrTooLong": "bufio",
"bufio.MaxScanTokenSize": "bufio",
"bufio.NewReadWriter": "bufio",
"bufio.NewReader": "bufio",
"bufio.NewReaderSize": "bufio",
"bufio.NewScanner": "bufio",
"bufio.NewWriter": "bufio",
"bufio.NewWriterSize": "bufio",
"bufio.ReadWriter": "bufio",
"bufio.Reader": "bufio",
"bufio.ScanBytes": "bufio",
"bufio.ScanLines": "bufio",
"bufio.ScanRunes": "bufio",
"bufio.ScanWords": "bufio",
"bufio.Scanner": "bufio",
"bufio.SplitFunc": "bufio",
"bufio.Writer": "bufio",
"build.AllowBinary": "go/build",
"build.ArchChar": "go/build",
"build.Context": "go/build",
"build.Default": "go/build",
"build.FindOnly": "go/build",
"build.Import": "go/build",
"build.ImportComment": "go/build",
"build.ImportDir": "go/build",
"build.ImportMode": "go/build",
"build.IsLocalImport": "go/build",
"build.MultiplePackageError": "go/build",
"build.NoGoError": "go/build",
"build.Package": "go/build",
"build.ToolDir": "go/build",
"bytes.Buffer": "bytes",
"bytes.Compare": "bytes",
"bytes.Contains": "bytes",
"bytes.Count": "bytes",
"bytes.Equal": "bytes",
"bytes.EqualFold": "bytes",
"bytes.ErrTooLarge": "bytes",
"bytes.Fields": "bytes",
"bytes.FieldsFunc": "bytes",
"bytes.HasPrefix": "bytes",
"bytes.HasSuffix": "bytes",
"bytes.Index": "bytes",
"bytes.IndexAny": "bytes",
"bytes.IndexByte": "bytes",
"bytes.IndexFunc": "bytes",
"bytes.IndexRune": "bytes",
"bytes.Join": "bytes",
"bytes.LastIndex": "bytes",
"bytes.LastIndexAny": "bytes",
"bytes.LastIndexByte": "bytes",
"bytes.LastIndexFunc": "bytes",
"bytes.Map": "bytes",
"bytes.MinRead": "bytes",
"bytes.NewBuffer": "bytes",
"bytes.NewBufferString": "bytes",
"bytes.NewReader": "bytes",
"bytes.Reader": "bytes",
"bytes.Repeat": "bytes",
"bytes.Replace": "bytes",
"bytes.Runes": "bytes",
"bytes.Split": "bytes",
"bytes.SplitAfter": "bytes",
"bytes.SplitAfterN": "bytes",
"bytes.SplitN": "bytes",
"bytes.Title": "bytes",
"bytes.ToLower": "bytes",
"bytes.ToLowerSpecial": "bytes",
"bytes.ToTitle": "bytes",
"bytes.ToTitleSpecial": "bytes",
"bytes.ToUpper": "bytes",
"bytes.ToUpperSpecial": "bytes",
"bytes.Trim": "bytes",
"bytes.TrimFunc": "bytes",
"bytes.TrimLeft": "bytes",
"bytes.TrimLeftFunc": "bytes",
"bytes.TrimPrefix": "bytes",
"bytes.TrimRight": "bytes",
"bytes.TrimRightFunc": "bytes",
"bytes.TrimSpace": "bytes",
"bytes.TrimSuffix": "bytes",
"bzip2.NewReader": "compress/bzip2",
"bzip2.StructuralError": "compress/bzip2",
"cgi.Handler": "net/http/cgi",
"cgi.Request": "net/http/cgi",
"cgi.RequestFromMap": "net/http/cgi",
"cgi.Serve": "net/http/cgi",
"cipher.AEAD": "crypto/cipher",
"cipher.Block": "crypto/cipher",
"cipher.BlockMode": "crypto/cipher",
"cipher.NewCBCDecrypter": "crypto/cipher",
"cipher.NewCBCEncrypter": "crypto/cipher",
"cipher.NewCFBDecrypter": "crypto/cipher",
"cipher.NewCFBEncrypter": "crypto/cipher",
"cipher.NewCTR": "crypto/cipher",
"cipher.NewGCM": "crypto/cipher",
"cipher.NewGCMWithNonceSize": "crypto/cipher",
"cipher.NewOFB": "crypto/cipher",
"cipher.Stream": "crypto/cipher",
"cipher.StreamReader": "crypto/cipher",
"cipher.StreamWriter": "crypto/cipher",
"cmplx.Abs": "math/cmplx",
"cmplx.Acos": "math/cmplx",
"cmplx.Acosh": "math/cmplx",
"cmplx.Asin": "math/cmplx",
"cmplx.Asinh": "math/cmplx",
"cmplx.Atan": "math/cmplx",
"cmplx.Atanh": "math/cmplx",
"cmplx.Conj": "math/cmplx",
"cmplx.Cos": "math/cmplx",
"cmplx.Cosh": "math/cmplx",
"cmplx.Cot": "math/cmplx",
"cmplx.Exp": "math/cmplx",
"cmplx.Inf": "math/cmplx",
"cmplx.IsInf": "math/cmplx",
"cmplx.IsNaN": "math/cmplx",
"cmplx.Log": "math/cmplx",
"cmplx.Log10": "math/cmplx",
"cmplx.NaN": "math/cmplx",
"cmplx.Phase": "math/cmplx",
"cmplx.Polar": "math/cmplx",
"cmplx.Pow": "math/cmplx",
"cmplx.Rect": "math/cmplx",
"cmplx.Sin": "math/cmplx",
"cmplx.Sinh": "math/cmplx",
"cmplx.Sqrt": "math/cmplx",
"cmplx.Tan": "math/cmplx",
"cmplx.Tanh": "math/cmplx",
"color.Alpha": "image/color",
"color.Alpha16": "image/color",
"color.Alpha16Model": "image/color",
"color.AlphaModel": "image/color",
"color.Black": "image/color",
"color.CMYK": "image/color",
"color.CMYKModel": "image/color",
"color.CMYKToRGB": "image/color",
"color.Color": "image/color",
"color.Gray": "image/color",
"color.Gray16": "image/color",
"color.Gray16Model": "image/color",
"color.GrayModel": "image/color",
"color.Model": "image/color",
"color.ModelFunc": "image/color",
"color.NRGBA": "image/color",
"color.NRGBA64": "image/color",
"color.NRGBA64Model": "image/color",
"color.NRGBAModel": "image/color",
"color.Opaque": "image/color",
"color.Palette": "image/color",
"color.RGBA": "image/color",
"color.RGBA64": "image/color",
"color.RGBA64Model": "image/color",
"color.RGBAModel": "image/color",
"color.RGBToCMYK": "image/color",
"color.RGBToYCbCr": "image/color",
"color.Transparent": "image/color",
"color.White": "image/color",
"color.YCbCr": "image/color",
"color.YCbCrModel": "image/color",
"color.YCbCrToRGB": "image/color",
"constant.BinaryOp": "go/constant",
"constant.BitLen": "go/constant",
"constant.Bool": "go/constant",
"constant.BoolVal": "go/constant",
"constant.Bytes": "go/constant",
"constant.Compare": "go/constant",
"constant.Complex": "go/constant",
"constant.Denom": "go/constant",
"constant.Float": "go/constant",
"constant.Float32Val": "go/constant",
"constant.Float64Val": "go/constant",
"constant.Imag": "go/constant",
"constant.Int": "go/constant",
"constant.Int64Val": "go/constant",
"constant.Kind": "go/constant",
"constant.MakeBool": "go/constant",
"constant.MakeFloat64": "go/constant",
"constant.MakeFromBytes": "go/constant",
"constant.MakeFromLiteral": "go/constant",
"constant.MakeImag": "go/constant",
"constant.MakeInt64": "go/constant",
"constant.MakeString": "go/constant",
"constant.MakeUint64": "go/constant",
"constant.MakeUnknown": "go/constant",
"constant.Num": "go/constant",
"constant.Real": "go/constant",
"constant.Shift": "go/constant",
"constant.Sign": "go/constant",
"constant.String": "go/constant",
"constant.StringVal": "go/constant",
"constant.Uint64Val": "go/constant",
"constant.UnaryOp": "go/constant",
"constant.Unknown": "go/constant",
"cookiejar.Jar": "net/http/cookiejar",
"cookiejar.New": "net/http/cookiejar",
"cookiejar.Options": "net/http/cookiejar",
"cookiejar.PublicSuffixList": "net/http/cookiejar",
"crc32.Castagnoli": "hash/crc32",
"crc32.Checksum": "hash/crc32",
"crc32.ChecksumIEEE": "hash/crc32",
"crc32.IEEE": "hash/crc32",
"crc32.IEEETable": "hash/crc32",
"crc32.Koopman": "hash/crc32",
"crc32.MakeTable": "hash/crc32",
"crc32.New": "hash/crc32",
"crc32.NewIEEE": "hash/crc32",
"crc32.Size": "hash/crc32",
"crc32.Table": "hash/crc32",
"crc32.Update": "hash/crc32",
"crc64.Checksum": "hash/crc64",
"crc64.ECMA": "hash/crc64",
"crc64.ISO": "hash/crc64",
"crc64.MakeTable": "hash/crc64",
"crc64.New": "hash/crc64",
"crc64.Size": "hash/crc64",
"crc64.Table": "hash/crc64",
"crc64.Update": "hash/crc64",
"crypto.Decrypter": "crypto",
"crypto.DecrypterOpts": "crypto",
"crypto.Hash": "crypto",
"crypto.MD4": "crypto",
"crypto.MD5": "crypto",
"crypto.MD5SHA1": "crypto",
"crypto.PrivateKey": "crypto",
"crypto.PublicKey": "crypto",
"crypto.RIPEMD160": "crypto",
"crypto.RegisterHash": "crypto",
"crypto.SHA1": "crypto",
"crypto.SHA224": "crypto",
"crypto.SHA256": "crypto",
"crypto.SHA384": "crypto",
"crypto.SHA3_224": "crypto",
"crypto.SHA3_256": "crypto",
"crypto.SHA3_384": "crypto",
"crypto.SHA3_512": "crypto",
"crypto.SHA512": "crypto",
"crypto.SHA512_224": "crypto",
"crypto.SHA512_256": "crypto",
"crypto.Signer": "crypto",
"crypto.SignerOpts": "crypto",
"csv.ErrBareQuote": "encoding/csv",
"csv.ErrFieldCount": "encoding/csv",
"csv.ErrQuote": "encoding/csv",
"csv.ErrTrailingComma": "encoding/csv",
"csv.NewReader": "encoding/csv",
"csv.NewWriter": "encoding/csv",
"csv.ParseError": "encoding/csv",
"csv.Reader": "encoding/csv",
"csv.Writer": "encoding/csv",
"debug.FreeOSMemory": "runtime/debug",
"debug.GCStats": "runtime/debug",
"debug.PrintStack": "runtime/debug",
"debug.ReadGCStats": "runtime/debug",
"debug.SetGCPercent": "runtime/debug",
"debug.SetMaxStack": "runtime/debug",
"debug.SetMaxThreads": "runtime/debug",
"debug.SetPanicOnFault": "runtime/debug",
"debug.Stack": "runtime/debug",
"debug.WriteHeapDump": "runtime/debug",
"des.BlockSize": "crypto/des",
"des.KeySizeError": "crypto/des",
"des.NewCipher": "crypto/des",
"des.NewTripleDESCipher": "crypto/des",
"doc.AllDecls": "go/doc",
"doc.AllMethods": "go/doc",
"doc.Example": "go/doc",
"doc.Examples": "go/doc",
"doc.Filter": "go/doc",
"doc.Func": "go/doc",
"doc.IllegalPrefixes": "go/doc",
"doc.Mode": "go/doc",
"doc.New": "go/doc",
"doc.Note": "go/doc",
"doc.Package": "go/doc",
"doc.Synopsis": "go/doc",
"doc.ToHTML": "go/doc",
"doc.ToText": "go/doc",
"doc.Type": "go/doc",
"doc.Value": "go/doc",
"draw.Draw": "image/draw",
"draw.DrawMask": "image/draw",
"draw.Drawer": "image/draw",
"draw.FloydSteinberg": "image/draw",
"draw.Image": "image/draw",
"draw.Op": "image/draw",
"draw.Over": "image/draw",
"draw.Quantizer": "image/draw",
"draw.Src": "image/draw",
"driver.Bool": "database/sql/driver",
"driver.ColumnConverter": "database/sql/driver",
"driver.Conn": "database/sql/driver",
"driver.DefaultParameterConverter": "database/sql/driver",
"driver.Driver": "database/sql/driver",
"driver.ErrBadConn": "database/sql/driver",
"driver.ErrSkip": "database/sql/driver",
"driver.Execer": "database/sql/driver",
"driver.Int32": "database/sql/driver",
"driver.IsScanValue": "database/sql/driver",
"driver.IsValue": "database/sql/driver",
"driver.NotNull": "database/sql/driver",
"driver.Null": "database/sql/driver",
"driver.Queryer": "database/sql/driver",
"driver.Result": "database/sql/driver",
"driver.ResultNoRows": "database/sql/driver",
"driver.Rows": "database/sql/driver",
"driver.RowsAffected": "database/sql/driver",
"driver.Stmt": "database/sql/driver",
"driver.String": "database/sql/driver",
"driver.Tx": "database/sql/driver",
"driver.Value": "database/sql/driver",
"driver.ValueConverter": "database/sql/driver",
"driver.Valuer": "database/sql/driver",
"dsa.ErrInvalidPublicKey": "crypto/dsa",
"dsa.GenerateKey": "crypto/dsa",
"dsa.GenerateParameters": "crypto/dsa",
"dsa.L1024N160": "crypto/dsa",
"dsa.L2048N224": "crypto/dsa",
"dsa.L2048N256": "crypto/dsa",
"dsa.L3072N256": "crypto/dsa",
"dsa.ParameterSizes": "crypto/dsa",
"dsa.Parameters": "crypto/dsa",
"dsa.PrivateKey": "crypto/dsa",
"dsa.PublicKey": "crypto/dsa",
"dsa.Sign": "crypto/dsa",
"dsa.Verify": "crypto/dsa",
"dwarf.AddrType": "debug/dwarf",
"dwarf.ArrayType": "debug/dwarf",
"dwarf.Attr": "debug/dwarf",
"dwarf.AttrAbstractOrigin": "debug/dwarf",
"dwarf.AttrAccessibility": "debug/dwarf",
"dwarf.AttrAddrClass": "debug/dwarf",
"dwarf.AttrAllocated": "debug/dwarf",
"dwarf.AttrArtificial": "debug/dwarf",
"dwarf.AttrAssociated": "debug/dwarf",
"dwarf.AttrBaseTypes": "debug/dwarf",
"dwarf.AttrBitOffset": "debug/dwarf",
"dwarf.AttrBitSize": "debug/dwarf",
"dwarf.AttrByteSize": "debug/dwarf",
"dwarf.AttrCallColumn": "debug/dwarf",
"dwarf.AttrCallFile": "debug/dwarf",
"dwarf.AttrCallLine": "debug/dwarf",
"dwarf.AttrCalling": "debug/dwarf",
"dwarf.AttrCommonRef": "debug/dwarf",
"dwarf.AttrCompDir": "debug/dwarf",
"dwarf.AttrConstValue": "debug/dwarf",
"dwarf.AttrContainingType": "debug/dwarf",
"dwarf.AttrCount": "debug/dwarf",
"dwarf.AttrDataLocation": "debug/dwarf",
"dwarf.AttrDataMemberLoc": "debug/dwarf",
"dwarf.AttrDeclColumn": "debug/dwarf",
"dwarf.AttrDeclFile": "debug/dwarf",
"dwarf.AttrDeclLine": "debug/dwarf",
"dwarf.AttrDeclaration": "debug/dwarf",
"dwarf.AttrDefaultValue": "debug/dwarf",
"dwarf.AttrDescription": "debug/dwarf",
"dwarf.AttrDiscr": "debug/dwarf",
"dwarf.AttrDiscrList": "debug/dwarf",
"dwarf.AttrDiscrValue": "debug/dwarf",
"dwarf.AttrEncoding": "debug/dwarf",
"dwarf.AttrEntrypc": "debug/dwarf",
"dwarf.AttrExtension": "debug/dwarf",
"dwarf.AttrExternal": "debug/dwarf",
"dwarf.AttrFrameBase": "debug/dwarf",
"dwarf.AttrFriend": "debug/dwarf",
"dwarf.AttrHighpc": "debug/dwarf",
"dwarf.AttrIdentifierCase": "debug/dwarf",
"dwarf.AttrImport": "debug/dwarf",
"dwarf.AttrInline": "debug/dwarf",
"dwarf.AttrIsOptional": "debug/dwarf",
"dwarf.AttrLanguage": "debug/dwarf",
"dwarf.AttrLocation": "debug/dwarf",
"dwarf.AttrLowerBound": "debug/dwarf",
"dwarf.AttrLowpc": "debug/dwarf",
"dwarf.AttrMacroInfo": "debug/dwarf",
"dwarf.AttrName": "debug/dwarf",
"dwarf.AttrNamelistItem": "debug/dwarf",
"dwarf.AttrOrdering": "debug/dwarf",
"dwarf.AttrPriority": "debug/dwarf",
"dwarf.AttrProducer": "debug/dwarf",
"dwarf.AttrPrototyped": "debug/dwarf",
"dwarf.AttrRanges": "debug/dwarf",
"dwarf.AttrReturnAddr": "debug/dwarf",
"dwarf.AttrSegment": "debug/dwarf",
"dwarf.AttrSibling": "debug/dwarf",
"dwarf.AttrSpecification": "debug/dwarf",
"dwarf.AttrStartScope": "debug/dwarf",
"dwarf.AttrStaticLink": "debug/dwarf",
"dwarf.AttrStmtList": "debug/dwarf",
"dwarf.AttrStride": "debug/dwarf",
"dwarf.AttrStrideSize": "debug/dwarf",
"dwarf.AttrStringLength": "debug/dwarf",
"dwarf.AttrTrampoline": "debug/dwarf",
"dwarf.AttrType": "debug/dwarf",
"dwarf.AttrUpperBound": "debug/dwarf",
"dwarf.AttrUseLocation": "debug/dwarf",
"dwarf.AttrUseUTF8": "debug/dwarf",
"dwarf.AttrVarParam": "debug/dwarf",
"dwarf.AttrVirtuality": "debug/dwarf",
"dwarf.AttrVisibility": "debug/dwarf",
"dwarf.AttrVtableElemLoc": "debug/dwarf",
"dwarf.BasicType": "debug/dwarf",
"dwarf.BoolType": "debug/dwarf",
"dwarf.CharType": "debug/dwarf",
"dwarf.Class": "debug/dwarf",
"dwarf.ClassAddress": "debug/dwarf",
"dwarf.ClassBlock": "debug/dwarf",
"dwarf.ClassConstant": "debug/dwarf",
"dwarf.ClassExprLoc": "debug/dwarf",
"dwarf.ClassFlag": "debug/dwarf",
"dwarf.ClassLinePtr": "debug/dwarf",
"dwarf.ClassLocListPtr": "debug/dwarf",
"dwarf.ClassMacPtr": "debug/dwarf",
"dwarf.ClassRangeListPtr": "debug/dwarf",
"dwarf.ClassReference": "debug/dwarf",
"dwarf.ClassReferenceAlt": "debug/dwarf",
"dwarf.ClassReferenceSig": "debug/dwarf",
"dwarf.ClassString": "debug/dwarf",
"dwarf.ClassStringAlt": "debug/dwarf",
"dwarf.CommonType": "debug/dwarf",
"dwarf.ComplexType": "debug/dwarf",
"dwarf.Data": "debug/dwarf",
"dwarf.DecodeError": "debug/dwarf",
"dwarf.DotDotDotType": "debug/dwarf",
"dwarf.Entry": "debug/dwarf",
"dwarf.EnumType": "debug/dwarf",
"dwarf.EnumValue": "debug/dwarf",
"dwarf.ErrUnknownPC": "debug/dwarf",
"dwarf.Field": "debug/dwarf",
"dwarf.FloatType": "debug/dwarf",
"dwarf.FuncType": "debug/dwarf",
"dwarf.IntType": "debug/dwarf",
"dwarf.LineEntry": "debug/dwarf",
"dwarf.LineFile": "debug/dwarf",
"dwarf.LineReader": "debug/dwarf",
"dwarf.LineReaderPos": "debug/dwarf",
"dwarf.New": "debug/dwarf",
"dwarf.Offset": "debug/dwarf",
"dwarf.PtrType": "debug/dwarf",
"dwarf.QualType": "debug/dwarf",
"dwarf.Reader": "debug/dwarf",
"dwarf.StructField": "debug/dwarf",
"dwarf.StructType": "debug/dwarf",
"dwarf.Tag": "debug/dwarf",
"dwarf.TagAccessDeclaration": "debug/dwarf",
"dwarf.TagArrayType": "debug/dwarf",
"dwarf.TagBaseType": "debug/dwarf",
"dwarf.TagCatchDwarfBlock": "debug/dwarf",
"dwarf.TagClassType": "debug/dwarf",
"dwarf.TagCommonDwarfBlock": "debug/dwarf",
"dwarf.TagCommonInclusion": "debug/dwarf",
"dwarf.TagCompileUnit": "debug/dwarf",
"dwarf.TagCondition": "debug/dwarf",
"dwarf.TagConstType": "debug/dwarf",
"dwarf.TagConstant": "debug/dwarf",
"dwarf.TagDwarfProcedure": "debug/dwarf",
"dwarf.TagEntryPoint": "debug/dwarf",
"dwarf.TagEnumerationType": "debug/dwarf",
"dwarf.TagEnumerator": "debug/dwarf",
"dwarf.TagFileType": "debug/dwarf",
"dwarf.TagFormalParameter": "debug/dwarf",
"dwarf.TagFriend": "debug/dwarf",
"dwarf.TagImportedDeclaration": "debug/dwarf",
"dwarf.TagImportedModule": "debug/dwarf",
"dwarf.TagImportedUnit": "debug/dwarf",
"dwarf.TagInheritance": "debug/dwarf",
"dwarf.TagInlinedSubroutine": "debug/dwarf",
"dwarf.TagInterfaceType": "debug/dwarf",
"dwarf.TagLabel": "debug/dwarf",
"dwarf.TagLexDwarfBlock": "debug/dwarf",
"dwarf.TagMember": "debug/dwarf",
"dwarf.TagModule": "debug/dwarf",
"dwarf.TagMutableType": "debug/dwarf",
"dwarf.TagNamelist": "debug/dwarf",
"dwarf.TagNamelistItem": "debug/dwarf",
"dwarf.TagNamespace": "debug/dwarf",
"dwarf.TagPackedType": "debug/dwarf",
"dwarf.TagPartialUnit": "debug/dwarf",
"dwarf.TagPointerType": "debug/dwarf",
"dwarf.TagPtrToMemberType": "debug/dwarf",
"dwarf.TagReferenceType": "debug/dwarf",
"dwarf.TagRestrictType": "debug/dwarf",
"dwarf.TagRvalueReferenceType": "debug/dwarf",
"dwarf.TagSetType": "debug/dwarf",
"dwarf.TagSharedType": "debug/dwarf",
"dwarf.TagStringType": "debug/dwarf",
"dwarf.TagStructType": "debug/dwarf",
"dwarf.TagSubprogram": "debug/dwarf",
"dwarf.TagSubrangeType": "debug/dwarf",
"dwarf.TagSubroutineType": "debug/dwarf",
"dwarf.TagTemplateAlias": "debug/dwarf",
"dwarf.TagTemplateTypeParameter": "debug/dwarf",
"dwarf.TagTemplateValueParameter": "debug/dwarf",
"dwarf.TagThrownType": "debug/dwarf",
"dwarf.TagTryDwarfBlock": "debug/dwarf",
"dwarf.TagTypeUnit": "debug/dwarf",
"dwarf.TagTypedef": "debug/dwarf",
"dwarf.TagUnionType": "debug/dwarf",
"dwarf.TagUnspecifiedParameters": "debug/dwarf",
"dwarf.TagUnspecifiedType": "debug/dwarf",
"dwarf.TagVariable": "debug/dwarf",
"dwarf.TagVariant": "debug/dwarf",
"dwarf.TagVariantPart": "debug/dwarf",
"dwarf.TagVolatileType": "debug/dwarf",
"dwarf.TagWithStmt": "debug/dwarf",
"dwarf.Type": "debug/dwarf",
"dwarf.TypedefType": "debug/dwarf",
"dwarf.UcharType": "debug/dwarf",
"dwarf.UintType": "debug/dwarf",
"dwarf.UnspecifiedType": "debug/dwarf",
"dwarf.VoidType": "debug/dwarf",
"ecdsa.GenerateKey": "crypto/ecdsa",
"ecdsa.PrivateKey": "crypto/ecdsa",
"ecdsa.PublicKey": "crypto/ecdsa",
"ecdsa.Sign": "crypto/ecdsa",
"ecdsa.Verify": "crypto/ecdsa",
"elf.ARM_MAGIC_TRAMP_NUMBER": "debug/elf",
"elf.Class": "debug/elf",
"elf.DF_BIND_NOW": "debug/elf",
"elf.DF_ORIGIN": "debug/elf",
"elf.DF_STATIC_TLS": "debug/elf",
"elf.DF_SYMBOLIC": "debug/elf",
"elf.DF_TEXTREL": "debug/elf",
"elf.DT_BIND_NOW": "debug/elf",
"elf.DT_DEBUG": "debug/elf",
"elf.DT_ENCODING": "debug/elf",
"elf.DT_FINI": "debug/elf",
"elf.DT_FINI_ARRAY": "debug/elf",
"elf.DT_FINI_ARRAYSZ": "debug/elf",
"elf.DT_FLAGS": "debug/elf",
"elf.DT_HASH": "debug/elf",
"elf.DT_HIOS": "debug/elf",
"elf.DT_HIPROC": "debug/elf",
"elf.DT_INIT": "debug/elf",
"elf.DT_INIT_ARRAY": "debug/elf",
"elf.DT_INIT_ARRAYSZ": "debug/elf",
"elf.DT_JMPREL": "debug/elf",
"elf.DT_LOOS": "debug/elf",
"elf.DT_LOPROC": "debug/elf",
"elf.DT_NEEDED": "debug/elf",
"elf.DT_NULL": "debug/elf",
"elf.DT_PLTGOT": "debug/elf",
"elf.DT_PLTREL": "debug/elf",
"elf.DT_PLTRELSZ": "debug/elf",
"elf.DT_PREINIT_ARRAY": "debug/elf",
"elf.DT_PREINIT_ARRAYSZ": "debug/elf",
"elf.DT_REL": "debug/elf",
"elf.DT_RELA": "debug/elf",
"elf.DT_RELAENT": "debug/elf",
"elf.DT_RELASZ": "debug/elf",
"elf.DT_RELENT": "debug/elf",
"elf.DT_RELSZ": "debug/elf",
"elf.DT_RPATH": "debug/elf",
"elf.DT_RUNPATH": "debug/elf",
"elf.DT_SONAME": "debug/elf",
"elf.DT_STRSZ": "debug/elf",
"elf.DT_STRTAB": "debug/elf",
"elf.DT_SYMBOLIC": "debug/elf",
"elf.DT_SYMENT": "debug/elf",
"elf.DT_SYMTAB": "debug/elf",
"elf.DT_TEXTREL": "debug/elf",
"elf.DT_VERNEED": "debug/elf",
"elf.DT_VERNEEDNUM": "debug/elf",
"elf.DT_VERSYM": "debug/elf",
"elf.Data": "debug/elf",
"elf.Dyn32": "debug/elf",
"elf.Dyn64": "debug/elf",
"elf.DynFlag": "debug/elf",
"elf.DynTag": "debug/elf",
"elf.EI_ABIVERSION": "debug/elf",
"elf.EI_CLASS": "debug/elf",
"elf.EI_DATA": "debug/elf",
"elf.EI_NIDENT": "debug/elf",
"elf.EI_OSABI": "debug/elf",
"elf.EI_PAD": "debug/elf",
"elf.EI_VERSION": "debug/elf",
"elf.ELFCLASS32": "debug/elf",
"elf.ELFCLASS64": "debug/elf",
"elf.ELFCLASSNONE": "debug/elf",
"elf.ELFDATA2LSB": "debug/elf",
"elf.ELFDATA2MSB": "debug/elf",
"elf.ELFDATANONE": "debug/elf",
"elf.ELFMAG": "debug/elf",
"elf.ELFOSABI_86OPEN": "debug/elf",
"elf.ELFOSABI_AIX": "debug/elf",
"elf.ELFOSABI_ARM": "debug/elf",
"elf.ELFOSABI_FREEBSD": "debug/elf",
"elf.ELFOSABI_HPUX": "debug/elf",
"elf.ELFOSABI_HURD": "debug/elf",
"elf.ELFOSABI_IRIX": "debug/elf",
"elf.ELFOSABI_LINUX": "debug/elf",
"elf.ELFOSABI_MODESTO": "debug/elf",
"elf.ELFOSABI_NETBSD": "debug/elf",
"elf.ELFOSABI_NONE": "debug/elf",
"elf.ELFOSABI_NSK": "debug/elf",
"elf.ELFOSABI_OPENBSD": "debug/elf",
"elf.ELFOSABI_OPENVMS": "debug/elf",
"elf.ELFOSABI_SOLARIS": "debug/elf",
"elf.ELFOSABI_STANDALONE": "debug/elf",
"elf.ELFOSABI_TRU64": "debug/elf",
"elf.EM_386": "debug/elf",
"elf.EM_486": "debug/elf",
"elf.EM_68HC12": "debug/elf",
"elf.EM_68K": "debug/elf",
"elf.EM_860": "debug/elf",
"elf.EM_88K": "debug/elf",
"elf.EM_960": "debug/elf",
"elf.EM_AARCH64": "debug/elf",
"elf.EM_ALPHA": "debug/elf",
"elf.EM_ALPHA_STD": "debug/elf",
"elf.EM_ARC": "debug/elf",
"elf.EM_ARM": "debug/elf",
"elf.EM_COLDFIRE": "debug/elf",
"elf.EM_FR20": "debug/elf",
"elf.EM_H8S": "debug/elf",
"elf.EM_H8_300": "debug/elf",
"elf.EM_H8_300H": "debug/elf",
"elf.EM_H8_500": "debug/elf",
"elf.EM_IA_64": "debug/elf",
"elf.EM_M32": "debug/elf",
"elf.EM_ME16": "debug/elf",
"elf.EM_MIPS": "debug/elf",
"elf.EM_MIPS_RS3_LE": "debug/elf",
"elf.EM_MIPS_RS4_BE": "debug/elf",
"elf.EM_MIPS_X": "debug/elf",
"elf.EM_MMA": "debug/elf",
"elf.EM_NCPU": "debug/elf",
"elf.EM_NDR1": "debug/elf",
"elf.EM_NONE": "debug/elf",
"elf.EM_PARISC": "debug/elf",
"elf.EM_PCP": "debug/elf",
"elf.EM_PPC": "debug/elf",
"elf.EM_PPC64": "debug/elf",
"elf.EM_RCE": "debug/elf",
"elf.EM_RH32": "debug/elf",
"elf.EM_S370": "debug/elf",
"elf.EM_S390": "debug/elf",
"elf.EM_SH": "debug/elf",
"elf.EM_SPARC": "debug/elf",
"elf.EM_SPARC32PLUS": "debug/elf",
"elf.EM_SPARCV9": "debug/elf",
"elf.EM_ST100": "debug/elf",
"elf.EM_STARCORE": "debug/elf",
"elf.EM_TINYJ": "debug/elf",
"elf.EM_TRICORE": "debug/elf",
"elf.EM_V800": "debug/elf",
"elf.EM_VPP500": "debug/elf",
"elf.EM_X86_64": "debug/elf",
"elf.ET_CORE": "debug/elf",
"elf.ET_DYN": "debug/elf",
"elf.ET_EXEC": "debug/elf",
"elf.ET_HIOS": "debug/elf",
"elf.ET_HIPROC": "debug/elf",
"elf.ET_LOOS": "debug/elf",
"elf.ET_LOPROC": "debug/elf",
"elf.ET_NONE": "debug/elf",
"elf.ET_REL": "debug/elf",
"elf.EV_CURRENT": "debug/elf",
"elf.EV_NONE": "debug/elf",
"elf.ErrNoSymbols": "debug/elf",
"elf.File": "debug/elf",
"elf.FileHeader": "debug/elf",
"elf.FormatError": "debug/elf",
"elf.Header32": "debug/elf",
"elf.Header64": "debug/elf",
"elf.ImportedSymbol": "debug/elf",
"elf.Machine": "debug/elf",
"elf.NT_FPREGSET": "debug/elf",
"elf.NT_PRPSINFO": "debug/elf",
"elf.NT_PRSTATUS": "debug/elf",
"elf.NType": "debug/elf",
"elf.NewFile": "debug/elf",
"elf.OSABI": "debug/elf",
"elf.Open": "debug/elf",
"elf.PF_MASKOS": "debug/elf",
"elf.PF_MASKPROC": "debug/elf",
"elf.PF_R": "debug/elf",
"elf.PF_W": "debug/elf",
"elf.PF_X": "debug/elf",
"elf.PT_DYNAMIC": "debug/elf",
"elf.PT_HIOS": "debug/elf",
"elf.PT_HIPROC": "debug/elf",
"elf.PT_INTERP": "debug/elf",
"elf.PT_LOAD": "debug/elf",
"elf.PT_LOOS": "debug/elf",
"elf.PT_LOPROC": "debug/elf",
"elf.PT_NOTE": "debug/elf",
"elf.PT_NULL": "debug/elf",
"elf.PT_PHDR": "debug/elf",
"elf.PT_SHLIB": "debug/elf",
"elf.PT_TLS": "debug/elf",
"elf.Prog": "debug/elf",
"elf.Prog32": "debug/elf",
"elf.Prog64": "debug/elf",
"elf.ProgFlag": "debug/elf",
"elf.ProgHeader": "debug/elf",
"elf.ProgType": "debug/elf",
"elf.R_386": "debug/elf",
"elf.R_386_32": "debug/elf",
"elf.R_386_COPY": "debug/elf",
"elf.R_386_GLOB_DAT": "debug/elf",
"elf.R_386_GOT32": "debug/elf",
"elf.R_386_GOTOFF": "debug/elf",
"elf.R_386_GOTPC": "debug/elf",
"elf.R_386_JMP_SLOT": "debug/elf",
"elf.R_386_NONE": "debug/elf",
"elf.R_386_PC32": "debug/elf",
"elf.R_386_PLT32": "debug/elf",
"elf.R_386_RELATIVE": "debug/elf",
"elf.R_386_TLS_DTPMOD32": "debug/elf",
"elf.R_386_TLS_DTPOFF32": "debug/elf",
"elf.R_386_TLS_GD": "debug/elf",
"elf.R_386_TLS_GD_32": "debug/elf",
"elf.R_386_TLS_GD_CALL": "debug/elf",
"elf.R_386_TLS_GD_POP": "debug/elf",
"elf.R_386_TLS_GD_PUSH": "debug/elf",
"elf.R_386_TLS_GOTIE": "debug/elf",
"elf.R_386_TLS_IE": "debug/elf",
"elf.R_386_TLS_IE_32": "debug/elf",
"elf.R_386_TLS_LDM": "debug/elf",
"elf.R_386_TLS_LDM_32": "debug/elf",
"elf.R_386_TLS_LDM_CALL": "debug/elf",
"elf.R_386_TLS_LDM_POP": "debug/elf",
"elf.R_386_TLS_LDM_PUSH": "debug/elf",
"elf.R_386_TLS_LDO_32": "debug/elf",
"elf.R_386_TLS_LE": "debug/elf",
"elf.R_386_TLS_LE_32": "debug/elf",
"elf.R_386_TLS_TPOFF": "debug/elf",
"elf.R_386_TLS_TPOFF32": "debug/elf",
"elf.R_AARCH64": "debug/elf",
"elf.R_AARCH64_ABS16": "debug/elf",
"elf.R_AARCH64_ABS32": "debug/elf",
"elf.R_AARCH64_ABS64": "debug/elf",
"elf.R_AARCH64_ADD_ABS_LO12_NC": "debug/elf",
"elf.R_AARCH64_ADR_GOT_PAGE": "debug/elf",
"elf.R_AARCH64_ADR_PREL_LO21": "debug/elf",
"elf.R_AARCH64_ADR_PREL_PG_HI21": "debug/elf",
"elf.R_AARCH64_ADR_PREL_PG_HI21_NC": "debug/elf",
"elf.R_AARCH64_CALL26": "debug/elf",
"elf.R_AARCH64_CONDBR19": "debug/elf",
"elf.R_AARCH64_COPY": "debug/elf",
"elf.R_AARCH64_GLOB_DAT": "debug/elf",
"elf.R_AARCH64_GOT_LD_PREL19": "debug/elf",
"elf.R_AARCH64_IRELATIVE": "debug/elf",
"elf.R_AARCH64_JUMP26": "debug/elf",
"elf.R_AARCH64_JUMP_SLOT": "debug/elf",
"elf.R_AARCH64_LD64_GOT_LO12_NC": "debug/elf",
"elf.R_AARCH64_LDST128_ABS_LO12_NC": "debug/elf",
"elf.R_AARCH64_LDST16_ABS_LO12_NC": "debug/elf",
"elf.R_AARCH64_LDST32_ABS_LO12_NC": "debug/elf",
"elf.R_AARCH64_LDST64_ABS_LO12_NC": "debug/elf",
"elf.R_AARCH64_LDST8_ABS_LO12_NC": "debug/elf",
"elf.R_AARCH64_LD_PREL_LO19": "debug/elf",
"elf.R_AARCH64_MOVW_SABS_G0": "debug/elf",
"elf.R_AARCH64_MOVW_SABS_G1": "debug/elf",
"elf.R_AARCH64_MOVW_SABS_G2": "debug/elf",
"elf.R_AARCH64_MOVW_UABS_G0": "debug/elf",
"elf.R_AARCH64_MOVW_UABS_G0_NC": "debug/elf",
"elf.R_AARCH64_MOVW_UABS_G1": "debug/elf",
"elf.R_AARCH64_MOVW_UABS_G1_NC": "debug/elf",
"elf.R_AARCH64_MOVW_UABS_G2": "debug/elf",
"elf.R_AARCH64_MOVW_UABS_G2_NC": "debug/elf",
"elf.R_AARCH64_MOVW_UABS_G3": "debug/elf",
"elf.R_AARCH64_NONE": "debug/elf",
"elf.R_AARCH64_NULL": "debug/elf",
"elf.R_AARCH64_P32_ABS16": "debug/elf",
"elf.R_AARCH64_P32_ABS32": "debug/elf",
"elf.R_AARCH64_P32_ADD_ABS_LO12_NC": "debug/elf",
"elf.R_AARCH64_P32_ADR_GOT_PAGE": "debug/elf",
"elf.R_AARCH64_P32_ADR_PREL_LO21": "debug/elf",
"elf.R_AARCH64_P32_ADR_PREL_PG_HI21": "debug/elf",
"elf.R_AARCH64_P32_CALL26": "debug/elf",
"elf.R_AARCH64_P32_CONDBR19": "debug/elf",
"elf.R_AARCH64_P32_COPY": "debug/elf",
"elf.R_AARCH64_P32_GLOB_DAT": "debug/elf",
"elf.R_AARCH64_P32_GOT_LD_PREL19": "debug/elf",
"elf.R_AARCH64_P32_IRELATIVE": "debug/elf",
"elf.R_AARCH64_P32_JUMP26": "debug/elf",
"elf.R_AARCH64_P32_JUMP_SLOT": "debug/elf",
"elf.R_AARCH64_P32_LD32_GOT_LO12_NC": "debug/elf",
"elf.R_AARCH64_P32_LDST128_ABS_LO12_NC": "debug/elf",
"elf.R_AARCH64_P32_LDST16_ABS_LO12_NC": "debug/elf",
"elf.R_AARCH64_P32_LDST32_ABS_LO12_NC": "debug/elf",
"elf.R_AARCH64_P32_LDST64_ABS_LO12_NC": "debug/elf",
"elf.R_AARCH64_P32_LDST8_ABS_LO12_NC": "debug/elf",
"elf.R_AARCH64_P32_LD_PREL_LO19": "debug/elf",
"elf.R_AARCH64_P32_MOVW_SABS_G0": "debug/elf",
"elf.R_AARCH64_P32_MOVW_UABS_G0": "debug/elf",
"elf.R_AARCH64_P32_MOVW_UABS_G0_NC": "debug/elf",
"elf.R_AARCH64_P32_MOVW_UABS_G1": "debug/elf",
"elf.R_AARCH64_P32_PREL16": "debug/elf",
"elf.R_AARCH64_P32_PREL32": "debug/elf",
"elf.R_AARCH64_P32_RELATIVE": "debug/elf",
"elf.R_AARCH64_P32_TLSDESC": "debug/elf",
"elf.R_AARCH64_P32_TLSDESC_ADD_LO12_NC": "debug/elf",
"elf.R_AARCH64_P32_TLSDESC_ADR_PAGE21": "debug/elf",
"elf.R_AARCH64_P32_TLSDESC_ADR_PREL21": "debug/elf",
"elf.R_AARCH64_P32_TLSDESC_CALL": "debug/elf",
"elf.R_AARCH64_P32_TLSDESC_LD32_LO12_NC": "debug/elf",
"elf.R_AARCH64_P32_TLSDESC_LD_PREL19": "debug/elf",
"elf.R_AARCH64_P32_TLSGD_ADD_LO12_NC": "debug/elf",
"elf.R_AARCH64_P32_TLSGD_ADR_PAGE21": "debug/elf",
"elf.R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21": "debug/elf",
"elf.R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC": "debug/elf",
"elf.R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19": "debug/elf",
"elf.R_AARCH64_P32_TLSLE_ADD_TPREL_HI12": "debug/elf",
"elf.R_AARCH64_P32_TLSLE_ADD_TPREL_LO12": "debug/elf",
"elf.R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC": "debug/elf",
"elf.R_AARCH64_P32_TLSLE_MOVW_TPREL_G0": "debug/elf",
"elf.R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC": "debug/elf",
"elf.R_AARCH64_P32_TLSLE_MOVW_TPREL_G1": "debug/elf",
"elf.R_AARCH64_P32_TLS_DTPMOD": "debug/elf",
"elf.R_AARCH64_P32_TLS_DTPREL": "debug/elf",
"elf.R_AARCH64_P32_TLS_TPREL": "debug/elf",
"elf.R_AARCH64_P32_TSTBR14": "debug/elf",
"elf.R_AARCH64_PREL16": "debug/elf",
"elf.R_AARCH64_PREL32": "debug/elf",
"elf.R_AARCH64_PREL64": "debug/elf",
"elf.R_AARCH64_RELATIVE": "debug/elf",
"elf.R_AARCH64_TLSDESC": "debug/elf",
"elf.R_AARCH64_TLSDESC_ADD": "debug/elf",
"elf.R_AARCH64_TLSDESC_ADD_LO12_NC": "debug/elf",
"elf.R_AARCH64_TLSDESC_ADR_PAGE21": "debug/elf",
"elf.R_AARCH64_TLSDESC_ADR_PREL21": "debug/elf",
"elf.R_AARCH64_TLSDESC_CALL": "debug/elf",
"elf.R_AARCH64_TLSDESC_LD64_LO12_NC": "debug/elf",
"elf.R_AARCH64_TLSDESC_LDR": "debug/elf",
"elf.R_AARCH64_TLSDESC_LD_PREL19": "debug/elf",
"elf.R_AARCH64_TLSDESC_OFF_G0_NC": "debug/elf",
"elf.R_AARCH64_TLSDESC_OFF_G1": "debug/elf",
"elf.R_AARCH64_TLSGD_ADD_LO12_NC": "debug/elf",
"elf.R_AARCH64_TLSGD_ADR_PAGE21": "debug/elf",
"elf.R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21": "debug/elf",
"elf.R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC": "debug/elf",
"elf.R_AARCH64_TLSIE_LD_GOTTPREL_PREL19": "debug/elf",
"elf.R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC": "debug/elf",
"elf.R_AARCH64_TLSIE_MOVW_GOTTPREL_G1": "debug/elf",
"elf.R_AARCH64_TLSLE_ADD_TPREL_HI12": "debug/elf",
"elf.R_AARCH64_TLSLE_ADD_TPREL_LO12": "debug/elf",
"elf.R_AARCH64_TLSLE_ADD_TPREL_LO12_NC": "debug/elf",
"elf.R_AARCH64_TLSLE_MOVW_TPREL_G0": "debug/elf",
"elf.R_AARCH64_TLSLE_MOVW_TPREL_G0_NC": "debug/elf",
"elf.R_AARCH64_TLSLE_MOVW_TPREL_G1": "debug/elf",
"elf.R_AARCH64_TLSLE_MOVW_TPREL_G1_NC": "debug/elf",
"elf.R_AARCH64_TLSLE_MOVW_TPREL_G2": "debug/elf",
"elf.R_AARCH64_TLS_DTPMOD64": "debug/elf",
"elf.R_AARCH64_TLS_DTPREL64": "debug/elf",
"elf.R_AARCH64_TLS_TPREL64": "debug/elf",
"elf.R_AARCH64_TSTBR14": "debug/elf",
"elf.R_ALPHA": "debug/elf",
"elf.R_ALPHA_BRADDR": "debug/elf",
"elf.R_ALPHA_COPY": "debug/elf",
"elf.R_ALPHA_GLOB_DAT": "debug/elf",
"elf.R_ALPHA_GPDISP": "debug/elf",
"elf.R_ALPHA_GPREL32": "debug/elf",
"elf.R_ALPHA_GPRELHIGH": "debug/elf",
"elf.R_ALPHA_GPRELLOW": "debug/elf",
"elf.R_ALPHA_GPVALUE": "debug/elf",
"elf.R_ALPHA_HINT": "debug/elf",
"elf.R_ALPHA_IMMED_BR_HI32": "debug/elf",
"elf.R_ALPHA_IMMED_GP_16": "debug/elf",
"elf.R_ALPHA_IMMED_GP_HI32": "debug/elf",
"elf.R_ALPHA_IMMED_LO32": "debug/elf",
"elf.R_ALPHA_IMMED_SCN_HI32": "debug/elf",
"elf.R_ALPHA_JMP_SLOT": "debug/elf",
"elf.R_ALPHA_LITERAL": "debug/elf",
"elf.R_ALPHA_LITUSE": "debug/elf",
"elf.R_ALPHA_NONE": "debug/elf",
"elf.R_ALPHA_OP_PRSHIFT": "debug/elf",
"elf.R_ALPHA_OP_PSUB": "debug/elf",
"elf.R_ALPHA_OP_PUSH": "debug/elf",
"elf.R_ALPHA_OP_STORE": "debug/elf",
"elf.R_ALPHA_REFLONG": "debug/elf",
"elf.R_ALPHA_REFQUAD": "debug/elf",
"elf.R_ALPHA_RELATIVE": "debug/elf",
"elf.R_ALPHA_SREL16": "debug/elf",
"elf.R_ALPHA_SREL32": "debug/elf",
"elf.R_ALPHA_SREL64": "debug/elf",
"elf.R_ARM": "debug/elf",
"elf.R_ARM_ABS12": "debug/elf",
"elf.R_ARM_ABS16": "debug/elf",
"elf.R_ARM_ABS32": "debug/elf",
"elf.R_ARM_ABS8": "debug/elf",
"elf.R_ARM_AMP_VCALL9": "debug/elf",
"elf.R_ARM_COPY": "debug/elf",
"elf.R_ARM_GLOB_DAT": "debug/elf",
"elf.R_ARM_GNU_VTENTRY": "debug/elf",
"elf.R_ARM_GNU_VTINHERIT": "debug/elf",
"elf.R_ARM_GOT32": "debug/elf",
"elf.R_ARM_GOTOFF": "debug/elf",
"elf.R_ARM_GOTPC": "debug/elf",
"elf.R_ARM_JUMP_SLOT": "debug/elf",
"elf.R_ARM_NONE": "debug/elf",
"elf.R_ARM_PC13": "debug/elf",
"elf.R_ARM_PC24": "debug/elf",
"elf.R_ARM_PLT32": "debug/elf",
"elf.R_ARM_RABS32": "debug/elf",
"elf.R_ARM_RBASE": "debug/elf",
"elf.R_ARM_REL32": "debug/elf",
"elf.R_ARM_RELATIVE": "debug/elf",
"elf.R_ARM_RPC24": "debug/elf",
"elf.R_ARM_RREL32": "debug/elf",
"elf.R_ARM_RSBREL32": "debug/elf",
"elf.R_ARM_SBREL32": "debug/elf",
"elf.R_ARM_SWI24": "debug/elf",
"elf.R_ARM_THM_ABS5": "debug/elf",
"elf.R_ARM_THM_PC22": "debug/elf",
"elf.R_ARM_THM_PC8": "debug/elf",
"elf.R_ARM_THM_RPC22": "debug/elf",
"elf.R_ARM_THM_SWI8": "debug/elf",
"elf.R_ARM_THM_XPC22": "debug/elf",
"elf.R_ARM_XPC25": "debug/elf",
"elf.R_INFO": "debug/elf",
"elf.R_INFO32": "debug/elf",
"elf.R_PPC": "debug/elf",
"elf.R_PPC64": "debug/elf",
"elf.R_PPC64_ADDR14": "debug/elf",
"elf.R_PPC64_ADDR14_BRNTAKEN": "debug/elf",
"elf.R_PPC64_ADDR14_BRTAKEN": "debug/elf",
"elf.R_PPC64_ADDR16": "debug/elf",
"elf.R_PPC64_ADDR16_DS": "debug/elf",
"elf.R_PPC64_ADDR16_HA": "debug/elf",
"elf.R_PPC64_ADDR16_HI": "debug/elf",
"elf.R_PPC64_ADDR16_HIGHER": "debug/elf",
"elf.R_PPC64_ADDR16_HIGHERA": "debug/elf",
"elf.R_PPC64_ADDR16_HIGHEST": "debug/elf",
"elf.R_PPC64_ADDR16_HIGHESTA": "debug/elf",
"elf.R_PPC64_ADDR16_LO": "debug/elf",
"elf.R_PPC64_ADDR16_LO_DS": "debug/elf",
"elf.R_PPC64_ADDR24": "debug/elf",
"elf.R_PPC64_ADDR32": "debug/elf",
"elf.R_PPC64_ADDR64": "debug/elf",
"elf.R_PPC64_DTPMOD64": "debug/elf",
"elf.R_PPC64_DTPREL16": "debug/elf",
"elf.R_PPC64_DTPREL16_DS": "debug/elf",
"elf.R_PPC64_DTPREL16_HA": "debug/elf",
"elf.R_PPC64_DTPREL16_HI": "debug/elf",
"elf.R_PPC64_DTPREL16_HIGHER": "debug/elf",
"elf.R_PPC64_DTPREL16_HIGHERA": "debug/elf",
"elf.R_PPC64_DTPREL16_HIGHEST": "debug/elf",
"elf.R_PPC64_DTPREL16_HIGHESTA": "debug/elf",
"elf.R_PPC64_DTPREL16_LO": "debug/elf",
"elf.R_PPC64_DTPREL16_LO_DS": "debug/elf",
"elf.R_PPC64_DTPREL64": "debug/elf",
"elf.R_PPC64_GOT16": "debug/elf",
"elf.R_PPC64_GOT16_DS": "debug/elf",
"elf.R_PPC64_GOT16_HA": "debug/elf",
"elf.R_PPC64_GOT16_HI": "debug/elf",
"elf.R_PPC64_GOT16_LO": "debug/elf",
"elf.R_PPC64_GOT16_LO_DS": "debug/elf",
"elf.R_PPC64_GOT_DTPREL16_DS": "debug/elf",
"elf.R_PPC64_GOT_DTPREL16_HA": "debug/elf",
"elf.R_PPC64_GOT_DTPREL16_HI": "debug/elf",
"elf.R_PPC64_GOT_DTPREL16_LO_DS": "debug/elf",
"elf.R_PPC64_GOT_TLSGD16": "debug/elf",
"elf.R_PPC64_GOT_TLSGD16_HA": "debug/elf",
"elf.R_PPC64_GOT_TLSGD16_HI": "debug/elf",
"elf.R_PPC64_GOT_TLSGD16_LO": "debug/elf",
"elf.R_PPC64_GOT_TLSLD16": "debug/elf",
"elf.R_PPC64_GOT_TLSLD16_HA": "debug/elf",
"elf.R_PPC64_GOT_TLSLD16_HI": "debug/elf",
"elf.R_PPC64_GOT_TLSLD16_LO": "debug/elf",
"elf.R_PPC64_GOT_TPREL16_DS": "debug/elf",
"elf.R_PPC64_GOT_TPREL16_HA": "debug/elf",
"elf.R_PPC64_GOT_TPREL16_HI": "debug/elf",
"elf.R_PPC64_GOT_TPREL16_LO_DS": "debug/elf",
"elf.R_PPC64_JMP_SLOT": "debug/elf",
"elf.R_PPC64_NONE": "debug/elf",
"elf.R_PPC64_REL14": "debug/elf",
"elf.R_PPC64_REL14_BRNTAKEN": "debug/elf",
"elf.R_PPC64_REL14_BRTAKEN": "debug/elf",
"elf.R_PPC64_REL16": "debug/elf",
"elf.R_PPC64_REL16_HA": "debug/elf",
"elf.R_PPC64_REL16_HI": "debug/elf",
"elf.R_PPC64_REL16_LO": "debug/elf",
"elf.R_PPC64_REL24": "debug/elf",
"elf.R_PPC64_REL32": "debug/elf",
"elf.R_PPC64_REL64": "debug/elf",
"elf.R_PPC64_TLS": "debug/elf",
"elf.R_PPC64_TLSGD": "debug/elf",
"elf.R_PPC64_TLSLD": "debug/elf",
"elf.R_PPC64_TOC": "debug/elf",
"elf.R_PPC64_TOC16": "debug/elf",
"elf.R_PPC64_TOC16_DS": "debug/elf",
"elf.R_PPC64_TOC16_HA": "debug/elf",
"elf.R_PPC64_TOC16_HI": "debug/elf",
"elf.R_PPC64_TOC16_LO": "debug/elf",
"elf.R_PPC64_TOC16_LO_DS": "debug/elf",
"elf.R_PPC64_TPREL16": "debug/elf",
"elf.R_PPC64_TPREL16_DS": "debug/elf",
"elf.R_PPC64_TPREL16_HA": "debug/elf",
"elf.R_PPC64_TPREL16_HI": "debug/elf",
"elf.R_PPC64_TPREL16_HIGHER": "debug/elf",
"elf.R_PPC64_TPREL16_HIGHERA": "debug/elf",
"elf.R_PPC64_TPREL16_HIGHEST": "debug/elf",
"elf.R_PPC64_TPREL16_HIGHESTA": "debug/elf",
"elf.R_PPC64_TPREL16_LO": "debug/elf",
"elf.R_PPC64_TPREL16_LO_DS": "debug/elf",
"elf.R_PPC64_TPREL64": "debug/elf",
"elf.R_PPC_ADDR14": "debug/elf",
"elf.R_PPC_ADDR14_BRNTAKEN": "debug/elf",
"elf.R_PPC_ADDR14_BRTAKEN": "debug/elf",
"elf.R_PPC_ADDR16": "debug/elf",
"elf.R_PPC_ADDR16_HA": "debug/elf",
"elf.R_PPC_ADDR16_HI": "debug/elf",
"elf.R_PPC_ADDR16_LO": "debug/elf",
"elf.R_PPC_ADDR24": "debug/elf",
"elf.R_PPC_ADDR32": "debug/elf",
"elf.R_PPC_COPY": "debug/elf",
"elf.R_PPC_DTPMOD32": "debug/elf",
"elf.R_PPC_DTPREL16": "debug/elf",
"elf.R_PPC_DTPREL16_HA": "debug/elf",
"elf.R_PPC_DTPREL16_HI": "debug/elf",
"elf.R_PPC_DTPREL16_LO": "debug/elf",
"elf.R_PPC_DTPREL32": "debug/elf",
"elf.R_PPC_EMB_BIT_FLD": "debug/elf",
"elf.R_PPC_EMB_MRKREF": "debug/elf",
"elf.R_PPC_EMB_NADDR16": "debug/elf",
"elf.R_PPC_EMB_NADDR16_HA": "debug/elf",
"elf.R_PPC_EMB_NADDR16_HI": "debug/elf",
"elf.R_PPC_EMB_NADDR16_LO": "debug/elf",
"elf.R_PPC_EMB_NADDR32": "debug/elf",
"elf.R_PPC_EMB_RELSDA": "debug/elf",
"elf.R_PPC_EMB_RELSEC16": "debug/elf",
"elf.R_PPC_EMB_RELST_HA": "debug/elf",
"elf.R_PPC_EMB_RELST_HI": "debug/elf",
"elf.R_PPC_EMB_RELST_LO": "debug/elf",
"elf.R_PPC_EMB_SDA21": "debug/elf",
"elf.R_PPC_EMB_SDA2I16": "debug/elf",
"elf.R_PPC_EMB_SDA2REL": "debug/elf",
"elf.R_PPC_EMB_SDAI16": "debug/elf",
"elf.R_PPC_GLOB_DAT": "debug/elf",
"elf.R_PPC_GOT16": "debug/elf",
"elf.R_PPC_GOT16_HA": "debug/elf",
"elf.R_PPC_GOT16_HI": "debug/elf",
"elf.R_PPC_GOT16_LO": "debug/elf",
"elf.R_PPC_GOT_TLSGD16": "debug/elf",
"elf.R_PPC_GOT_TLSGD16_HA": "debug/elf",
"elf.R_PPC_GOT_TLSGD16_HI": "debug/elf",
"elf.R_PPC_GOT_TLSGD16_LO": "debug/elf",
"elf.R_PPC_GOT_TLSLD16": "debug/elf",
"elf.R_PPC_GOT_TLSLD16_HA": "debug/elf",
"elf.R_PPC_GOT_TLSLD16_HI": "debug/elf",
"elf.R_PPC_GOT_TLSLD16_LO": "debug/elf",
"elf.R_PPC_GOT_TPREL16": "debug/elf",
"elf.R_PPC_GOT_TPREL16_HA": "debug/elf",
"elf.R_PPC_GOT_TPREL16_HI": "debug/elf",
"elf.R_PPC_GOT_TPREL16_LO": "debug/elf",
"elf.R_PPC_JMP_SLOT": "debug/elf",
"elf.R_PPC_LOCAL24PC": "debug/elf",
"elf.R_PPC_NONE": "debug/elf",
"elf.R_PPC_PLT16_HA": "debug/elf",
"elf.R_PPC_PLT16_HI": "debug/elf",
"elf.R_PPC_PLT16_LO": "debug/elf",
"elf.R_PPC_PLT32": "debug/elf",
"elf.R_PPC_PLTREL24": "debug/elf",
"elf.R_PPC_PLTREL32": "debug/elf",
"elf.R_PPC_REL14": "debug/elf",
"elf.R_PPC_REL14_BRNTAKEN": "debug/elf",
"elf.R_PPC_REL14_BRTAKEN": "debug/elf",
"elf.R_PPC_REL24": "debug/elf",
"elf.R_PPC_REL32": "debug/elf",
"elf.R_PPC_RELATIVE": "debug/elf",
"elf.R_PPC_SDAREL16": "debug/elf",
"elf.R_PPC_SECTOFF": "debug/elf",
"elf.R_PPC_SECTOFF_HA": "debug/elf",
"elf.R_PPC_SECTOFF_HI": "debug/elf",
"elf.R_PPC_SECTOFF_LO": "debug/elf",
"elf.R_PPC_TLS": "debug/elf",
"elf.R_PPC_TPREL16": "debug/elf",
"elf.R_PPC_TPREL16_HA": "debug/elf",
"elf.R_PPC_TPREL16_HI": "debug/elf",
"elf.R_PPC_TPREL16_LO": "debug/elf",
"elf.R_PPC_TPREL32": "debug/elf",
"elf.R_PPC_UADDR16": "debug/elf",
"elf.R_PPC_UADDR32": "debug/elf",
"elf.R_SPARC": "debug/elf",
"elf.R_SPARC_10": "debug/elf",
"elf.R_SPARC_11": "debug/elf",
"elf.R_SPARC_13": "debug/elf",
"elf.R_SPARC_16": "debug/elf",
"elf.R_SPARC_22": "debug/elf",
"elf.R_SPARC_32": "debug/elf",
"elf.R_SPARC_5": "debug/elf",
"elf.R_SPARC_6": "debug/elf",
"elf.R_SPARC_64": "debug/elf",
"elf.R_SPARC_7": "debug/elf",
"elf.R_SPARC_8": "debug/elf",
"elf.R_SPARC_COPY": "debug/elf",
"elf.R_SPARC_DISP16": "debug/elf",
"elf.R_SPARC_DISP32": "debug/elf",
"elf.R_SPARC_DISP64": "debug/elf",
"elf.R_SPARC_DISP8": "debug/elf",
"elf.R_SPARC_GLOB_DAT": "debug/elf",
"elf.R_SPARC_GLOB_JMP": "debug/elf",
"elf.R_SPARC_GOT10": "debug/elf",
"elf.R_SPARC_GOT13": "debug/elf",
"elf.R_SPARC_GOT22": "debug/elf",
"elf.R_SPARC_H44": "debug/elf",
"elf.R_SPARC_HH22": "debug/elf",
"elf.R_SPARC_HI22": "debug/elf",
"elf.R_SPARC_HIPLT22": "debug/elf",
"elf.R_SPARC_HIX22": "debug/elf",
"elf.R_SPARC_HM10": "debug/elf",
"elf.R_SPARC_JMP_SLOT": "debug/elf",
"elf.R_SPARC_L44": "debug/elf",
"elf.R_SPARC_LM22": "debug/elf",
"elf.R_SPARC_LO10": "debug/elf",
"elf.R_SPARC_LOPLT10": "debug/elf",
"elf.R_SPARC_LOX10": "debug/elf",
"elf.R_SPARC_M44": "debug/elf",
"elf.R_SPARC_NONE": "debug/elf",
"elf.R_SPARC_OLO10": "debug/elf",
"elf.R_SPARC_PC10": "debug/elf",
"elf.R_SPARC_PC22": "debug/elf",
"elf.R_SPARC_PCPLT10": "debug/elf",
"elf.R_SPARC_PCPLT22": "debug/elf",
"elf.R_SPARC_PCPLT32": "debug/elf",
"elf.R_SPARC_PC_HH22": "debug/elf",
"elf.R_SPARC_PC_HM10": "debug/elf",
"elf.R_SPARC_PC_LM22": "debug/elf",
"elf.R_SPARC_PLT32": "debug/elf",
"elf.R_SPARC_PLT64": "debug/elf",
"elf.R_SPARC_REGISTER": "debug/elf",
"elf.R_SPARC_RELATIVE": "debug/elf",
"elf.R_SPARC_UA16": "debug/elf",
"elf.R_SPARC_UA32": "debug/elf",
"elf.R_SPARC_UA64": "debug/elf",
"elf.R_SPARC_WDISP16": "debug/elf",
"elf.R_SPARC_WDISP19": "debug/elf",
"elf.R_SPARC_WDISP22": "debug/elf",
"elf.R_SPARC_WDISP30": "debug/elf",
"elf.R_SPARC_WPLT30": "debug/elf",
"elf.R_SYM32": "debug/elf",
"elf.R_SYM64": "debug/elf",
"elf.R_TYPE32": "debug/elf",
"elf.R_TYPE64": "debug/elf",
"elf.R_X86_64": "debug/elf",
"elf.R_X86_64_16": "debug/elf",
"elf.R_X86_64_32": "debug/elf",
"elf.R_X86_64_32S": "debug/elf",
"elf.R_X86_64_64": "debug/elf",
"elf.R_X86_64_8": "debug/elf",
"elf.R_X86_64_COPY": "debug/elf",
"elf.R_X86_64_DTPMOD64": "debug/elf",
"elf.R_X86_64_DTPOFF32": "debug/elf",
"elf.R_X86_64_DTPOFF64": "debug/elf",
"elf.R_X86_64_GLOB_DAT": "debug/elf",
"elf.R_X86_64_GOT32": "debug/elf",
"elf.R_X86_64_GOTPCREL": "debug/elf",
"elf.R_X86_64_GOTTPOFF": "debug/elf",
"elf.R_X86_64_JMP_SLOT": "debug/elf",
"elf.R_X86_64_NONE": "debug/elf",
"elf.R_X86_64_PC16": "debug/elf",
"elf.R_X86_64_PC32": "debug/elf",
"elf.R_X86_64_PC8": "debug/elf",
"elf.R_X86_64_PLT32": "debug/elf",
"elf.R_X86_64_RELATIVE": "debug/elf",
"elf.R_X86_64_TLSGD": "debug/elf",
"elf.R_X86_64_TLSLD": "debug/elf",
"elf.R_X86_64_TPOFF32": "debug/elf",
"elf.R_X86_64_TPOFF64": "debug/elf",
"elf.Rel32": "debug/elf",
"elf.Rel64": "debug/elf",
"elf.Rela32": "debug/elf",
"elf.Rela64": "debug/elf",
"elf.SHF_ALLOC": "debug/elf",
"elf.SHF_EXECINSTR": "debug/elf",
"elf.SHF_GROUP": "debug/elf",
"elf.SHF_INFO_LINK": "debug/elf",
"elf.SHF_LINK_ORDER": "debug/elf",
"elf.SHF_MASKOS": "debug/elf",
"elf.SHF_MASKPROC": "debug/elf",
"elf.SHF_MERGE": "debug/elf",
"elf.SHF_OS_NONCONFORMING": "debug/elf",
"elf.SHF_STRINGS": "debug/elf",
"elf.SHF_TLS": "debug/elf",
"elf.SHF_WRITE": "debug/elf",
"elf.SHN_ABS": "debug/elf",
"elf.SHN_COMMON": "debug/elf",
"elf.SHN_HIOS": "debug/elf",
"elf.SHN_HIPROC": "debug/elf",
"elf.SHN_HIRESERVE": "debug/elf",
"elf.SHN_LOOS": "debug/elf",
"elf.SHN_LOPROC": "debug/elf",
"elf.SHN_LORESERVE": "debug/elf",
"elf.SHN_UNDEF": "debug/elf",
"elf.SHN_XINDEX": "debug/elf",
"elf.SHT_DYNAMIC": "debug/elf",
"elf.SHT_DYNSYM": "debug/elf",
"elf.SHT_FINI_ARRAY": "debug/elf",
"elf.SHT_GNU_ATTRIBUTES": "debug/elf",
"elf.SHT_GNU_HASH": "debug/elf",
"elf.SHT_GNU_LIBLIST": "debug/elf",
"elf.SHT_GNU_VERDEF": "debug/elf",
"elf.SHT_GNU_VERNEED": "debug/elf",
"elf.SHT_GNU_VERSYM": "debug/elf",
"elf.SHT_GROUP": "debug/elf",
"elf.SHT_HASH": "debug/elf",
"elf.SHT_HIOS": "debug/elf",
"elf.SHT_HIPROC": "debug/elf",
"elf.SHT_HIUSER": "debug/elf",
"elf.SHT_INIT_ARRAY": "debug/elf",
"elf.SHT_LOOS": "debug/elf",
"elf.SHT_LOPROC": "debug/elf",
"elf.SHT_LOUSER": "debug/elf",
"elf.SHT_NOBITS": "debug/elf",
"elf.SHT_NOTE": "debug/elf",
"elf.SHT_NULL": "debug/elf",
"elf.SHT_PREINIT_ARRAY": "debug/elf",
"elf.SHT_PROGBITS": "debug/elf",
"elf.SHT_REL": "debug/elf",
"elf.SHT_RELA": "debug/elf",
"elf.SHT_SHLIB": "debug/elf",
"elf.SHT_STRTAB": "debug/elf",
"elf.SHT_SYMTAB": "debug/elf",
"elf.SHT_SYMTAB_SHNDX": "debug/elf",
"elf.STB_GLOBAL": "debug/elf",
"elf.STB_HIOS": "debug/elf",
"elf.STB_HIPROC": "debug/elf",
"elf.STB_LOCAL": "debug/elf",
"elf.STB_LOOS": "debug/elf",
"elf.STB_LOPROC": "debug/elf",
"elf.STB_WEAK": "debug/elf",
"elf.STT_COMMON": "debug/elf",
"elf.STT_FILE": "debug/elf",
"elf.STT_FUNC": "debug/elf",
"elf.STT_HIOS": "debug/elf",
"elf.STT_HIPROC": "debug/elf",
"elf.STT_LOOS": "debug/elf",
"elf.STT_LOPROC": "debug/elf",
"elf.STT_NOTYPE": "debug/elf",
"elf.STT_OBJECT": "debug/elf",
"elf.STT_SECTION": "debug/elf",
"elf.STT_TLS": "debug/elf",
"elf.STV_DEFAULT": "debug/elf",
"elf.STV_HIDDEN": "debug/elf",
"elf.STV_INTERNAL": "debug/elf",
"elf.STV_PROTECTED": "debug/elf",
"elf.ST_BIND": "debug/elf",
"elf.ST_INFO": "debug/elf",
"elf.ST_TYPE": "debug/elf",
"elf.ST_VISIBILITY": "debug/elf",
"elf.Section": "debug/elf",
"elf.Section32": "debug/elf",
"elf.Section64": "debug/elf",
"elf.SectionFlag": "debug/elf",
"elf.SectionHeader": "debug/elf",
"elf.SectionIndex": "debug/elf",
"elf.SectionType": "debug/elf",
"elf.Sym32": "debug/elf",
"elf.Sym32Size": "debug/elf",
"elf.Sym64": "debug/elf",
"elf.Sym64Size": "debug/elf",
"elf.SymBind": "debug/elf",
"elf.SymType": "debug/elf",
"elf.SymVis": "debug/elf",
"elf.Symbol": "debug/elf",
"elf.Type": "debug/elf",
"elf.Version": "debug/elf",
"elliptic.Curve": "crypto/elliptic",
"elliptic.CurveParams": "crypto/elliptic",
"elliptic.GenerateKey": "crypto/elliptic",
"elliptic.Marshal": "crypto/elliptic",
"elliptic.P224": "crypto/elliptic",
"elliptic.P256": "crypto/elliptic",
"elliptic.P384": "crypto/elliptic",
"elliptic.P521": "crypto/elliptic",
"elliptic.Unmarshal": "crypto/elliptic",
"encoding.BinaryMarshaler": "encoding",
"encoding.BinaryUnmarshaler": "encoding",
"encoding.TextMarshaler": "encoding",
"encoding.TextUnmarshaler": "encoding",
"errors.New": "errors",
"exec.Cmd": "os/exec",
"exec.Command": "os/exec",
"exec.ErrNotFound": "os/exec",
"exec.Error": "os/exec",
"exec.ExitError": "os/exec",
"exec.LookPath": "os/exec",
"expvar.Do": "expvar",
"expvar.Float": "expvar",
"expvar.Func": "expvar",
"expvar.Get": "expvar",
"expvar.Int": "expvar",
"expvar.KeyValue": "expvar",
"expvar.Map": "expvar",
"expvar.NewFloat": "expvar",
"expvar.NewInt": "expvar",
"expvar.NewMap": "expvar",
"expvar.NewString": "expvar",
"expvar.Publish": "expvar",
"expvar.String": "expvar",
"expvar.Var": "expvar",
"fcgi.ErrConnClosed": "net/http/fcgi",
"fcgi.ErrRequestAborted": "net/http/fcgi",
"fcgi.Serve": "net/http/fcgi",
"filepath.Abs": "path/filepath",
"filepath.Base": "path/filepath",
"filepath.Clean": "path/filepath",
"filepath.Dir": "path/filepath",
"filepath.ErrBadPattern": "path/filepath",
"filepath.EvalSymlinks": "path/filepath",
"filepath.Ext": "path/filepath",
"filepath.FromSlash": "path/filepath",
"filepath.Glob": "path/filepath",
"filepath.HasPrefix": "path/filepath",
"filepath.IsAbs": "path/filepath",
"filepath.Join": "path/filepath",
"filepath.ListSeparator": "path/filepath",
"filepath.Match": "path/filepath",
"filepath.Rel": "path/filepath",
"filepath.Separator": "path/filepath",
"filepath.SkipDir": "path/filepath",
"filepath.Split": "path/filepath",
"filepath.SplitList": "path/filepath",
"filepath.ToSlash": "path/filepath",
"filepath.VolumeName": "path/filepath",
"filepath.Walk": "path/filepath",
"filepath.WalkFunc": "path/filepath",
"flag.Arg": "flag",
"flag.Args": "flag",
"flag.Bool": "flag",
"flag.BoolVar": "flag",
"flag.CommandLine": "flag",
"flag.ContinueOnError": "flag",
"flag.Duration": "flag",
"flag.DurationVar": "flag",
"flag.ErrHelp": "flag",
"flag.ErrorHandling": "flag",
"flag.ExitOnError": "flag",
"flag.Flag": "flag",
"flag.FlagSet": "flag",
"flag.Float64": "flag",
"flag.Float64Var": "flag",
"flag.Getter": "flag",
"flag.Int": "flag",
"flag.Int64": "flag",
"flag.Int64Var": "flag",
"flag.IntVar": "flag",
"flag.Lookup": "flag",
"flag.NArg": "flag",
"flag.NFlag": "flag",
"flag.NewFlagSet": "flag",
"flag.PanicOnError": "flag",
"flag.Parse": "flag",
"flag.Parsed": "flag",
"flag.PrintDefaults": "flag",
"flag.Set": "flag",
"flag.String": "flag",
"flag.StringVar": "flag",
"flag.Uint": "flag",
"flag.Uint64": "flag",
"flag.Uint64Var": "flag",
"flag.UintVar": "flag",
"flag.UnquoteUsage": "flag",
"flag.Usage": "flag",
"flag.Value": "flag",
"flag.Var": "flag",
"flag.Visit": "flag",
"flag.VisitAll": "flag",
"flate.BestCompression": "compress/flate",
"flate.BestSpeed": "compress/flate",
"flate.CorruptInputError": "compress/flate",
"flate.DefaultCompression": "compress/flate",
"flate.InternalError": "compress/flate",
"flate.NewReader": "compress/flate",
"flate.NewReaderDict": "compress/flate",
"flate.NewWriter": "compress/flate",
"flate.NewWriterDict": "compress/flate",
"flate.NoCompression": "compress/flate",
"flate.ReadError": "compress/flate",
"flate.Reader": "compress/flate",
"flate.Resetter": "compress/flate",
"flate.WriteError": "compress/flate",
"flate.Writer": "compress/flate",
"fmt.Errorf": "fmt",
"fmt.Formatter": "fmt",
"fmt.Fprint": "fmt",
"fmt.Fprintf": "fmt",
"fmt.Fprintln": "fmt",
"fmt.Fscan": "fmt",
"fmt.Fscanf": "fmt",
"fmt.Fscanln": "fmt",
"fmt.GoStringer": "fmt",
"fmt.Print": "fmt",
"fmt.Printf": "fmt",
"fmt.Println": "fmt",
"fmt.Scan": "fmt",
"fmt.ScanState": "fmt",
"fmt.Scanf": "fmt",
"fmt.Scanln": "fmt",
"fmt.Scanner": "fmt",
"fmt.Sprint": "fmt",
"fmt.Sprintf": "fmt",
"fmt.Sprintln": "fmt",
"fmt.Sscan": "fmt",
"fmt.Sscanf": "fmt",
"fmt.Sscanln": "fmt",
"fmt.State": "fmt",
"fmt.Stringer": "fmt",
"fnv.New32": "hash/fnv",
"fnv.New32a": "hash/fnv",
"fnv.New64": "hash/fnv",
"fnv.New64a": "hash/fnv",
"format.Node": "go/format",
"format.Source": "go/format",
"gif.Decode": "image/gif",
"gif.DecodeAll": "image/gif",
"gif.DecodeConfig": "image/gif",
"gif.DisposalBackground": "image/gif",
"gif.DisposalNone": "image/gif",
"gif.DisposalPrevious": "image/gif",
"gif.Encode": "image/gif",
"gif.EncodeAll": "image/gif",
"gif.GIF": "image/gif",
"gif.Options": "image/gif",
"gob.CommonType": "encoding/gob",
"gob.Decoder": "encoding/gob",
"gob.Encoder": "encoding/gob",
"gob.GobDecoder": "encoding/gob",
"gob.GobEncoder": "encoding/gob",
"gob.NewDecoder": "encoding/gob",
"gob.NewEncoder": "encoding/gob",
"gob.Register": "encoding/gob",
"gob.RegisterName": "encoding/gob",
"gosym.DecodingError": "debug/gosym",
"gosym.Func": "debug/gosym",
"gosym.LineTable": "debug/gosym",
"gosym.NewLineTable": "debug/gosym",
"gosym.NewTable": "debug/gosym",
"gosym.Obj": "debug/gosym",
"gosym.Sym": "debug/gosym",
"gosym.Table": "debug/gosym",
"gosym.UnknownFileError": "debug/gosym",
"gosym.UnknownLineError": "debug/gosym",
"gzip.BestCompression": "compress/gzip",
"gzip.BestSpeed": "compress/gzip",
"gzip.DefaultCompression": "compress/gzip",
"gzip.ErrChecksum": "compress/gzip",
"gzip.ErrHeader": "compress/gzip",
"gzip.Header": "compress/gzip",
"gzip.NewReader": "compress/gzip",
"gzip.NewWriter": "compress/gzip",
"gzip.NewWriterLevel": "compress/gzip",
"gzip.NoCompression": "compress/gzip",
"gzip.Reader": "compress/gzip",
"gzip.Writer": "compress/gzip",
"hash.Hash": "hash",
"hash.Hash32": "hash",
"hash.Hash64": "hash",
"heap.Fix": "container/heap",
"heap.Init": "container/heap",
"heap.Interface": "container/heap",
"heap.Pop": "container/heap",
"heap.Push": "container/heap",
"heap.Remove": "container/heap",
"hex.Decode": "encoding/hex",
"hex.DecodeString": "encoding/hex",
"hex.DecodedLen": "encoding/hex",
"hex.Dump": "encoding/hex",
"hex.Dumper": "encoding/hex",
"hex.Encode": "encoding/hex",
"hex.EncodeToString": "encoding/hex",
"hex.EncodedLen": "encoding/hex",
"hex.ErrLength": "encoding/hex",
"hex.InvalidByteError": "encoding/hex",
"hmac.Equal": "crypto/hmac",
"hmac.New": "crypto/hmac",
"html.EscapeString": "html",
"html.UnescapeString": "html",
"http.CanonicalHeaderKey": "net/http",
"http.Client": "net/http",
"http.CloseNotifier": "net/http",
"http.ConnState": "net/http",
"http.Cookie": "net/http",
"http.CookieJar": "net/http",
"http.DefaultClient": "net/http",
"http.DefaultMaxHeaderBytes": "net/http",
"http.DefaultMaxIdleConnsPerHost": "net/http",
"http.DefaultServeMux": "net/http",
"http.DefaultTransport": "net/http",
"http.DetectContentType": "net/http",
"http.Dir": "net/http",
"http.ErrBodyNotAllowed": "net/http",
"http.ErrBodyReadAfterClose": "net/http",
"http.ErrContentLength": "net/http",
"http.ErrHandlerTimeout": "net/http",
"http.ErrHeaderTooLong": "net/http",
"http.ErrHijacked": "net/http",
"http.ErrLineTooLong": "net/http",
"http.ErrMissingBoundary": "net/http",
"http.ErrMissingContentLength": "net/http",
"http.ErrMissingFile": "net/http",
"http.ErrNoCookie": "net/http",
"http.ErrNoLocation": "net/http",
"http.ErrNotMultipart": "net/http",
"http.ErrNotSupported": "net/http",
"http.ErrShortBody": "net/http",
"http.ErrUnexpectedTrailer": "net/http",
"http.ErrWriteAfterFlush": "net/http",
"http.Error": "net/http",
"http.File": "net/http",
"http.FileServer": "net/http",
"http.FileSystem": "net/http",
"http.Flusher": "net/http",
"http.Get": "net/http",
"http.Handle": "net/http",
"http.HandleFunc": "net/http",
"http.Handler": "net/http",
"http.HandlerFunc": "net/http",
"http.Head": "net/http",
"http.Header": "net/http",
"http.Hijacker": "net/http",
"http.ListenAndServe": "net/http",
"http.ListenAndServeTLS": "net/http",
"http.MaxBytesReader": "net/http",
"http.NewFileTransport": "net/http",
"http.NewRequest": "net/http",
"http.NewServeMux": "net/http",
"http.NotFound": "net/http",
"http.NotFoundHandler": "net/http",
"http.ParseHTTPVersion": "net/http",
"http.ParseTime": "net/http",
"http.Post": "net/http",
"http.PostForm": "net/http",
"http.ProtocolError": "net/http",
"http.ProxyFromEnvironment": "net/http",
"http.ProxyURL": "net/http",
"http.ReadRequest": "net/http",
"http.ReadResponse": "net/http",
"http.Redirect": "net/http",
"http.RedirectHandler": "net/http",
"http.Request": "net/http",
"http.Response": "net/http",
"http.ResponseWriter": "net/http",
"http.RoundTripper": "net/http",
"http.Serve": "net/http",
"http.ServeContent": "net/http",
"http.ServeFile": "net/http",
"http.ServeMux": "net/http",
"http.Server": "net/http",
"http.SetCookie": "net/http",
"http.StateActive": "net/http",
"http.StateClosed": "net/http",
"http.StateHijacked": "net/http",
"http.StateIdle": "net/http",
"http.StateNew": "net/http",
"http.StatusAccepted": "net/http",
"http.StatusBadGateway": "net/http",
"http.StatusBadRequest": "net/http",
"http.StatusConflict": "net/http",
"http.StatusContinue": "net/http",
"http.StatusCreated": "net/http",
"http.StatusExpectationFailed": "net/http",
"http.StatusForbidden": "net/http",
"http.StatusFound": "net/http",
"http.StatusGatewayTimeout": "net/http",
"http.StatusGone": "net/http",
"http.StatusHTTPVersionNotSupported": "net/http",
"http.StatusInternalServerError": "net/http",
"http.StatusLengthRequired": "net/http",
"http.StatusMethodNotAllowed": "net/http",
"http.StatusMovedPermanently": "net/http",
"http.StatusMultipleChoices": "net/http",
"http.StatusNoContent": "net/http",
"http.StatusNonAuthoritativeInfo": "net/http",
"http.StatusNotAcceptable": "net/http",
"http.StatusNotFound": "net/http",
"http.StatusNotImplemented": "net/http",
"http.StatusNotModified": "net/http",
"http.StatusOK": "net/http",
"http.StatusPartialContent": "net/http",
"http.StatusPaymentRequired": "net/http",
"http.StatusPreconditionFailed": "net/http",
"http.StatusProxyAuthRequired": "net/http",
"http.StatusRequestEntityTooLarge": "net/http",
"http.StatusRequestTimeout": "net/http",
"http.StatusRequestURITooLong": "net/http",
"http.StatusRequestedRangeNotSatisfiable": "net/http",
"http.StatusResetContent": "net/http",
"http.StatusSeeOther": "net/http",
"http.StatusServiceUnavailable": "net/http",
"http.StatusSwitchingProtocols": "net/http",
"http.StatusTeapot": "net/http",
"http.StatusTemporaryRedirect": "net/http",
"http.StatusText": "net/http",
"http.StatusUnauthorized": "net/http",
"http.StatusUnsupportedMediaType": "net/http",
"http.StatusUseProxy": "net/http",
"http.StripPrefix": "net/http",
"http.TimeFormat": "net/http",
"http.TimeoutHandler": "net/http",
"http.Transport": "net/http",
"httptest.DefaultRemoteAddr": "net/http/httptest",
"httptest.NewRecorder": "net/http/httptest",
"httptest.NewServer": "net/http/httptest",
"httptest.NewTLSServer": "net/http/httptest",
"httptest.NewUnstartedServer": "net/http/httptest",
"httptest.ResponseRecorder": "net/http/httptest",
"httptest.Server": "net/http/httptest",
"httputil.ClientConn": "net/http/httputil",
"httputil.DumpRequest": "net/http/httputil",
"httputil.DumpRequestOut": "net/http/httputil",
"httputil.DumpResponse": "net/http/httputil",
"httputil.ErrClosed": "net/http/httputil",
"httputil.ErrLineTooLong": "net/http/httputil",
"httputil.ErrPersistEOF": "net/http/httputil",
"httputil.ErrPipeline": "net/http/httputil",
"httputil.NewChunkedReader": "net/http/httputil",
"httputil.NewChunkedWriter": "net/http/httputil",
"httputil.NewClientConn": "net/http/httputil",
"httputil.NewProxyClientConn": "net/http/httputil",
"httputil.NewServerConn": "net/http/httputil",
"httputil.NewSingleHostReverseProxy": "net/http/httputil",
"httputil.ReverseProxy": "net/http/httputil",
"httputil.ServerConn": "net/http/httputil",
"image.Alpha": "image",
"image.Alpha16": "image",
"image.Black": "image",
"image.CMYK": "image",
"image.Config": "image",
"image.Decode": "image",
"image.DecodeConfig": "image",
"image.ErrFormat": "image",
"image.Gray": "image",
"image.Gray16": "image",
"image.Image": "image",
"image.NRGBA": "image",
"image.NRGBA64": "image",
"image.NewAlpha": "image",
"image.NewAlpha16": "image",
"image.NewCMYK": "image",
"image.NewGray": "image",
"image.NewGray16": "image",
"image.NewNRGBA": "image",
"image.NewNRGBA64": "image",
"image.NewPaletted": "image",
"image.NewRGBA": "image",
"image.NewRGBA64": "image",
"image.NewUniform": "image",
"image.NewYCbCr": "image",
"image.Opaque": "image",
"image.Paletted": "image",
"image.PalettedImage": "image",
"image.Point": "image",
"image.Pt": "image",
"image.RGBA": "image",
"image.RGBA64": "image",
"image.Rect": "image",
"image.Rectangle": "image",
"image.RegisterFormat": "image",
"image.Transparent": "image",
"image.Uniform": "image",
"image.White": "image",
"image.YCbCr": "image",
"image.YCbCrSubsampleRatio": "image",
"image.YCbCrSubsampleRatio410": "image",
"image.YCbCrSubsampleRatio411": "image",
"image.YCbCrSubsampleRatio420": "image",
"image.YCbCrSubsampleRatio422": "image",
"image.YCbCrSubsampleRatio440": "image",
"image.YCbCrSubsampleRatio444": "image",
"image.ZP": "image",
"image.ZR": "image",
"importer.Default": "go/importer",
"importer.For": "go/importer",
"importer.Lookup": "go/importer",
"io.ByteReader": "io",
"io.ByteScanner": "io",
"io.ByteWriter": "io",
"io.Closer": "io",
"io.Copy": "io",
"io.CopyBuffer": "io",
"io.CopyN": "io",
"io.EOF": "io",
"io.ErrClosedPipe": "io",
"io.ErrNoProgress": "io",
"io.ErrShortBuffer": "io",
"io.ErrShortWrite": "io",
"io.ErrUnexpectedEOF": "io",
"io.LimitReader": "io",
"io.LimitedReader": "io",
"io.MultiReader": "io",
"io.MultiWriter": "io",
"io.NewSectionReader": "io",
"io.Pipe": "io",
"io.PipeReader": "io",
"io.PipeWriter": "io",
"io.ReadAtLeast": "io",
"io.ReadCloser": "io",
"io.ReadFull": "io",
"io.ReadSeeker": "io",
"io.ReadWriteCloser": "io",
"io.ReadWriteSeeker": "io",
"io.ReadWriter": "io",
"io.Reader": "io",
"io.ReaderAt": "io",
"io.ReaderFrom": "io",
"io.RuneReader": "io",
"io.RuneScanner": "io",
"io.SectionReader": "io",
"io.Seeker": "io",
"io.TeeReader": "io",
"io.WriteCloser": "io",
"io.WriteSeeker": "io",
"io.WriteString": "io",
"io.Writer": "io",
"io.WriterAt": "io",
"io.WriterTo": "io",
"iotest.DataErrReader": "testing/iotest",
"iotest.ErrTimeout": "testing/iotest",
"iotest.HalfReader": "testing/iotest",
"iotest.NewReadLogger": "testing/iotest",
"iotest.NewWriteLogger": "testing/iotest",
"iotest.OneByteReader": "testing/iotest",
"iotest.TimeoutReader": "testing/iotest",
"iotest.TruncateWriter": "testing/iotest",
"ioutil.Discard": "io/ioutil",
"ioutil.NopCloser": "io/ioutil",
"ioutil.ReadAll": "io/ioutil",
"ioutil.ReadDir": "io/ioutil",
"ioutil.ReadFile": "io/ioutil",
"ioutil.TempDir": "io/ioutil",
"ioutil.TempFile": "io/ioutil",
"ioutil.WriteFile": "io/ioutil",
"jpeg.Decode": "image/jpeg",
"jpeg.DecodeConfig": "image/jpeg",
"jpeg.DefaultQuality": "image/jpeg",
"jpeg.Encode": "image/jpeg",
"jpeg.FormatError": "image/jpeg",
"jpeg.Options": "image/jpeg",
"jpeg.Reader": "image/jpeg",
"jpeg.UnsupportedError": "image/jpeg",
"json.Compact": "encoding/json",
"json.Decoder": "encoding/json",
"json.Delim": "encoding/json",
"json.Encoder": "encoding/json",
"json.HTMLEscape": "encoding/json",
"json.Indent": "encoding/json",
"json.InvalidUTF8Error": "encoding/json",
"json.InvalidUnmarshalError": "encoding/json",
"json.Marshal": "encoding/json",
"json.MarshalIndent": "encoding/json",
"json.Marshaler": "encoding/json",
"json.MarshalerError": "encoding/json",
"json.NewDecoder": "encoding/json",
"json.NewEncoder": "encoding/json",
"json.Number": "encoding/json",
"json.RawMessage": "encoding/json",
"json.SyntaxError": "encoding/json",
"json.Token": "encoding/json",
"json.Unmarshal": "encoding/json",
"json.UnmarshalFieldError": "encoding/json",
"json.UnmarshalTypeError": "encoding/json",
"json.Unmarshaler": "encoding/json",
"json.UnsupportedTypeError": "encoding/json",
"json.UnsupportedValueError": "encoding/json",
"jsonrpc.Dial": "net/rpc/jsonrpc",
"jsonrpc.NewClient": "net/rpc/jsonrpc",
"jsonrpc.NewClientCodec": "net/rpc/jsonrpc",
"jsonrpc.NewServerCodec": "net/rpc/jsonrpc",
"jsonrpc.ServeConn": "net/rpc/jsonrpc",
"list.Element": "container/list",
"list.List": "container/list",
"list.New": "container/list",
"log.Fatal": "log",
"log.Fatalf": "log",
"log.Fatalln": "log",
"log.Flags": "log",
"log.LUTC": "log",
"log.Ldate": "log",
"log.Llongfile": "log",
"log.Lmicroseconds": "log",
"log.Logger": "log",
"log.Lshortfile": "log",
"log.LstdFlags": "log",
"log.Ltime": "log",
"log.New": "log",
"log.Output": "log",
"log.Panic": "log",
"log.Panicf": "log",
"log.Panicln": "log",
"log.Prefix": "log",
"log.Print": "log",
"log.Printf": "log",
"log.Println": "log",
"log.SetFlags": "log",
"log.SetOutput": "log",
"log.SetPrefix": "log",
"lzw.LSB": "compress/lzw",
"lzw.MSB": "compress/lzw",
"lzw.NewReader": "compress/lzw",
"lzw.NewWriter": "compress/lzw",
"lzw.Order": "compress/lzw",
"macho.Cpu": "debug/macho",
"macho.Cpu386": "debug/macho",
"macho.CpuAmd64": "debug/macho",
"macho.CpuArm": "debug/macho",
"macho.CpuPpc": "debug/macho",
"macho.CpuPpc64": "debug/macho",
"macho.Dylib": "debug/macho",
"macho.DylibCmd": "debug/macho",
"macho.Dysymtab": "debug/macho",
"macho.DysymtabCmd": "debug/macho",
"macho.ErrNotFat": "debug/macho",
"macho.FatArch": "debug/macho",
"macho.FatArchHeader": "debug/macho",
"macho.FatFile": "debug/macho",
"macho.File": "debug/macho",
"macho.FileHeader": "debug/macho",
"macho.FormatError": "debug/macho",
"macho.Load": "debug/macho",
"macho.LoadBytes": "debug/macho",
"macho.LoadCmd": "debug/macho",
"macho.LoadCmdDylib": "debug/macho",
"macho.LoadCmdDylinker": "debug/macho",
"macho.LoadCmdDysymtab": "debug/macho",
"macho.LoadCmdSegment": "debug/macho",
"macho.LoadCmdSegment64": "debug/macho",
"macho.LoadCmdSymtab": "debug/macho",
"macho.LoadCmdThread": "debug/macho",
"macho.LoadCmdUnixThread": "debug/macho",
"macho.Magic32": "debug/macho",
"macho.Magic64": "debug/macho",
"macho.MagicFat": "debug/macho",
"macho.NewFatFile": "debug/macho",
"macho.NewFile": "debug/macho",
"macho.Nlist32": "debug/macho",
"macho.Nlist64": "debug/macho",
"macho.Open": "debug/macho",
"macho.OpenFat": "debug/macho",
"macho.Regs386": "debug/macho",
"macho.RegsAMD64": "debug/macho",
"macho.Section": "debug/macho",
"macho.Section32": "debug/macho",
"macho.Section64": "debug/macho",
"macho.SectionHeader": "debug/macho",
"macho.Segment": "debug/macho",
"macho.Segment32": "debug/macho",
"macho.Segment64": "debug/macho",
"macho.SegmentHeader": "debug/macho",
"macho.Symbol": "debug/macho",
"macho.Symtab": "debug/macho",
"macho.SymtabCmd": "debug/macho",
"macho.Thread": "debug/macho",
"macho.Type": "debug/macho",
"macho.TypeBundle": "debug/macho",
"macho.TypeDylib": "debug/macho",
"macho.TypeExec": "debug/macho",
"macho.TypeObj": "debug/macho",
"mail.Address": "net/mail",
"mail.AddressParser": "net/mail",
"mail.ErrHeaderNotPresent": "net/mail",
"mail.Header": "net/mail",
"mail.Message": "net/mail",
"mail.ParseAddress": "net/mail",
"mail.ParseAddressList": "net/mail",
"mail.ReadMessage": "net/mail",
"math.Abs": "math",
"math.Acos": "math",
"math.Acosh": "math",
"math.Asin": "math",
"math.Asinh": "math",
"math.Atan": "math",
"math.Atan2": "math",
"math.Atanh": "math",
"math.Cbrt": "math",
"math.Ceil": "math",
"math.Copysign": "math",
"math.Cos": "math",
"math.Cosh": "math",
"math.Dim": "math",
"math.E": "math",
"math.Erf": "math",
"math.Erfc": "math",
"math.Exp": "math",
"math.Exp2": "math",
"math.Expm1": "math",
"math.Float32bits": "math",
"math.Float32frombits": "math",
"math.Float64bits": "math",
"math.Float64frombits": "math",
"math.Floor": "math",
"math.Frexp": "math",
"math.Gamma": "math",
"math.Hypot": "math",
"math.Ilogb": "math",
"math.Inf": "math",
"math.IsInf": "math",
"math.IsNaN": "math",
"math.J0": "math",
"math.J1": "math",
"math.Jn": "math",
"math.Ldexp": "math",
"math.Lgamma": "math",
"math.Ln10": "math",
"math.Ln2": "math",
"math.Log": "math",
"math.Log10": "math",
"math.Log10E": "math",
"math.Log1p": "math",
"math.Log2": "math",
"math.Log2E": "math",
"math.Logb": "math",
"math.Max": "math",
"math.MaxFloat32": "math",
"math.MaxFloat64": "math",
"math.MaxInt16": "math",
"math.MaxInt32": "math",
"math.MaxInt64": "math",
"math.MaxInt8": "math",
"math.MaxUint16": "math",
"math.MaxUint32": "math",
"math.MaxUint64": "math",
"math.MaxUint8": "math",
"math.Min": "math",
"math.MinInt16": "math",
"math.MinInt32": "math",
"math.MinInt64": "math",
"math.MinInt8": "math",
"math.Mod": "math",
"math.Modf": "math",
"math.NaN": "math",
"math.Nextafter": "math",
"math.Nextafter32": "math",
"math.Phi": "math",
"math.Pi": "math",
"math.Pow": "math",
"math.Pow10": "math",
"math.Remainder": "math",
"math.Signbit": "math",
"math.Sin": "math",
"math.Sincos": "math",
"math.Sinh": "math",
"math.SmallestNonzeroFloat32": "math",
"math.SmallestNonzeroFloat64": "math",
"math.Sqrt": "math",
"math.Sqrt2": "math",
"math.SqrtE": "math",
"math.SqrtPhi": "math",
"math.SqrtPi": "math",
"math.Tan": "math",
"math.Tanh": "math",
"math.Trunc": "math",
"math.Y0": "math",
"math.Y1": "math",
"math.Yn": "math",
"md5.BlockSize": "crypto/md5",
"md5.New": "crypto/md5",
"md5.Size": "crypto/md5",
"md5.Sum": "crypto/md5",
"mime.AddExtensionType": "mime",
"mime.BEncoding": "mime",
"mime.ExtensionsByType": "mime",
"mime.FormatMediaType": "mime",
"mime.ParseMediaType": "mime",
"mime.QEncoding": "mime",
"mime.TypeByExtension": "mime",
"mime.WordDecoder": "mime",
"mime.WordEncoder": "mime",
"multipart.File": "mime/multipart",
"multipart.FileHeader": "mime/multipart",
"multipart.Form": "mime/multipart",
"multipart.NewReader": "mime/multipart",
"multipart.NewWriter": "mime/multipart",
"multipart.Part": "mime/multipart",
"multipart.Reader": "mime/multipart",
"multipart.Writer": "mime/multipart",
"net.Addr": "net",
"net.AddrError": "net",
"net.CIDRMask": "net",
"net.Conn": "net",
"net.DNSConfigError": "net",
"net.DNSError": "net",
"net.Dial": "net",
"net.DialIP": "net",
"net.DialTCP": "net",
"net.DialTimeout": "net",
"net.DialUDP": "net",
"net.DialUnix": "net",
"net.Dialer": "net",
"net.ErrWriteToConnected": "net",
"net.Error": "net",
"net.FileConn": "net",
"net.FileListener": "net",
"net.FilePacketConn": "net",
"net.FlagBroadcast": "net",
"net.FlagLoopback": "net",
"net.FlagMulticast": "net",
"net.FlagPointToPoint": "net",
"net.FlagUp": "net",
"net.Flags": "net",
"net.HardwareAddr": "net",
"net.IP": "net",
"net.IPAddr": "net",
"net.IPConn": "net",
"net.IPMask": "net",
"net.IPNet": "net",
"net.IPv4": "net",
"net.IPv4Mask": "net",
"net.IPv4allrouter": "net",
"net.IPv4allsys": "net",
"net.IPv4bcast": "net",
"net.IPv4len": "net",
"net.IPv4zero": "net",
"net.IPv6interfacelocalallnodes": "net",
"net.IPv6len": "net",
"net.IPv6linklocalallnodes": "net",
"net.IPv6linklocalallrouters": "net",
"net.IPv6loopback": "net",
"net.IPv6unspecified": "net",
"net.IPv6zero": "net",
"net.Interface": "net",
"net.InterfaceAddrs": "net",
"net.InterfaceByIndex": "net",
"net.InterfaceByName": "net",
"net.Interfaces": "net",
"net.InvalidAddrError": "net",
"net.JoinHostPort": "net",
"net.Listen": "net",
"net.ListenIP": "net",
"net.ListenMulticastUDP": "net",
"net.ListenPacket": "net",
"net.ListenTCP": "net",
"net.ListenUDP": "net",
"net.ListenUnix": "net",
"net.ListenUnixgram": "net",
"net.Listener": "net",
"net.LookupAddr": "net",
"net.LookupCNAME": "net",
"net.LookupHost": "net",
"net.LookupIP": "net",
"net.LookupMX": "net",
"net.LookupNS": "net",
"net.LookupPort": "net",
"net.LookupSRV": "net",
"net.LookupTXT": "net",
"net.MX": "net",
"net.NS": "net",
"net.OpError": "net",
"net.PacketConn": "net",
"net.ParseCIDR": "net",
"net.ParseError": "net",
"net.ParseIP": "net",
"net.ParseMAC": "net",
"net.Pipe": "net",
"net.ResolveIPAddr": "net",
"net.ResolveTCPAddr": "net",
"net.ResolveUDPAddr": "net",
"net.ResolveUnixAddr": "net",
"net.SRV": "net",
"net.SplitHostPort": "net",
"net.TCPAddr": "net",
"net.TCPConn": "net",
"net.TCPListener": "net",
"net.UDPAddr": "net",
"net.UDPConn": "net",
"net.UnixAddr": "net",
"net.UnixConn": "net",
"net.UnixListener": "net",
"net.UnknownNetworkError": "net",
"os.Args": "os",
"os.Chdir": "os",
"os.Chmod": "os",
"os.Chown": "os",
"os.Chtimes": "os",
"os.Clearenv": "os",
"os.Create": "os",
"os.DevNull": "os",
"os.Environ": "os",
"os.ErrExist": "os",
"os.ErrInvalid": "os",
"os.ErrNotExist": "os",
"os.ErrPermission": "os",
"os.Exit": "os",
"os.Expand": "os",
"os.ExpandEnv": "os",
"os.File": "os",
"os.FileInfo": "os",
"os.FileMode": "os",
"os.FindProcess": "os",
"os.Getegid": "os",
"os.Getenv": "os",
"os.Geteuid": "os",
"os.Getgid": "os",
"os.Getgroups": "os",
"os.Getpagesize": "os",
"os.Getpid": "os",
"os.Getppid": "os",
"os.Getuid": "os",
"os.Getwd": "os",
"os.Hostname": "os",
"os.Interrupt": "os",
"os.IsExist": "os",
"os.IsNotExist": "os",
"os.IsPathSeparator": "os",
"os.IsPermission": "os",
"os.Kill": "os",
"os.Lchown": "os",
"os.Link": "os",
"os.LinkError": "os",
"os.LookupEnv": "os",
"os.Lstat": "os",
"os.Mkdir": "os",
"os.MkdirAll": "os",
"os.ModeAppend": "os",
"os.ModeCharDevice": "os",
"os.ModeDevice": "os",
"os.ModeDir": "os",
"os.ModeExclusive": "os",
"os.ModeNamedPipe": "os",
"os.ModePerm": "os",
"os.ModeSetgid": "os",
"os.ModeSetuid": "os",
"os.ModeSocket": "os",
"os.ModeSticky": "os",
"os.ModeSymlink": "os",
"os.ModeTemporary": "os",
"os.ModeType": "os",
"os.NewFile": "os",
"os.NewSyscallError": "os",
"os.O_APPEND": "os",
"os.O_CREATE": "os",
"os.O_EXCL": "os",
"os.O_RDONLY": "os",
"os.O_RDWR": "os",
"os.O_SYNC": "os",
"os.O_TRUNC": "os",
"os.O_WRONLY": "os",
"os.Open": "os",
"os.OpenFile": "os",
"os.PathError": "os",
"os.PathListSeparator": "os",
"os.PathSeparator": "os",
"os.Pipe": "os",
"os.ProcAttr": "os",
"os.Process": "os",
"os.ProcessState": "os",
"os.Readlink": "os",
"os.Remove": "os",
"os.RemoveAll": "os",
"os.Rename": "os",
"os.SEEK_CUR": "os",
"os.SEEK_END": "os",
"os.SEEK_SET": "os",
"os.SameFile": "os",
"os.Setenv": "os",
"os.Signal": "os",
"os.StartProcess": "os",
"os.Stat": "os",
"os.Stderr": "os",
"os.Stdin": "os",
"os.Stdout": "os",
"os.Symlink": "os",
"os.SyscallError": "os",
"os.TempDir": "os",
"os.Truncate": "os",
"os.Unsetenv": "os",
"palette.Plan9": "image/color/palette",
"palette.WebSafe": "image/color/palette",
"parse.ActionNode": "text/template/parse",
"parse.BoolNode": "text/template/parse",
"parse.BranchNode": "text/template/parse",
"parse.ChainNode": "text/template/parse",
"parse.CommandNode": "text/template/parse",
"parse.DotNode": "text/template/parse",
"parse.FieldNode": "text/template/parse",
"parse.IdentifierNode": "text/template/parse",
"parse.IfNode": "text/template/parse",
"parse.IsEmptyTree": "text/template/parse",
"parse.ListNode": "text/template/parse",
"parse.New": "text/template/parse",
"parse.NewIdentifier": "text/template/parse",
"parse.NilNode": "text/template/parse",
"parse.Node": "text/template/parse",
"parse.NodeAction": "text/template/parse",
"parse.NodeBool": "text/template/parse",
"parse.NodeChain": "text/template/parse",
"parse.NodeCommand": "text/template/parse",
"parse.NodeDot": "text/template/parse",
"parse.NodeField": "text/template/parse",
"parse.NodeIdentifier": "text/template/parse",
"parse.NodeIf": "text/template/parse",
"parse.NodeList": "text/template/parse",
"parse.NodeNil": "text/template/parse",
"parse.NodeNumber": "text/template/parse",
"parse.NodePipe": "text/template/parse",
"parse.NodeRange": "text/template/parse",
"parse.NodeString": "text/template/parse",
"parse.NodeTemplate": "text/template/parse",
"parse.NodeText": "text/template/parse",
"parse.NodeType": "text/template/parse",
"parse.NodeVariable": "text/template/parse",
"parse.NodeWith": "text/template/parse",
"parse.NumberNode": "text/template/parse",
"parse.Parse": "text/template/parse",
"parse.PipeNode": "text/template/parse",
"parse.Pos": "text/template/parse",
"parse.RangeNode": "text/template/parse",
"parse.StringNode": "text/template/parse",
"parse.TemplateNode": "text/template/parse",
"parse.TextNode": "text/template/parse",
"parse.Tree": "text/template/parse",
"parse.VariableNode": "text/template/parse",
"parse.WithNode": "text/template/parse",
"parser.AllErrors": "go/parser",
"parser.DeclarationErrors": "go/parser",
"parser.ImportsOnly": "go/parser",
"parser.Mode": "go/parser",
"parser.PackageClauseOnly": "go/parser",
"parser.ParseComments": "go/parser",
"parser.ParseDir": "go/parser",
"parser.ParseExpr": "go/parser",
"parser.ParseExprFrom": "go/parser",
"parser.ParseFile": "go/parser",
"parser.SpuriousErrors": "go/parser",
"parser.Trace": "go/parser",
"path.Base": "path",
"path.Clean": "path",
"path.Dir": "path",
"path.ErrBadPattern": "path",
"path.Ext": "path",
"path.IsAbs": "path",
"path.Join": "path",
"path.Match": "path",
"path.Split": "path",
"pe.COFFSymbol": "debug/pe",
"pe.COFFSymbolSize": "debug/pe",
"pe.DataDirectory": "debug/pe",
"pe.File": "debug/pe",
"pe.FileHeader": "debug/pe",
"pe.FormatError": "debug/pe",
"pe.IMAGE_FILE_MACHINE_AM33": "debug/pe",
"pe.IMAGE_FILE_MACHINE_AMD64": "debug/pe",
"pe.IMAGE_FILE_MACHINE_ARM": "debug/pe",
"pe.IMAGE_FILE_MACHINE_EBC": "debug/pe",
"pe.IMAGE_FILE_MACHINE_I386": "debug/pe",
"pe.IMAGE_FILE_MACHINE_IA64": "debug/pe",
"pe.IMAGE_FILE_MACHINE_M32R": "debug/pe",
"pe.IMAGE_FILE_MACHINE_MIPS16": "debug/pe",
"pe.IMAGE_FILE_MACHINE_MIPSFPU": "debug/pe",
"pe.IMAGE_FILE_MACHINE_MIPSFPU16": "debug/pe",
"pe.IMAGE_FILE_MACHINE_POWERPC": "debug/pe",
"pe.IMAGE_FILE_MACHINE_POWERPCFP": "debug/pe",
"pe.IMAGE_FILE_MACHINE_R4000": "debug/pe",
"pe.IMAGE_FILE_MACHINE_SH3": "debug/pe",
"pe.IMAGE_FILE_MACHINE_SH3DSP": "debug/pe",
"pe.IMAGE_FILE_MACHINE_SH4": "debug/pe",
"pe.IMAGE_FILE_MACHINE_SH5": "debug/pe",
"pe.IMAGE_FILE_MACHINE_THUMB": "debug/pe",
"pe.IMAGE_FILE_MACHINE_UNKNOWN": "debug/pe",
"pe.IMAGE_FILE_MACHINE_WCEMIPSV2": "debug/pe",
"pe.ImportDirectory": "debug/pe",
"pe.NewFile": "debug/pe",
"pe.Open": "debug/pe",
"pe.OptionalHeader32": "debug/pe",
"pe.OptionalHeader64": "debug/pe",
"pe.Section": "debug/pe",
"pe.SectionHeader": "debug/pe",
"pe.SectionHeader32": "debug/pe",
"pe.Symbol": "debug/pe",
"pem.Block": "encoding/pem",
"pem.Decode": "encoding/pem",
"pem.Encode": "encoding/pem",
"pem.EncodeToMemory": "encoding/pem",
"pkix.AlgorithmIdentifier": "crypto/x509/pkix",
"pkix.AttributeTypeAndValue": "crypto/x509/pkix",
"pkix.AttributeTypeAndValueSET": "crypto/x509/pkix",
"pkix.CertificateList": "crypto/x509/pkix",
"pkix.Extension": "crypto/x509/pkix",
"pkix.Name": "crypto/x509/pkix",
"pkix.RDNSequence": "crypto/x509/pkix",
"pkix.RelativeDistinguishedNameSET": "crypto/x509/pkix",
"pkix.RevokedCertificate": "crypto/x509/pkix",
"pkix.TBSCertificateList": "crypto/x509/pkix",
"plan9obj.File": "debug/plan9obj",
"plan9obj.FileHeader": "debug/plan9obj",
"plan9obj.Magic386": "debug/plan9obj",
"plan9obj.Magic64": "debug/plan9obj",
"plan9obj.MagicAMD64": "debug/plan9obj",
"plan9obj.MagicARM": "debug/plan9obj",
"plan9obj.NewFile": "debug/plan9obj",
"plan9obj.Open": "debug/plan9obj",
"plan9obj.Section": "debug/plan9obj",
"plan9obj.SectionHeader": "debug/plan9obj",
"plan9obj.Sym": "debug/plan9obj",
"png.BestCompression": "image/png",
"png.BestSpeed": "image/png",
"png.CompressionLevel": "image/png",
"png.Decode": "image/png",
"png.DecodeConfig": "image/png",
"png.DefaultCompression": "image/png",
"png.Encode": "image/png",
"png.Encoder": "image/png",
"png.FormatError": "image/png",
"png.NoCompression": "image/png",
"png.UnsupportedError": "image/png",
"pprof.Cmdline": "net/http/pprof",
"pprof.Handler": "net/http/pprof",
"pprof.Index": "net/http/pprof",
"pprof.Lookup": "runtime/pprof",
"pprof.NewProfile": "runtime/pprof",
// "pprof.Profile" is ambiguous
"pprof.Profiles": "runtime/pprof",
"pprof.StartCPUProfile": "runtime/pprof",
"pprof.StopCPUProfile": "runtime/pprof",
"pprof.Symbol": "net/http/pprof",
"pprof.Trace": "net/http/pprof",
"pprof.WriteHeapProfile": "runtime/pprof",
"printer.CommentedNode": "go/printer",
"printer.Config": "go/printer",
"printer.Fprint": "go/printer",
"printer.Mode": "go/printer",
"printer.RawFormat": "go/printer",
"printer.SourcePos": "go/printer",
"printer.TabIndent": "go/printer",
"printer.UseSpaces": "go/printer",
"quick.Check": "testing/quick",
"quick.CheckEqual": "testing/quick",
"quick.CheckEqualError": "testing/quick",
"quick.CheckError": "testing/quick",
"quick.Config": "testing/quick",
"quick.Generator": "testing/quick",
"quick.SetupError": "testing/quick",
"quick.Value": "testing/quick",
"quotedprintable.NewReader": "mime/quotedprintable",
"quotedprintable.NewWriter": "mime/quotedprintable",
"quotedprintable.Reader": "mime/quotedprintable",
"quotedprintable.Writer": "mime/quotedprintable",
"rand.ExpFloat64": "math/rand",
"rand.Float32": "math/rand",
"rand.Float64": "math/rand",
// "rand.Int" is ambiguous
"rand.Int31": "math/rand",
"rand.Int31n": "math/rand",
"rand.Int63": "math/rand",
"rand.Int63n": "math/rand",
"rand.Intn": "math/rand",
"rand.New": "math/rand",
"rand.NewSource": "math/rand",
"rand.NewZipf": "math/rand",
"rand.NormFloat64": "math/rand",
"rand.Perm": "math/rand",
"rand.Prime": "crypto/rand",
"rand.Rand": "math/rand",
"rand.Read": "crypto/rand",
"rand.Reader": "crypto/rand",
"rand.Seed": "math/rand",
"rand.Source": "math/rand",
"rand.Uint32": "math/rand",
"rand.Zipf": "math/rand",
"rc4.Cipher": "crypto/rc4",
"rc4.KeySizeError": "crypto/rc4",
"rc4.NewCipher": "crypto/rc4",
"reflect.Append": "reflect",
"reflect.AppendSlice": "reflect",
"reflect.Array": "reflect",
"reflect.ArrayOf": "reflect",
"reflect.Bool": "reflect",
"reflect.BothDir": "reflect",
"reflect.Chan": "reflect",
"reflect.ChanDir": "reflect",
"reflect.ChanOf": "reflect",
"reflect.Complex128": "reflect",
"reflect.Complex64": "reflect",
"reflect.Copy": "reflect",
"reflect.DeepEqual": "reflect",
"reflect.Float32": "reflect",
"reflect.Float64": "reflect",
"reflect.Func": "reflect",
"reflect.FuncOf": "reflect",
"reflect.Indirect": "reflect",
"reflect.Int": "reflect",
"reflect.Int16": "reflect",
"reflect.Int32": "reflect",
"reflect.Int64": "reflect",
"reflect.Int8": "reflect",
"reflect.Interface": "reflect",
"reflect.Invalid": "reflect",
"reflect.Kind": "reflect",
"reflect.MakeChan": "reflect",
"reflect.MakeFunc": "reflect",
"reflect.MakeMap": "reflect",
"reflect.MakeSlice": "reflect",
"reflect.Map": "reflect",
"reflect.MapOf": "reflect",
"reflect.Method": "reflect",
"reflect.New": "reflect",
"reflect.NewAt": "reflect",
"reflect.Ptr": "reflect",
"reflect.PtrTo": "reflect",
"reflect.RecvDir": "reflect",
"reflect.Select": "reflect",
"reflect.SelectCase": "reflect",
"reflect.SelectDefault": "reflect",
"reflect.SelectDir": "reflect",
"reflect.SelectRecv": "reflect",
"reflect.SelectSend": "reflect",
"reflect.SendDir": "reflect",
"reflect.Slice": "reflect",
"reflect.SliceHeader": "reflect",
"reflect.SliceOf": "reflect",
"reflect.String": "reflect",
"reflect.StringHeader": "reflect",
"reflect.Struct": "reflect",
"reflect.StructField": "reflect",
"reflect.StructTag": "reflect",
"reflect.TypeOf": "reflect",
"reflect.Uint": "reflect",
"reflect.Uint16": "reflect",
"reflect.Uint32": "reflect",
"reflect.Uint64": "reflect",
"reflect.Uint8": "reflect",
"reflect.Uintptr": "reflect",
"reflect.UnsafePointer": "reflect",
"reflect.Value": "reflect",
"reflect.ValueError": "reflect",
"reflect.ValueOf": "reflect",
"reflect.Zero": "reflect",
"regexp.Compile": "regexp",
"regexp.CompilePOSIX": "regexp",
"regexp.Match": "regexp",
"regexp.MatchReader": "regexp",
"regexp.MatchString": "regexp",
"regexp.MustCompile": "regexp",
"regexp.MustCompilePOSIX": "regexp",
"regexp.QuoteMeta": "regexp",
"regexp.Regexp": "regexp",
"ring.New": "container/ring",
"ring.Ring": "container/ring",
"rpc.Accept": "net/rpc",
"rpc.Call": "net/rpc",
"rpc.Client": "net/rpc",
"rpc.ClientCodec": "net/rpc",
"rpc.DefaultDebugPath": "net/rpc",
"rpc.DefaultRPCPath": "net/rpc",
"rpc.DefaultServer": "net/rpc",
"rpc.Dial": "net/rpc",
"rpc.DialHTTP": "net/rpc",
"rpc.DialHTTPPath": "net/rpc",
"rpc.ErrShutdown": "net/rpc",
"rpc.HandleHTTP": "net/rpc",
"rpc.NewClient": "net/rpc",
"rpc.NewClientWithCodec": "net/rpc",
"rpc.NewServer": "net/rpc",
"rpc.Register": "net/rpc",
"rpc.RegisterName": "net/rpc",
"rpc.Request": "net/rpc",
"rpc.Response": "net/rpc",
"rpc.ServeCodec": "net/rpc",
"rpc.ServeConn": "net/rpc",
"rpc.ServeRequest": "net/rpc",
"rpc.Server": "net/rpc",
"rpc.ServerCodec": "net/rpc",
"rpc.ServerError": "net/rpc",
"rsa.CRTValue": "crypto/rsa",
"rsa.DecryptOAEP": "crypto/rsa",
"rsa.DecryptPKCS1v15": "crypto/rsa",
"rsa.DecryptPKCS1v15SessionKey": "crypto/rsa",
"rsa.EncryptOAEP": "crypto/rsa",
"rsa.EncryptPKCS1v15": "crypto/rsa",
"rsa.ErrDecryption": "crypto/rsa",
"rsa.ErrMessageTooLong": "crypto/rsa",
"rsa.ErrVerification": "crypto/rsa",
"rsa.GenerateKey": "crypto/rsa",
"rsa.GenerateMultiPrimeKey": "crypto/rsa",
"rsa.OAEPOptions": "crypto/rsa",
"rsa.PKCS1v15DecryptOptions": "crypto/rsa",
"rsa.PSSOptions": "crypto/rsa",
"rsa.PSSSaltLengthAuto": "crypto/rsa",
"rsa.PSSSaltLengthEqualsHash": "crypto/rsa",
"rsa.PrecomputedValues": "crypto/rsa",
"rsa.PrivateKey": "crypto/rsa",
"rsa.PublicKey": "crypto/rsa",
"rsa.SignPKCS1v15": "crypto/rsa",
"rsa.SignPSS": "crypto/rsa",
"rsa.VerifyPKCS1v15": "crypto/rsa",
"rsa.VerifyPSS": "crypto/rsa",
"runtime.BlockProfile": "runtime",
"runtime.BlockProfileRecord": "runtime",
"runtime.Breakpoint": "runtime",
"runtime.CPUProfile": "runtime",
"runtime.Caller": "runtime",
"runtime.Callers": "runtime",
"runtime.Compiler": "runtime",
"runtime.Error": "runtime",
"runtime.Func": "runtime",
"runtime.FuncForPC": "runtime",
"runtime.GC": "runtime",
"runtime.GOARCH": "runtime",
"runtime.GOMAXPROCS": "runtime",
"runtime.GOOS": "runtime",
"runtime.GOROOT": "runtime",
"runtime.Goexit": "runtime",
"runtime.GoroutineProfile": "runtime",
"runtime.Gosched": "runtime",
"runtime.LockOSThread": "runtime",
"runtime.MemProfile": "runtime",
"runtime.MemProfileRate": "runtime",
"runtime.MemProfileRecord": "runtime",
"runtime.MemStats": "runtime",
"runtime.NumCPU": "runtime",
"runtime.NumCgoCall": "runtime",
"runtime.NumGoroutine": "runtime",
"runtime.ReadMemStats": "runtime",
"runtime.ReadTrace": "runtime",
"runtime.SetBlockProfileRate": "runtime",
"runtime.SetCPUProfileRate": "runtime",
"runtime.SetFinalizer": "runtime",
"runtime.Stack": "runtime",
"runtime.StackRecord": "runtime",
"runtime.StartTrace": "runtime",
"runtime.StopTrace": "runtime",
"runtime.ThreadCreateProfile": "runtime",
"runtime.TypeAssertionError": "runtime",
"runtime.UnlockOSThread": "runtime",
"runtime.Version": "runtime",
"scanner.Char": "text/scanner",
"scanner.Comment": "text/scanner",
"scanner.EOF": "text/scanner",
"scanner.Error": "go/scanner",
"scanner.ErrorHandler": "go/scanner",
"scanner.ErrorList": "go/scanner",
"scanner.Float": "text/scanner",
"scanner.GoTokens": "text/scanner",
"scanner.GoWhitespace": "text/scanner",
"scanner.Ident": "text/scanner",
"scanner.Int": "text/scanner",
"scanner.Mode": "go/scanner",
"scanner.Position": "text/scanner",
"scanner.PrintError": "go/scanner",
"scanner.RawString": "text/scanner",
"scanner.ScanChars": "text/scanner",
// "scanner.ScanComments" is ambiguous
"scanner.ScanFloats": "text/scanner",
"scanner.ScanIdents": "text/scanner",
"scanner.ScanInts": "text/scanner",
"scanner.ScanRawStrings": "text/scanner",
"scanner.ScanStrings": "text/scanner",
// "scanner.Scanner" is ambiguous
"scanner.SkipComments": "text/scanner",
"scanner.String": "text/scanner",
"scanner.TokenString": "text/scanner",
"sha1.BlockSize": "crypto/sha1",
"sha1.New": "crypto/sha1",
"sha1.Size": "crypto/sha1",
"sha1.Sum": "crypto/sha1",
"sha256.BlockSize": "crypto/sha256",
"sha256.New": "crypto/sha256",
"sha256.New224": "crypto/sha256",
"sha256.Size": "crypto/sha256",
"sha256.Size224": "crypto/sha256",
"sha256.Sum224": "crypto/sha256",
"sha256.Sum256": "crypto/sha256",
"sha512.BlockSize": "crypto/sha512",
"sha512.New": "crypto/sha512",
"sha512.New384": "crypto/sha512",
"sha512.New512_224": "crypto/sha512",
"sha512.New512_256": "crypto/sha512",
"sha512.Size": "crypto/sha512",
"sha512.Size224": "crypto/sha512",
"sha512.Size256": "crypto/sha512",
"sha512.Size384": "crypto/sha512",
"sha512.Sum384": "crypto/sha512",
"sha512.Sum512": "crypto/sha512",
"sha512.Sum512_224": "crypto/sha512",
"sha512.Sum512_256": "crypto/sha512",
"signal.Ignore": "os/signal",
"signal.Notify": "os/signal",
"signal.Reset": "os/signal",
"signal.Stop": "os/signal",
"smtp.Auth": "net/smtp",
"smtp.CRAMMD5Auth": "net/smtp",
"smtp.Client": "net/smtp",
"smtp.Dial": "net/smtp",
"smtp.NewClient": "net/smtp",
"smtp.PlainAuth": "net/smtp",
"smtp.SendMail": "net/smtp",
"smtp.ServerInfo": "net/smtp",
"sort.Float64Slice": "sort",
"sort.Float64s": "sort",
"sort.Float64sAreSorted": "sort",
"sort.IntSlice": "sort",
"sort.Interface": "sort",
"sort.Ints": "sort",
"sort.IntsAreSorted": "sort",
"sort.IsSorted": "sort",
"sort.Reverse": "sort",
"sort.Search": "sort",
"sort.SearchFloat64s": "sort",
"sort.SearchInts": "sort",
"sort.SearchStrings": "sort",
"sort.Sort": "sort",
"sort.Stable": "sort",
"sort.StringSlice": "sort",
"sort.Strings": "sort",
"sort.StringsAreSorted": "sort",
"sql.DB": "database/sql",
"sql.DBStats": "database/sql",
"sql.Drivers": "database/sql",
"sql.ErrNoRows": "database/sql",
"sql.ErrTxDone": "database/sql",
"sql.NullBool": "database/sql",
"sql.NullFloat64": "database/sql",
"sql.NullInt64": "database/sql",
"sql.NullString": "database/sql",
"sql.Open": "database/sql",
"sql.RawBytes": "database/sql",
"sql.Register": "database/sql",
"sql.Result": "database/sql",
"sql.Row": "database/sql",
"sql.Rows": "database/sql",
"sql.Scanner": "database/sql",
"sql.Stmt": "database/sql",
"sql.Tx": "database/sql",
"strconv.AppendBool": "strconv",
"strconv.AppendFloat": "strconv",
"strconv.AppendInt": "strconv",
"strconv.AppendQuote": "strconv",
"strconv.AppendQuoteRune": "strconv",
"strconv.AppendQuoteRuneToASCII": "strconv",
"strconv.AppendQuoteToASCII": "strconv",
"strconv.AppendUint": "strconv",
"strconv.Atoi": "strconv",
"strconv.CanBackquote": "strconv",
"strconv.ErrRange": "strconv",
"strconv.ErrSyntax": "strconv",
"strconv.FormatBool": "strconv",
"strconv.FormatFloat": "strconv",
"strconv.FormatInt": "strconv",
"strconv.FormatUint": "strconv",
"strconv.IntSize": "strconv",
"strconv.IsPrint": "strconv",
"strconv.Itoa": "strconv",
"strconv.NumError": "strconv",
"strconv.ParseBool": "strconv",
"strconv.ParseFloat": "strconv",
"strconv.ParseInt": "strconv",
"strconv.ParseUint": "strconv",
"strconv.Quote": "strconv",
"strconv.QuoteRune": "strconv",
"strconv.QuoteRuneToASCII": "strconv",
"strconv.QuoteToASCII": "strconv",
"strconv.Unquote": "strconv",
"strconv.UnquoteChar": "strconv",
"strings.Compare": "strings",
"strings.Contains": "strings",
"strings.ContainsAny": "strings",
"strings.ContainsRune": "strings",
"strings.Count": "strings",
"strings.EqualFold": "strings",
"strings.Fields": "strings",
"strings.FieldsFunc": "strings",
"strings.HasPrefix": "strings",
"strings.HasSuffix": "strings",
"strings.Index": "strings",
"strings.IndexAny": "strings",
"strings.IndexByte": "strings",
"strings.IndexFunc": "strings",
"strings.IndexRune": "strings",
"strings.Join": "strings",
"strings.LastIndex": "strings",
"strings.LastIndexAny": "strings",
"strings.LastIndexByte": "strings",
"strings.LastIndexFunc": "strings",
"strings.Map": "strings",
"strings.NewReader": "strings",
"strings.NewReplacer": "strings",
"strings.Reader": "strings",
"strings.Repeat": "strings",
"strings.Replace": "strings",
"strings.Replacer": "strings",
"strings.Split": "strings",
"strings.SplitAfter": "strings",
"strings.SplitAfterN": "strings",
"strings.SplitN": "strings",
"strings.Title": "strings",
"strings.ToLower": "strings",
"strings.ToLowerSpecial": "strings",
"strings.ToTitle": "strings",
"strings.ToTitleSpecial": "strings",
"strings.ToUpper": "strings",
"strings.ToUpperSpecial": "strings",
"strings.Trim": "strings",
"strings.TrimFunc": "strings",
"strings.TrimLeft": "strings",
"strings.TrimLeftFunc": "strings",
"strings.TrimPrefix": "strings",
"strings.TrimRight": "strings",
"strings.TrimRightFunc": "strings",
"strings.TrimSpace": "strings",
"strings.TrimSuffix": "strings",
"subtle.ConstantTimeByteEq": "crypto/subtle",
"subtle.ConstantTimeCompare": "crypto/subtle",
"subtle.ConstantTimeCopy": "crypto/subtle",
"subtle.ConstantTimeEq": "crypto/subtle",
"subtle.ConstantTimeLessOrEq": "crypto/subtle",
"subtle.ConstantTimeSelect": "crypto/subtle",
"suffixarray.Index": "index/suffixarray",
"suffixarray.New": "index/suffixarray",
"sync.Cond": "sync",
"sync.Locker": "sync",
"sync.Mutex": "sync",
"sync.NewCond": "sync",
"sync.Once": "sync",
"sync.Pool": "sync",
"sync.RWMutex": "sync",
"sync.WaitGroup": "sync",
"syntax.ClassNL": "regexp/syntax",
"syntax.Compile": "regexp/syntax",
"syntax.DotNL": "regexp/syntax",
"syntax.EmptyBeginLine": "regexp/syntax",
"syntax.EmptyBeginText": "regexp/syntax",
"syntax.EmptyEndLine": "regexp/syntax",
"syntax.EmptyEndText": "regexp/syntax",
"syntax.EmptyNoWordBoundary": "regexp/syntax",
"syntax.EmptyOp": "regexp/syntax",
"syntax.EmptyOpContext": "regexp/syntax",
"syntax.EmptyWordBoundary": "regexp/syntax",
"syntax.ErrInternalError": "regexp/syntax",
"syntax.ErrInvalidCharClass": "regexp/syntax",
"syntax.ErrInvalidCharRange": "regexp/syntax",
"syntax.ErrInvalidEscape": "regexp/syntax",
"syntax.ErrInvalidNamedCapture": "regexp/syntax",
"syntax.ErrInvalidPerlOp": "regexp/syntax",
"syntax.ErrInvalidRepeatOp": "regexp/syntax",
"syntax.ErrInvalidRepeatSize": "regexp/syntax",
"syntax.ErrInvalidUTF8": "regexp/syntax",
"syntax.ErrMissingBracket": "regexp/syntax",
"syntax.ErrMissingParen": "regexp/syntax",
"syntax.ErrMissingRepeatArgument": "regexp/syntax",
"syntax.ErrTrailingBackslash": "regexp/syntax",
"syntax.ErrUnexpectedParen": "regexp/syntax",
"syntax.Error": "regexp/syntax",
"syntax.ErrorCode": "regexp/syntax",
"syntax.Flags": "regexp/syntax",
"syntax.FoldCase": "regexp/syntax",
"syntax.Inst": "regexp/syntax",
"syntax.InstAlt": "regexp/syntax",
"syntax.InstAltMatch": "regexp/syntax",
"syntax.InstCapture": "regexp/syntax",
"syntax.InstEmptyWidth": "regexp/syntax",
"syntax.InstFail": "regexp/syntax",
"syntax.InstMatch": "regexp/syntax",
"syntax.InstNop": "regexp/syntax",
"syntax.InstOp": "regexp/syntax",
"syntax.InstRune": "regexp/syntax",
"syntax.InstRune1": "regexp/syntax",
"syntax.InstRuneAny": "regexp/syntax",
"syntax.InstRuneAnyNotNL": "regexp/syntax",
"syntax.IsWordChar": "regexp/syntax",
"syntax.Literal": "regexp/syntax",
"syntax.MatchNL": "regexp/syntax",
"syntax.NonGreedy": "regexp/syntax",
"syntax.OneLine": "regexp/syntax",
"syntax.Op": "regexp/syntax",
"syntax.OpAlternate": "regexp/syntax",
"syntax.OpAnyChar": "regexp/syntax",
"syntax.OpAnyCharNotNL": "regexp/syntax",
"syntax.OpBeginLine": "regexp/syntax",
"syntax.OpBeginText": "regexp/syntax",
"syntax.OpCapture": "regexp/syntax",
"syntax.OpCharClass": "regexp/syntax",
"syntax.OpConcat": "regexp/syntax",
"syntax.OpEmptyMatch": "regexp/syntax",
"syntax.OpEndLine": "regexp/syntax",
"syntax.OpEndText": "regexp/syntax",
"syntax.OpLiteral": "regexp/syntax",
"syntax.OpNoMatch": "regexp/syntax",
"syntax.OpNoWordBoundary": "regexp/syntax",
"syntax.OpPlus": "regexp/syntax",
"syntax.OpQuest": "regexp/syntax",
"syntax.OpRepeat": "regexp/syntax",
"syntax.OpStar": "regexp/syntax",
"syntax.OpWordBoundary": "regexp/syntax",
"syntax.POSIX": "regexp/syntax",
"syntax.Parse": "regexp/syntax",
"syntax.Perl": "regexp/syntax",
"syntax.PerlX": "regexp/syntax",
"syntax.Prog": "regexp/syntax",
"syntax.Regexp": "regexp/syntax",
"syntax.Simple": "regexp/syntax",
"syntax.UnicodeGroups": "regexp/syntax",
"syntax.WasDollar": "regexp/syntax",
"syscall.AF_ALG": "syscall",
"syscall.AF_APPLETALK": "syscall",
"syscall.AF_ARP": "syscall",
"syscall.AF_ASH": "syscall",
"syscall.AF_ATM": "syscall",
"syscall.AF_ATMPVC": "syscall",
"syscall.AF_ATMSVC": "syscall",
"syscall.AF_AX25": "syscall",
"syscall.AF_BLUETOOTH": "syscall",
"syscall.AF_BRIDGE": "syscall",
"syscall.AF_CAIF": "syscall",
"syscall.AF_CAN": "syscall",
"syscall.AF_CCITT": "syscall",
"syscall.AF_CHAOS": "syscall",
"syscall.AF_CNT": "syscall",
"syscall.AF_COIP": "syscall",
"syscall.AF_DATAKIT": "syscall",
"syscall.AF_DECnet": "syscall",
"syscall.AF_DLI": "syscall",
"syscall.AF_E164": "syscall",
"syscall.AF_ECMA": "syscall",
"syscall.AF_ECONET": "syscall",
"syscall.AF_ENCAP": "syscall",
"syscall.AF_FILE": "syscall",
"syscall.AF_HYLINK": "syscall",
"syscall.AF_IEEE80211": "syscall",
"syscall.AF_IEEE802154": "syscall",
"syscall.AF_IMPLINK": "syscall",
"syscall.AF_INET": "syscall",
"syscall.AF_INET6": "syscall",
"syscall.AF_INET6_SDP": "syscall",
"syscall.AF_INET_SDP": "syscall",
"syscall.AF_IPX": "syscall",
"syscall.AF_IRDA": "syscall",
"syscall.AF_ISDN": "syscall",
"syscall.AF_ISO": "syscall",
"syscall.AF_IUCV": "syscall",
"syscall.AF_KEY": "syscall",
"syscall.AF_LAT": "syscall",
"syscall.AF_LINK": "syscall",
"syscall.AF_LLC": "syscall",
"syscall.AF_LOCAL": "syscall",
"syscall.AF_MAX": "syscall",
"syscall.AF_MPLS": "syscall",
"syscall.AF_NATM": "syscall",
"syscall.AF_NDRV": "syscall",
"syscall.AF_NETBEUI": "syscall",
"syscall.AF_NETBIOS": "syscall",
"syscall.AF_NETGRAPH": "syscall",
"syscall.AF_NETLINK": "syscall",
"syscall.AF_NETROM": "syscall",
"syscall.AF_NS": "syscall",
"syscall.AF_OROUTE": "syscall",
"syscall.AF_OSI": "syscall",
"syscall.AF_PACKET": "syscall",
"syscall.AF_PHONET": "syscall",
"syscall.AF_PPP": "syscall",
"syscall.AF_PPPOX": "syscall",
"syscall.AF_PUP": "syscall",
"syscall.AF_RDS": "syscall",
"syscall.AF_RESERVED_36": "syscall",
"syscall.AF_ROSE": "syscall",
"syscall.AF_ROUTE": "syscall",
"syscall.AF_RXRPC": "syscall",
"syscall.AF_SCLUSTER": "syscall",
"syscall.AF_SECURITY": "syscall",
"syscall.AF_SIP": "syscall",
"syscall.AF_SLOW": "syscall",
"syscall.AF_SNA": "syscall",
"syscall.AF_SYSTEM": "syscall",
"syscall.AF_TIPC": "syscall",
"syscall.AF_UNIX": "syscall",
"syscall.AF_UNSPEC": "syscall",
"syscall.AF_VENDOR00": "syscall",
"syscall.AF_VENDOR01": "syscall",
"syscall.AF_VENDOR02": "syscall",
"syscall.AF_VENDOR03": "syscall",
"syscall.AF_VENDOR04": "syscall",
"syscall.AF_VENDOR05": "syscall",
"syscall.AF_VENDOR06": "syscall",
"syscall.AF_VENDOR07": "syscall",
"syscall.AF_VENDOR08": "syscall",
"syscall.AF_VENDOR09": "syscall",
"syscall.AF_VENDOR10": "syscall",
"syscall.AF_VENDOR11": "syscall",
"syscall.AF_VENDOR12": "syscall",
"syscall.AF_VENDOR13": "syscall",
"syscall.AF_VENDOR14": "syscall",
"syscall.AF_VENDOR15": "syscall",
"syscall.AF_VENDOR16": "syscall",
"syscall.AF_VENDOR17": "syscall",
"syscall.AF_VENDOR18": "syscall",
"syscall.AF_VENDOR19": "syscall",
"syscall.AF_VENDOR20": "syscall",
"syscall.AF_VENDOR21": "syscall",
"syscall.AF_VENDOR22": "syscall",
"syscall.AF_VENDOR23": "syscall",
"syscall.AF_VENDOR24": "syscall",
"syscall.AF_VENDOR25": "syscall",
"syscall.AF_VENDOR26": "syscall",
"syscall.AF_VENDOR27": "syscall",
"syscall.AF_VENDOR28": "syscall",
"syscall.AF_VENDOR29": "syscall",
"syscall.AF_VENDOR30": "syscall",
"syscall.AF_VENDOR31": "syscall",
"syscall.AF_VENDOR32": "syscall",
"syscall.AF_VENDOR33": "syscall",
"syscall.AF_VENDOR34": "syscall",
"syscall.AF_VENDOR35": "syscall",
"syscall.AF_VENDOR36": "syscall",
"syscall.AF_VENDOR37": "syscall",
"syscall.AF_VENDOR38": "syscall",
"syscall.AF_VENDOR39": "syscall",
"syscall.AF_VENDOR40": "syscall",
"syscall.AF_VENDOR41": "syscall",
"syscall.AF_VENDOR42": "syscall",
"syscall.AF_VENDOR43": "syscall",
"syscall.AF_VENDOR44": "syscall",
"syscall.AF_VENDOR45": "syscall",
"syscall.AF_VENDOR46": "syscall",
"syscall.AF_VENDOR47": "syscall",
"syscall.AF_WANPIPE": "syscall",
"syscall.AF_X25": "syscall",
"syscall.AI_CANONNAME": "syscall",
"syscall.AI_NUMERICHOST": "syscall",
"syscall.AI_PASSIVE": "syscall",
"syscall.APPLICATION_ERROR": "syscall",
"syscall.ARPHRD_ADAPT": "syscall",
"syscall.ARPHRD_APPLETLK": "syscall",
"syscall.ARPHRD_ARCNET": "syscall",
"syscall.ARPHRD_ASH": "syscall",
"syscall.ARPHRD_ATM": "syscall",
"syscall.ARPHRD_AX25": "syscall",
"syscall.ARPHRD_BIF": "syscall",
"syscall.ARPHRD_CHAOS": "syscall",
"syscall.ARPHRD_CISCO": "syscall",
"syscall.ARPHRD_CSLIP": "syscall",
"syscall.ARPHRD_CSLIP6": "syscall",
"syscall.ARPHRD_DDCMP": "syscall",
"syscall.ARPHRD_DLCI": "syscall",
"syscall.ARPHRD_ECONET": "syscall",
"syscall.ARPHRD_EETHER": "syscall",
"syscall.ARPHRD_ETHER": "syscall",
"syscall.ARPHRD_EUI64": "syscall",
"syscall.ARPHRD_FCAL": "syscall",
"syscall.ARPHRD_FCFABRIC": "syscall",
"syscall.ARPHRD_FCPL": "syscall",
"syscall.ARPHRD_FCPP": "syscall",
"syscall.ARPHRD_FDDI": "syscall",
"syscall.ARPHRD_FRAD": "syscall",
"syscall.ARPHRD_FRELAY": "syscall",
"syscall.ARPHRD_HDLC": "syscall",
"syscall.ARPHRD_HIPPI": "syscall",
"syscall.ARPHRD_HWX25": "syscall",
"syscall.ARPHRD_IEEE1394": "syscall",
"syscall.ARPHRD_IEEE802": "syscall",
"syscall.ARPHRD_IEEE80211": "syscall",
"syscall.ARPHRD_IEEE80211_PRISM": "syscall",
"syscall.ARPHRD_IEEE80211_RADIOTAP": "syscall",
"syscall.ARPHRD_IEEE802154": "syscall",
"syscall.ARPHRD_IEEE802154_PHY": "syscall",
"syscall.ARPHRD_IEEE802_TR": "syscall",
"syscall.ARPHRD_INFINIBAND": "syscall",
"syscall.ARPHRD_IPDDP": "syscall",
"syscall.ARPHRD_IPGRE": "syscall",
"syscall.ARPHRD_IRDA": "syscall",
"syscall.ARPHRD_LAPB": "syscall",
"syscall.ARPHRD_LOCALTLK": "syscall",
"syscall.ARPHRD_LOOPBACK": "syscall",
"syscall.ARPHRD_METRICOM": "syscall",
"syscall.ARPHRD_NETROM": "syscall",
"syscall.ARPHRD_NONE": "syscall",
"syscall.ARPHRD_PIMREG": "syscall",
"syscall.ARPHRD_PPP": "syscall",
"syscall.ARPHRD_PRONET": "syscall",
"syscall.ARPHRD_RAWHDLC": "syscall",
"syscall.ARPHRD_ROSE": "syscall",
"syscall.ARPHRD_RSRVD": "syscall",
"syscall.ARPHRD_SIT": "syscall",
"syscall.ARPHRD_SKIP": "syscall",
"syscall.ARPHRD_SLIP": "syscall",
"syscall.ARPHRD_SLIP6": "syscall",
"syscall.ARPHRD_STRIP": "syscall",
"syscall.ARPHRD_TUNNEL": "syscall",
"syscall.ARPHRD_TUNNEL6": "syscall",
"syscall.ARPHRD_VOID": "syscall",
"syscall.ARPHRD_X25": "syscall",
"syscall.AUTHTYPE_CLIENT": "syscall",
"syscall.AUTHTYPE_SERVER": "syscall",
"syscall.Accept": "syscall",
"syscall.Accept4": "syscall",
"syscall.AcceptEx": "syscall",
"syscall.Access": "syscall",
"syscall.Acct": "syscall",
"syscall.AddrinfoW": "syscall",
"syscall.Adjtime": "syscall",
"syscall.Adjtimex": "syscall",
"syscall.AttachLsf": "syscall",
"syscall.B0": "syscall",
"syscall.B1000000": "syscall",
"syscall.B110": "syscall",
"syscall.B115200": "syscall",
"syscall.B1152000": "syscall",
"syscall.B1200": "syscall",
"syscall.B134": "syscall",
"syscall.B14400": "syscall",
"syscall.B150": "syscall",
"syscall.B1500000": "syscall",
"syscall.B1800": "syscall",
"syscall.B19200": "syscall",
"syscall.B200": "syscall",
"syscall.B2000000": "syscall",
"syscall.B230400": "syscall",
"syscall.B2400": "syscall",
"syscall.B2500000": "syscall",
"syscall.B28800": "syscall",
"syscall.B300": "syscall",
"syscall.B3000000": "syscall",
"syscall.B3500000": "syscall",
"syscall.B38400": "syscall",
"syscall.B4000000": "syscall",
"syscall.B460800": "syscall",
"syscall.B4800": "syscall",
"syscall.B50": "syscall",
"syscall.B500000": "syscall",
"syscall.B57600": "syscall",
"syscall.B576000": "syscall",
"syscall.B600": "syscall",
"syscall.B7200": "syscall",
"syscall.B75": "syscall",
"syscall.B76800": "syscall",
"syscall.B921600": "syscall",
"syscall.B9600": "syscall",
"syscall.BASE_PROTOCOL": "syscall",
"syscall.BIOCFEEDBACK": "syscall",
"syscall.BIOCFLUSH": "syscall",
"syscall.BIOCGBLEN": "syscall",
"syscall.BIOCGDIRECTION": "syscall",
"syscall.BIOCGDIRFILT": "syscall",
"syscall.BIOCGDLT": "syscall",
"syscall.BIOCGDLTLIST": "syscall",
"syscall.BIOCGETBUFMODE": "syscall",
"syscall.BIOCGETIF": "syscall",
"syscall.BIOCGETZMAX": "syscall",
"syscall.BIOCGFEEDBACK": "syscall",
"syscall.BIOCGFILDROP": "syscall",
"syscall.BIOCGHDRCMPLT": "syscall",
"syscall.BIOCGRSIG": "syscall",
"syscall.BIOCGRTIMEOUT": "syscall",
"syscall.BIOCGSEESENT": "syscall",
"syscall.BIOCGSTATS": "syscall",
"syscall.BIOCGSTATSOLD": "syscall",
"syscall.BIOCGTSTAMP": "syscall",
"syscall.BIOCIMMEDIATE": "syscall",
"syscall.BIOCLOCK": "syscall",
"syscall.BIOCPROMISC": "syscall",
"syscall.BIOCROTZBUF": "syscall",
"syscall.BIOCSBLEN": "syscall",
"syscall.BIOCSDIRECTION": "syscall",
"syscall.BIOCSDIRFILT": "syscall",
"syscall.BIOCSDLT": "syscall",
"syscall.BIOCSETBUFMODE": "syscall",
"syscall.BIOCSETF": "syscall",
"syscall.BIOCSETFNR": "syscall",
"syscall.BIOCSETIF": "syscall",
"syscall.BIOCSETWF": "syscall",
"syscall.BIOCSETZBUF": "syscall",
"syscall.BIOCSFEEDBACK": "syscall",
"syscall.BIOCSFILDROP": "syscall",
"syscall.BIOCSHDRCMPLT": "syscall",
"syscall.BIOCSRSIG": "syscall",
"syscall.BIOCSRTIMEOUT": "syscall",
"syscall.BIOCSSEESENT": "syscall",
"syscall.BIOCSTCPF": "syscall",
"syscall.BIOCSTSTAMP": "syscall",
"syscall.BIOCSUDPF": "syscall",
"syscall.BIOCVERSION": "syscall",
"syscall.BPF_A": "syscall",
"syscall.BPF_ABS": "syscall",
"syscall.BPF_ADD": "syscall",
"syscall.BPF_ALIGNMENT": "syscall",
"syscall.BPF_ALIGNMENT32": "syscall",
"syscall.BPF_ALU": "syscall",
"syscall.BPF_AND": "syscall",
"syscall.BPF_B": "syscall",
"syscall.BPF_BUFMODE_BUFFER": "syscall",
"syscall.BPF_BUFMODE_ZBUF": "syscall",
"syscall.BPF_DFLTBUFSIZE": "syscall",
"syscall.BPF_DIRECTION_IN": "syscall",
"syscall.BPF_DIRECTION_OUT": "syscall",
"syscall.BPF_DIV": "syscall",
"syscall.BPF_H": "syscall",
"syscall.BPF_IMM": "syscall",
"syscall.BPF_IND": "syscall",
"syscall.BPF_JA": "syscall",
"syscall.BPF_JEQ": "syscall",
"syscall.BPF_JGE": "syscall",
"syscall.BPF_JGT": "syscall",
"syscall.BPF_JMP": "syscall",
"syscall.BPF_JSET": "syscall",
"syscall.BPF_K": "syscall",
"syscall.BPF_LD": "syscall",
"syscall.BPF_LDX": "syscall",
"syscall.BPF_LEN": "syscall",
"syscall.BPF_LSH": "syscall",
"syscall.BPF_MAJOR_VERSION": "syscall",
"syscall.BPF_MAXBUFSIZE": "syscall",
"syscall.BPF_MAXINSNS": "syscall",
"syscall.BPF_MEM": "syscall",
"syscall.BPF_MEMWORDS": "syscall",
"syscall.BPF_MINBUFSIZE": "syscall",
"syscall.BPF_MINOR_VERSION": "syscall",
"syscall.BPF_MISC": "syscall",
"syscall.BPF_MSH": "syscall",
"syscall.BPF_MUL": "syscall",
"syscall.BPF_NEG": "syscall",
"syscall.BPF_OR": "syscall",
"syscall.BPF_RELEASE": "syscall",
"syscall.BPF_RET": "syscall",
"syscall.BPF_RSH": "syscall",
"syscall.BPF_ST": "syscall",
"syscall.BPF_STX": "syscall",
"syscall.BPF_SUB": "syscall",
"syscall.BPF_TAX": "syscall",
"syscall.BPF_TXA": "syscall",
"syscall.BPF_T_BINTIME": "syscall",
"syscall.BPF_T_BINTIME_FAST": "syscall",
"syscall.BPF_T_BINTIME_MONOTONIC": "syscall",
"syscall.BPF_T_BINTIME_MONOTONIC_FAST": "syscall",
"syscall.BPF_T_FAST": "syscall",
"syscall.BPF_T_FLAG_MASK": "syscall",
"syscall.BPF_T_FORMAT_MASK": "syscall",
"syscall.BPF_T_MICROTIME": "syscall",
"syscall.BPF_T_MICROTIME_FAST": "syscall",
"syscall.BPF_T_MICROTIME_MONOTONIC": "syscall",
"syscall.BPF_T_MICROTIME_MONOTONIC_FAST": "syscall",
"syscall.BPF_T_MONOTONIC": "syscall",
"syscall.BPF_T_MONOTONIC_FAST": "syscall",
"syscall.BPF_T_NANOTIME": "syscall",
"syscall.BPF_T_NANOTIME_FAST": "syscall",
"syscall.BPF_T_NANOTIME_MONOTONIC": "syscall",
"syscall.BPF_T_NANOTIME_MONOTONIC_FAST": "syscall",
"syscall.BPF_T_NONE": "syscall",
"syscall.BPF_T_NORMAL": "syscall",
"syscall.BPF_W": "syscall",
"syscall.BPF_X": "syscall",
"syscall.BRKINT": "syscall",
"syscall.Bind": "syscall",
"syscall.BindToDevice": "syscall",
"syscall.BpfBuflen": "syscall",
"syscall.BpfDatalink": "syscall",
"syscall.BpfHdr": "syscall",
"syscall.BpfHeadercmpl": "syscall",
"syscall.BpfInsn": "syscall",
"syscall.BpfInterface": "syscall",
"syscall.BpfJump": "syscall",
"syscall.BpfProgram": "syscall",
"syscall.BpfStat": "syscall",
"syscall.BpfStats": "syscall",
"syscall.BpfStmt": "syscall",
"syscall.BpfTimeout": "syscall",
"syscall.BpfTimeval": "syscall",
"syscall.BpfVersion": "syscall",
"syscall.BpfZbuf": "syscall",
"syscall.BpfZbufHeader": "syscall",
"syscall.ByHandleFileInformation": "syscall",
"syscall.BytePtrFromString": "syscall",
"syscall.ByteSliceFromString": "syscall",
"syscall.CCR0_FLUSH": "syscall",
"syscall.CERT_CHAIN_POLICY_AUTHENTICODE": "syscall",
"syscall.CERT_CHAIN_POLICY_AUTHENTICODE_TS": "syscall",
"syscall.CERT_CHAIN_POLICY_BASE": "syscall",
"syscall.CERT_CHAIN_POLICY_BASIC_CONSTRAINTS": "syscall",
"syscall.CERT_CHAIN_POLICY_EV": "syscall",
"syscall.CERT_CHAIN_POLICY_MICROSOFT_ROOT": "syscall",
"syscall.CERT_CHAIN_POLICY_NT_AUTH": "syscall",
"syscall.CERT_CHAIN_POLICY_SSL": "syscall",
"syscall.CERT_E_CN_NO_MATCH": "syscall",
"syscall.CERT_E_EXPIRED": "syscall",
"syscall.CERT_E_PURPOSE": "syscall",
"syscall.CERT_E_ROLE": "syscall",
"syscall.CERT_E_UNTRUSTEDROOT": "syscall",
"syscall.CERT_STORE_ADD_ALWAYS": "syscall",
"syscall.CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG": "syscall",
"syscall.CERT_STORE_PROV_MEMORY": "syscall",
"syscall.CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT": "syscall",
"syscall.CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT": "syscall",
"syscall.CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT": "syscall",
"syscall.CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT": "syscall",
"syscall.CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT": "syscall",
"syscall.CERT_TRUST_INVALID_BASIC_CONSTRAINTS": "syscall",
"syscall.CERT_TRUST_INVALID_EXTENSION": "syscall",
"syscall.CERT_TRUST_INVALID_NAME_CONSTRAINTS": "syscall",
"syscall.CERT_TRUST_INVALID_POLICY_CONSTRAINTS": "syscall",
"syscall.CERT_TRUST_IS_CYCLIC": "syscall",
"syscall.CERT_TRUST_IS_EXPLICIT_DISTRUST": "syscall",
"syscall.CERT_TRUST_IS_NOT_SIGNATURE_VALID": "syscall",
"syscall.CERT_TRUST_IS_NOT_TIME_VALID": "syscall",
"syscall.CERT_TRUST_IS_NOT_VALID_FOR_USAGE": "syscall",
"syscall.CERT_TRUST_IS_OFFLINE_REVOCATION": "syscall",
"syscall.CERT_TRUST_IS_REVOKED": "syscall",
"syscall.CERT_TRUST_IS_UNTRUSTED_ROOT": "syscall",
"syscall.CERT_TRUST_NO_ERROR": "syscall",
"syscall.CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY": "syscall",
"syscall.CERT_TRUST_REVOCATION_STATUS_UNKNOWN": "syscall",
"syscall.CFLUSH": "syscall",
"syscall.CLOCAL": "syscall",
"syscall.CLONE_CHILD_CLEARTID": "syscall",
"syscall.CLONE_CHILD_SETTID": "syscall",
"syscall.CLONE_CSIGNAL": "syscall",
"syscall.CLONE_DETACHED": "syscall",
"syscall.CLONE_FILES": "syscall",
"syscall.CLONE_FS": "syscall",
"syscall.CLONE_IO": "syscall",
"syscall.CLONE_NEWIPC": "syscall",
"syscall.CLONE_NEWNET": "syscall",
"syscall.CLONE_NEWNS": "syscall",
"syscall.CLONE_NEWPID": "syscall",
"syscall.CLONE_NEWUSER": "syscall",
"syscall.CLONE_NEWUTS": "syscall",
"syscall.CLONE_PARENT": "syscall",
"syscall.CLONE_PARENT_SETTID": "syscall",
"syscall.CLONE_PID": "syscall",
"syscall.CLONE_PTRACE": "syscall",
"syscall.CLONE_SETTLS": "syscall",
"syscall.CLONE_SIGHAND": "syscall",
"syscall.CLONE_SYSVSEM": "syscall",
"syscall.CLONE_THREAD": "syscall",
"syscall.CLONE_UNTRACED": "syscall",
"syscall.CLONE_VFORK": "syscall",
"syscall.CLONE_VM": "syscall",
"syscall.CPUID_CFLUSH": "syscall",
"syscall.CREAD": "syscall",
"syscall.CREATE_ALWAYS": "syscall",
"syscall.CREATE_NEW": "syscall",
"syscall.CREATE_NEW_PROCESS_GROUP": "syscall",
"syscall.CREATE_UNICODE_ENVIRONMENT": "syscall",
"syscall.CRYPT_DEFAULT_CONTAINER_OPTIONAL": "syscall",
"syscall.CRYPT_DELETEKEYSET": "syscall",
"syscall.CRYPT_MACHINE_KEYSET": "syscall",
"syscall.CRYPT_NEWKEYSET": "syscall",
"syscall.CRYPT_SILENT": "syscall",
"syscall.CRYPT_VERIFYCONTEXT": "syscall",
"syscall.CS5": "syscall",
"syscall.CS6": "syscall",
"syscall.CS7": "syscall",
"syscall.CS8": "syscall",
"syscall.CSIZE": "syscall",
"syscall.CSTART": "syscall",
"syscall.CSTATUS": "syscall",
"syscall.CSTOP": "syscall",
"syscall.CSTOPB": "syscall",
"syscall.CSUSP": "syscall",
"syscall.CTL_MAXNAME": "syscall",
"syscall.CTL_NET": "syscall",
"syscall.CTL_QUERY": "syscall",
"syscall.CTRL_BREAK_EVENT": "syscall",
"syscall.CTRL_C_EVENT": "syscall",
"syscall.CancelIo": "syscall",
"syscall.CancelIoEx": "syscall",
"syscall.CertAddCertificateContextToStore": "syscall",
"syscall.CertChainContext": "syscall",
"syscall.CertChainElement": "syscall",
"syscall.CertChainPara": "syscall",
"syscall.CertChainPolicyPara": "syscall",
"syscall.CertChainPolicyStatus": "syscall",
"syscall.CertCloseStore": "syscall",
"syscall.CertContext": "syscall",
"syscall.CertCreateCertificateContext": "syscall",
"syscall.CertEnhKeyUsage": "syscall",
"syscall.CertEnumCertificatesInStore": "syscall",
"syscall.CertFreeCertificateChain": "syscall",
"syscall.CertFreeCertificateContext": "syscall",
"syscall.CertGetCertificateChain": "syscall",
"syscall.CertOpenStore": "syscall",
"syscall.CertOpenSystemStore": "syscall",
"syscall.CertRevocationInfo": "syscall",
"syscall.CertSimpleChain": "syscall",
"syscall.CertTrustStatus": "syscall",
"syscall.CertUsageMatch": "syscall",
"syscall.CertVerifyCertificateChainPolicy": "syscall",
"syscall.Chdir": "syscall",
"syscall.CheckBpfVersion": "syscall",
"syscall.Chflags": "syscall",
"syscall.Chmod": "syscall",
"syscall.Chown": "syscall",
"syscall.Chroot": "syscall",
"syscall.Clearenv": "syscall",
"syscall.Close": "syscall",
"syscall.CloseHandle": "syscall",
"syscall.CloseOnExec": "syscall",
"syscall.Closesocket": "syscall",
"syscall.CmsgLen": "syscall",
"syscall.CmsgSpace": "syscall",
"syscall.Cmsghdr": "syscall",
"syscall.CommandLineToArgv": "syscall",
"syscall.ComputerName": "syscall",
"syscall.Connect": "syscall",
"syscall.ConnectEx": "syscall",
"syscall.ConvertSidToStringSid": "syscall",
"syscall.ConvertStringSidToSid": "syscall",
"syscall.CopySid": "syscall",
"syscall.Creat": "syscall",
"syscall.CreateDirectory": "syscall",
"syscall.CreateFile": "syscall",
"syscall.CreateFileMapping": "syscall",
"syscall.CreateHardLink": "syscall",
"syscall.CreateIoCompletionPort": "syscall",
"syscall.CreatePipe": "syscall",
"syscall.CreateProcess": "syscall",
"syscall.CreateSymbolicLink": "syscall",
"syscall.CreateToolhelp32Snapshot": "syscall",
"syscall.Credential": "syscall",
"syscall.CryptAcquireContext": "syscall",
"syscall.CryptGenRandom": "syscall",
"syscall.CryptReleaseContext": "syscall",
"syscall.DIOCBSFLUSH": "syscall",
"syscall.DIOCOSFPFLUSH": "syscall",
"syscall.DLL": "syscall",
"syscall.DLLError": "syscall",
"syscall.DLT_A429": "syscall",
"syscall.DLT_A653_ICM": "syscall",
"syscall.DLT_AIRONET_HEADER": "syscall",
"syscall.DLT_AOS": "syscall",
"syscall.DLT_APPLE_IP_OVER_IEEE1394": "syscall",
"syscall.DLT_ARCNET": "syscall",
"syscall.DLT_ARCNET_LINUX": "syscall",
"syscall.DLT_ATM_CLIP": "syscall",
"syscall.DLT_ATM_RFC1483": "syscall",
"syscall.DLT_AURORA": "syscall",
"syscall.DLT_AX25": "syscall",
"syscall.DLT_AX25_KISS": "syscall",
"syscall.DLT_BACNET_MS_TP": "syscall",
"syscall.DLT_BLUETOOTH_HCI_H4": "syscall",
"syscall.DLT_BLUETOOTH_HCI_H4_WITH_PHDR": "syscall",
"syscall.DLT_CAN20B": "syscall",
"syscall.DLT_CAN_SOCKETCAN": "syscall",
"syscall.DLT_CHAOS": "syscall",
"syscall.DLT_CHDLC": "syscall",
"syscall.DLT_CISCO_IOS": "syscall",
"syscall.DLT_C_HDLC": "syscall",
"syscall.DLT_C_HDLC_WITH_DIR": "syscall",
"syscall.DLT_DBUS": "syscall",
"syscall.DLT_DECT": "syscall",
"syscall.DLT_DOCSIS": "syscall",
"syscall.DLT_DVB_CI": "syscall",
"syscall.DLT_ECONET": "syscall",
"syscall.DLT_EN10MB": "syscall",
"syscall.DLT_EN3MB": "syscall",
"syscall.DLT_ENC": "syscall",
"syscall.DLT_ERF": "syscall",
"syscall.DLT_ERF_ETH": "syscall",
"syscall.DLT_ERF_POS": "syscall",
"syscall.DLT_FC_2": "syscall",
"syscall.DLT_FC_2_WITH_FRAME_DELIMS": "syscall",
"syscall.DLT_FDDI": "syscall",
"syscall.DLT_FLEXRAY": "syscall",
"syscall.DLT_FRELAY": "syscall",
"syscall.DLT_FRELAY_WITH_DIR": "syscall",
"syscall.DLT_GCOM_SERIAL": "syscall",
"syscall.DLT_GCOM_T1E1": "syscall",
"syscall.DLT_GPF_F": "syscall",
"syscall.DLT_GPF_T": "syscall",
"syscall.DLT_GPRS_LLC": "syscall",
"syscall.DLT_GSMTAP_ABIS": "syscall",
"syscall.DLT_GSMTAP_UM": "syscall",
"syscall.DLT_HDLC": "syscall",
"syscall.DLT_HHDLC": "syscall",
"syscall.DLT_HIPPI": "syscall",
"syscall.DLT_IBM_SN": "syscall",
"syscall.DLT_IBM_SP": "syscall",
"syscall.DLT_IEEE802": "syscall",
"syscall.DLT_IEEE802_11": "syscall",
"syscall.DLT_IEEE802_11_RADIO": "syscall",
"syscall.DLT_IEEE802_11_RADIO_AVS": "syscall",
"syscall.DLT_IEEE802_15_4": "syscall",
"syscall.DLT_IEEE802_15_4_LINUX": "syscall",
"syscall.DLT_IEEE802_15_4_NOFCS": "syscall",
"syscall.DLT_IEEE802_15_4_NONASK_PHY": "syscall",
"syscall.DLT_IEEE802_16_MAC_CPS": "syscall",
"syscall.DLT_IEEE802_16_MAC_CPS_RADIO": "syscall",
"syscall.DLT_IPFILTER": "syscall",
"syscall.DLT_IPMB": "syscall",
"syscall.DLT_IPMB_LINUX": "syscall",
"syscall.DLT_IPNET": "syscall",
"syscall.DLT_IPOIB": "syscall",
"syscall.DLT_IPV4": "syscall",
"syscall.DLT_IPV6": "syscall",
"syscall.DLT_IP_OVER_FC": "syscall",
"syscall.DLT_JUNIPER_ATM1": "syscall",
"syscall.DLT_JUNIPER_ATM2": "syscall",
"syscall.DLT_JUNIPER_ATM_CEMIC": "syscall",
"syscall.DLT_JUNIPER_CHDLC": "syscall",
"syscall.DLT_JUNIPER_ES": "syscall",
"syscall.DLT_JUNIPER_ETHER": "syscall",
"syscall.DLT_JUNIPER_FIBRECHANNEL": "syscall",
"syscall.DLT_JUNIPER_FRELAY": "syscall",
"syscall.DLT_JUNIPER_GGSN": "syscall",
"syscall.DLT_JUNIPER_ISM": "syscall",
"syscall.DLT_JUNIPER_MFR": "syscall",
"syscall.DLT_JUNIPER_MLFR": "syscall",
"syscall.DLT_JUNIPER_MLPPP": "syscall",
"syscall.DLT_JUNIPER_MONITOR": "syscall",
"syscall.DLT_JUNIPER_PIC_PEER": "syscall",
"syscall.DLT_JUNIPER_PPP": "syscall",
"syscall.DLT_JUNIPER_PPPOE": "syscall",
"syscall.DLT_JUNIPER_PPPOE_ATM": "syscall",
"syscall.DLT_JUNIPER_SERVICES": "syscall",
"syscall.DLT_JUNIPER_SRX_E2E": "syscall",
"syscall.DLT_JUNIPER_ST": "syscall",
"syscall.DLT_JUNIPER_VP": "syscall",
"syscall.DLT_JUNIPER_VS": "syscall",
"syscall.DLT_LAPB_WITH_DIR": "syscall",
"syscall.DLT_LAPD": "syscall",
"syscall.DLT_LIN": "syscall",
"syscall.DLT_LINUX_EVDEV": "syscall",
"syscall.DLT_LINUX_IRDA": "syscall",
"syscall.DLT_LINUX_LAPD": "syscall",
"syscall.DLT_LINUX_PPP_WITHDIRECTION": "syscall",
"syscall.DLT_LINUX_SLL": "syscall",
"syscall.DLT_LOOP": "syscall",
"syscall.DLT_LTALK": "syscall",
"syscall.DLT_MATCHING_MAX": "syscall",
"syscall.DLT_MATCHING_MIN": "syscall",
"syscall.DLT_MFR": "syscall",
"syscall.DLT_MOST": "syscall",
"syscall.DLT_MPEG_2_TS": "syscall",
"syscall.DLT_MPLS": "syscall",
"syscall.DLT_MTP2": "syscall",
"syscall.DLT_MTP2_WITH_PHDR": "syscall",
"syscall.DLT_MTP3": "syscall",
"syscall.DLT_MUX27010": "syscall",
"syscall.DLT_NETANALYZER": "syscall",
"syscall.DLT_NETANALYZER_TRANSPARENT": "syscall",
"syscall.DLT_NFC_LLCP": "syscall",
"syscall.DLT_NFLOG": "syscall",
"syscall.DLT_NG40": "syscall",
"syscall.DLT_NULL": "syscall",
"syscall.DLT_PCI_EXP": "syscall",
"syscall.DLT_PFLOG": "syscall",
"syscall.DLT_PFSYNC": "syscall",
"syscall.DLT_PPI": "syscall",
"syscall.DLT_PPP": "syscall",
"syscall.DLT_PPP_BSDOS": "syscall",
"syscall.DLT_PPP_ETHER": "syscall",
"syscall.DLT_PPP_PPPD": "syscall",
"syscall.DLT_PPP_SERIAL": "syscall",
"syscall.DLT_PPP_WITH_DIR": "syscall",
"syscall.DLT_PPP_WITH_DIRECTION": "syscall",
"syscall.DLT_PRISM_HEADER": "syscall",
"syscall.DLT_PRONET": "syscall",
"syscall.DLT_RAIF1": "syscall",
"syscall.DLT_RAW": "syscall",
"syscall.DLT_RAWAF_MASK": "syscall",
"syscall.DLT_RIO": "syscall",
"syscall.DLT_SCCP": "syscall",
"syscall.DLT_SITA": "syscall",
"syscall.DLT_SLIP": "syscall",
"syscall.DLT_SLIP_BSDOS": "syscall",
"syscall.DLT_STANAG_5066_D_PDU": "syscall",
"syscall.DLT_SUNATM": "syscall",
"syscall.DLT_SYMANTEC_FIREWALL": "syscall",
"syscall.DLT_TZSP": "syscall",
"syscall.DLT_USB": "syscall",
"syscall.DLT_USB_LINUX": "syscall",
"syscall.DLT_USB_LINUX_MMAPPED": "syscall",
"syscall.DLT_USER0": "syscall",
"syscall.DLT_USER1": "syscall",
"syscall.DLT_USER10": "syscall",
"syscall.DLT_USER11": "syscall",
"syscall.DLT_USER12": "syscall",
"syscall.DLT_USER13": "syscall",
"syscall.DLT_USER14": "syscall",
"syscall.DLT_USER15": "syscall",
"syscall.DLT_USER2": "syscall",
"syscall.DLT_USER3": "syscall",
"syscall.DLT_USER4": "syscall",
"syscall.DLT_USER5": "syscall",
"syscall.DLT_USER6": "syscall",
"syscall.DLT_USER7": "syscall",
"syscall.DLT_USER8": "syscall",
"syscall.DLT_USER9": "syscall",
"syscall.DLT_WIHART": "syscall",
"syscall.DLT_X2E_SERIAL": "syscall",
"syscall.DLT_X2E_XORAYA": "syscall",
"syscall.DNSMXData": "syscall",
"syscall.DNSPTRData": "syscall",
"syscall.DNSRecord": "syscall",
"syscall.DNSSRVData": "syscall",
"syscall.DNSTXTData": "syscall",
"syscall.DNS_INFO_NO_RECORDS": "syscall",
"syscall.DNS_TYPE_A": "syscall",
"syscall.DNS_TYPE_A6": "syscall",
"syscall.DNS_TYPE_AAAA": "syscall",
"syscall.DNS_TYPE_ADDRS": "syscall",
"syscall.DNS_TYPE_AFSDB": "syscall",
"syscall.DNS_TYPE_ALL": "syscall",
"syscall.DNS_TYPE_ANY": "syscall",
"syscall.DNS_TYPE_ATMA": "syscall",
"syscall.DNS_TYPE_AXFR": "syscall",
"syscall.DNS_TYPE_CERT": "syscall",
"syscall.DNS_TYPE_CNAME": "syscall",
"syscall.DNS_TYPE_DHCID": "syscall",
"syscall.DNS_TYPE_DNAME": "syscall",
"syscall.DNS_TYPE_DNSKEY": "syscall",
"syscall.DNS_TYPE_DS": "syscall",
"syscall.DNS_TYPE_EID": "syscall",
"syscall.DNS_TYPE_GID": "syscall",
"syscall.DNS_TYPE_GPOS": "syscall",
"syscall.DNS_TYPE_HINFO": "syscall",
"syscall.DNS_TYPE_ISDN": "syscall",
"syscall.DNS_TYPE_IXFR": "syscall",
"syscall.DNS_TYPE_KEY": "syscall",
"syscall.DNS_TYPE_KX": "syscall",
"syscall.DNS_TYPE_LOC": "syscall",
"syscall.DNS_TYPE_MAILA": "syscall",
"syscall.DNS_TYPE_MAILB": "syscall",
"syscall.DNS_TYPE_MB": "syscall",
"syscall.DNS_TYPE_MD": "syscall",
"syscall.DNS_TYPE_MF": "syscall",
"syscall.DNS_TYPE_MG": "syscall",
"syscall.DNS_TYPE_MINFO": "syscall",
"syscall.DNS_TYPE_MR": "syscall",
"syscall.DNS_TYPE_MX": "syscall",
"syscall.DNS_TYPE_NAPTR": "syscall",
"syscall.DNS_TYPE_NBSTAT": "syscall",
"syscall.DNS_TYPE_NIMLOC": "syscall",
"syscall.DNS_TYPE_NS": "syscall",
"syscall.DNS_TYPE_NSAP": "syscall",
"syscall.DNS_TYPE_NSAPPTR": "syscall",
"syscall.DNS_TYPE_NSEC": "syscall",
"syscall.DNS_TYPE_NULL": "syscall",
"syscall.DNS_TYPE_NXT": "syscall",
"syscall.DNS_TYPE_OPT": "syscall",
"syscall.DNS_TYPE_PTR": "syscall",
"syscall.DNS_TYPE_PX": "syscall",
"syscall.DNS_TYPE_RP": "syscall",
"syscall.DNS_TYPE_RRSIG": "syscall",
"syscall.DNS_TYPE_RT": "syscall",
"syscall.DNS_TYPE_SIG": "syscall",
"syscall.DNS_TYPE_SINK": "syscall",
"syscall.DNS_TYPE_SOA": "syscall",
"syscall.DNS_TYPE_SRV": "syscall",
"syscall.DNS_TYPE_TEXT": "syscall",
"syscall.DNS_TYPE_TKEY": "syscall",
"syscall.DNS_TYPE_TSIG": "syscall",
"syscall.DNS_TYPE_UID": "syscall",
"syscall.DNS_TYPE_UINFO": "syscall",
"syscall.DNS_TYPE_UNSPEC": "syscall",
"syscall.DNS_TYPE_WINS": "syscall",
"syscall.DNS_TYPE_WINSR": "syscall",
"syscall.DNS_TYPE_WKS": "syscall",
"syscall.DNS_TYPE_X25": "syscall",
"syscall.DT_BLK": "syscall",
"syscall.DT_CHR": "syscall",
"syscall.DT_DIR": "syscall",
"syscall.DT_FIFO": "syscall",
"syscall.DT_LNK": "syscall",
"syscall.DT_REG": "syscall",
"syscall.DT_SOCK": "syscall",
"syscall.DT_UNKNOWN": "syscall",
"syscall.DT_WHT": "syscall",
"syscall.DUPLICATE_CLOSE_SOURCE": "syscall",
"syscall.DUPLICATE_SAME_ACCESS": "syscall",
"syscall.DeleteFile": "syscall",
"syscall.DetachLsf": "syscall",
"syscall.DeviceIoControl": "syscall",
"syscall.Dirent": "syscall",
"syscall.DnsNameCompare": "syscall",
"syscall.DnsQuery": "syscall",
"syscall.DnsRecordListFree": "syscall",
"syscall.DnsSectionAdditional": "syscall",
"syscall.DnsSectionAnswer": "syscall",
"syscall.DnsSectionAuthority": "syscall",
"syscall.DnsSectionQuestion": "syscall",
"syscall.Dup": "syscall",
"syscall.Dup2": "syscall",
"syscall.Dup3": "syscall",
"syscall.DuplicateHandle": "syscall",
"syscall.E2BIG": "syscall",
"syscall.EACCES": "syscall",
"syscall.EADDRINUSE": "syscall",
"syscall.EADDRNOTAVAIL": "syscall",
"syscall.EADV": "syscall",
"syscall.EAFNOSUPPORT": "syscall",
"syscall.EAGAIN": "syscall",
"syscall.EALREADY": "syscall",
"syscall.EAUTH": "syscall",
"syscall.EBADARCH": "syscall",
"syscall.EBADE": "syscall",
"syscall.EBADEXEC": "syscall",
"syscall.EBADF": "syscall",
"syscall.EBADFD": "syscall",
"syscall.EBADMACHO": "syscall",
"syscall.EBADMSG": "syscall",
"syscall.EBADR": "syscall",
"syscall.EBADRPC": "syscall",
"syscall.EBADRQC": "syscall",
"syscall.EBADSLT": "syscall",
"syscall.EBFONT": "syscall",
"syscall.EBUSY": "syscall",
"syscall.ECANCELED": "syscall",
"syscall.ECAPMODE": "syscall",
"syscall.ECHILD": "syscall",
"syscall.ECHO": "syscall",
"syscall.ECHOCTL": "syscall",
"syscall.ECHOE": "syscall",
"syscall.ECHOK": "syscall",
"syscall.ECHOKE": "syscall",
"syscall.ECHONL": "syscall",
"syscall.ECHOPRT": "syscall",
"syscall.ECHRNG": "syscall",
"syscall.ECOMM": "syscall",
"syscall.ECONNABORTED": "syscall",
"syscall.ECONNREFUSED": "syscall",
"syscall.ECONNRESET": "syscall",
"syscall.EDEADLK": "syscall",
"syscall.EDEADLOCK": "syscall",
"syscall.EDESTADDRREQ": "syscall",
"syscall.EDEVERR": "syscall",
"syscall.EDOM": "syscall",
"syscall.EDOOFUS": "syscall",
"syscall.EDOTDOT": "syscall",
"syscall.EDQUOT": "syscall",
"syscall.EEXIST": "syscall",
"syscall.EFAULT": "syscall",
"syscall.EFBIG": "syscall",
"syscall.EFER_LMA": "syscall",
"syscall.EFER_LME": "syscall",
"syscall.EFER_NXE": "syscall",
"syscall.EFER_SCE": "syscall",
"syscall.EFTYPE": "syscall",
"syscall.EHOSTDOWN": "syscall",
"syscall.EHOSTUNREACH": "syscall",
"syscall.EHWPOISON": "syscall",
"syscall.EIDRM": "syscall",
"syscall.EILSEQ": "syscall",
"syscall.EINPROGRESS": "syscall",
"syscall.EINTR": "syscall",
"syscall.EINVAL": "syscall",
"syscall.EIO": "syscall",
"syscall.EIPSEC": "syscall",
"syscall.EISCONN": "syscall",
"syscall.EISDIR": "syscall",
"syscall.EISNAM": "syscall",
"syscall.EKEYEXPIRED": "syscall",
"syscall.EKEYREJECTED": "syscall",
"syscall.EKEYREVOKED": "syscall",
"syscall.EL2HLT": "syscall",
"syscall.EL2NSYNC": "syscall",
"syscall.EL3HLT": "syscall",
"syscall.EL3RST": "syscall",
"syscall.ELAST": "syscall",
"syscall.ELF_NGREG": "syscall",
"syscall.ELF_PRARGSZ": "syscall",
"syscall.ELIBACC": "syscall",
"syscall.ELIBBAD": "syscall",
"syscall.ELIBEXEC": "syscall",
"syscall.ELIBMAX": "syscall",
"syscall.ELIBSCN": "syscall",
"syscall.ELNRNG": "syscall",
"syscall.ELOOP": "syscall",
"syscall.EMEDIUMTYPE": "syscall",
"syscall.EMFILE": "syscall",
"syscall.EMLINK": "syscall",
"syscall.EMSGSIZE": "syscall",
"syscall.EMT_TAGOVF": "syscall",
"syscall.EMULTIHOP": "syscall",
"syscall.EMUL_ENABLED": "syscall",
"syscall.EMUL_LINUX": "syscall",
"syscall.EMUL_LINUX32": "syscall",
"syscall.EMUL_MAXID": "syscall",
"syscall.EMUL_NATIVE": "syscall",
"syscall.ENAMETOOLONG": "syscall",
"syscall.ENAVAIL": "syscall",
"syscall.ENDRUNDISC": "syscall",
"syscall.ENEEDAUTH": "syscall",
"syscall.ENETDOWN": "syscall",
"syscall.ENETRESET": "syscall",
"syscall.ENETUNREACH": "syscall",
"syscall.ENFILE": "syscall",
"syscall.ENOANO": "syscall",
"syscall.ENOATTR": "syscall",
"syscall.ENOBUFS": "syscall",
"syscall.ENOCSI": "syscall",
"syscall.ENODATA": "syscall",
"syscall.ENODEV": "syscall",
"syscall.ENOENT": "syscall",
"syscall.ENOEXEC": "syscall",
"syscall.ENOKEY": "syscall",
"syscall.ENOLCK": "syscall",
"syscall.ENOLINK": "syscall",
"syscall.ENOMEDIUM": "syscall",
"syscall.ENOMEM": "syscall",
"syscall.ENOMSG": "syscall",
"syscall.ENONET": "syscall",
"syscall.ENOPKG": "syscall",
"syscall.ENOPOLICY": "syscall",
"syscall.ENOPROTOOPT": "syscall",
"syscall.ENOSPC": "syscall",
"syscall.ENOSR": "syscall",
"syscall.ENOSTR": "syscall",
"syscall.ENOSYS": "syscall",
"syscall.ENOTBLK": "syscall",
"syscall.ENOTCAPABLE": "syscall",
"syscall.ENOTCONN": "syscall",
"syscall.ENOTDIR": "syscall",
"syscall.ENOTEMPTY": "syscall",
"syscall.ENOTNAM": "syscall",
"syscall.ENOTRECOVERABLE": "syscall",
"syscall.ENOTSOCK": "syscall",
"syscall.ENOTSUP": "syscall",
"syscall.ENOTTY": "syscall",
"syscall.ENOTUNIQ": "syscall",
"syscall.ENXIO": "syscall",
"syscall.EN_SW_CTL_INF": "syscall",
"syscall.EN_SW_CTL_PREC": "syscall",
"syscall.EN_SW_CTL_ROUND": "syscall",
"syscall.EN_SW_DATACHAIN": "syscall",
"syscall.EN_SW_DENORM": "syscall",
"syscall.EN_SW_INVOP": "syscall",
"syscall.EN_SW_OVERFLOW": "syscall",
"syscall.EN_SW_PRECLOSS": "syscall",
"syscall.EN_SW_UNDERFLOW": "syscall",
"syscall.EN_SW_ZERODIV": "syscall",
"syscall.EOPNOTSUPP": "syscall",
"syscall.EOVERFLOW": "syscall",
"syscall.EOWNERDEAD": "syscall",
"syscall.EPERM": "syscall",
"syscall.EPFNOSUPPORT": "syscall",
"syscall.EPIPE": "syscall",
"syscall.EPOLLERR": "syscall",
"syscall.EPOLLET": "syscall",
"syscall.EPOLLHUP": "syscall",
"syscall.EPOLLIN": "syscall",
"syscall.EPOLLMSG": "syscall",
"syscall.EPOLLONESHOT": "syscall",
"syscall.EPOLLOUT": "syscall",
"syscall.EPOLLPRI": "syscall",
"syscall.EPOLLRDBAND": "syscall",
"syscall.EPOLLRDHUP": "syscall",
"syscall.EPOLLRDNORM": "syscall",
"syscall.EPOLLWRBAND": "syscall",
"syscall.EPOLLWRNORM": "syscall",
"syscall.EPOLL_CLOEXEC": "syscall",
"syscall.EPOLL_CTL_ADD": "syscall",
"syscall.EPOLL_CTL_DEL": "syscall",
"syscall.EPOLL_CTL_MOD": "syscall",
"syscall.EPOLL_NONBLOCK": "syscall",
"syscall.EPROCLIM": "syscall",
"syscall.EPROCUNAVAIL": "syscall",
"syscall.EPROGMISMATCH": "syscall",
"syscall.EPROGUNAVAIL": "syscall",
"syscall.EPROTO": "syscall",
"syscall.EPROTONOSUPPORT": "syscall",
"syscall.EPROTOTYPE": "syscall",
"syscall.EPWROFF": "syscall",
"syscall.ERANGE": "syscall",
"syscall.EREMCHG": "syscall",
"syscall.EREMOTE": "syscall",
"syscall.EREMOTEIO": "syscall",
"syscall.ERESTART": "syscall",
"syscall.ERFKILL": "syscall",
"syscall.EROFS": "syscall",
"syscall.ERPCMISMATCH": "syscall",
"syscall.ERROR_ACCESS_DENIED": "syscall",
"syscall.ERROR_ALREADY_EXISTS": "syscall",
"syscall.ERROR_BROKEN_PIPE": "syscall",
"syscall.ERROR_BUFFER_OVERFLOW": "syscall",
"syscall.ERROR_ENVVAR_NOT_FOUND": "syscall",
"syscall.ERROR_FILE_EXISTS": "syscall",
"syscall.ERROR_FILE_NOT_FOUND": "syscall",
"syscall.ERROR_HANDLE_EOF": "syscall",
"syscall.ERROR_INSUFFICIENT_BUFFER": "syscall",
"syscall.ERROR_IO_PENDING": "syscall",
"syscall.ERROR_MOD_NOT_FOUND": "syscall",
"syscall.ERROR_MORE_DATA": "syscall",
"syscall.ERROR_NETNAME_DELETED": "syscall",
"syscall.ERROR_NOT_FOUND": "syscall",
"syscall.ERROR_NO_MORE_FILES": "syscall",
"syscall.ERROR_OPERATION_ABORTED": "syscall",
"syscall.ERROR_PATH_NOT_FOUND": "syscall",
"syscall.ERROR_PRIVILEGE_NOT_HELD": "syscall",
"syscall.ERROR_PROC_NOT_FOUND": "syscall",
"syscall.ESHLIBVERS": "syscall",
"syscall.ESHUTDOWN": "syscall",
"syscall.ESOCKTNOSUPPORT": "syscall",
"syscall.ESPIPE": "syscall",
"syscall.ESRCH": "syscall",
"syscall.ESRMNT": "syscall",
"syscall.ESTALE": "syscall",
"syscall.ESTRPIPE": "syscall",
"syscall.ETHERCAP_JUMBO_MTU": "syscall",
"syscall.ETHERCAP_VLAN_HWTAGGING": "syscall",
"syscall.ETHERCAP_VLAN_MTU": "syscall",
"syscall.ETHERMIN": "syscall",
"syscall.ETHERMTU": "syscall",
"syscall.ETHERMTU_JUMBO": "syscall",
"syscall.ETHERTYPE_8023": "syscall",
"syscall.ETHERTYPE_AARP": "syscall",
"syscall.ETHERTYPE_ACCTON": "syscall",
"syscall.ETHERTYPE_AEONIC": "syscall",
"syscall.ETHERTYPE_ALPHA": "syscall",
"syscall.ETHERTYPE_AMBER": "syscall",
"syscall.ETHERTYPE_AMOEBA": "syscall",
"syscall.ETHERTYPE_AOE": "syscall",
"syscall.ETHERTYPE_APOLLO": "syscall",
"syscall.ETHERTYPE_APOLLODOMAIN": "syscall",
"syscall.ETHERTYPE_APPLETALK": "syscall",
"syscall.ETHERTYPE_APPLITEK": "syscall",
"syscall.ETHERTYPE_ARGONAUT": "syscall",
"syscall.ETHERTYPE_ARP": "syscall",
"syscall.ETHERTYPE_AT": "syscall",
"syscall.ETHERTYPE_ATALK": "syscall",
"syscall.ETHERTYPE_ATOMIC": "syscall",
"syscall.ETHERTYPE_ATT": "syscall",
"syscall.ETHERTYPE_ATTSTANFORD": "syscall",
"syscall.ETHERTYPE_AUTOPHON": "syscall",
"syscall.ETHERTYPE_AXIS": "syscall",
"syscall.ETHERTYPE_BCLOOP": "syscall",
"syscall.ETHERTYPE_BOFL": "syscall",
"syscall.ETHERTYPE_CABLETRON": "syscall",
"syscall.ETHERTYPE_CHAOS": "syscall",
"syscall.ETHERTYPE_COMDESIGN": "syscall",
"syscall.ETHERTYPE_COMPUGRAPHIC": "syscall",
"syscall.ETHERTYPE_COUNTERPOINT": "syscall",
"syscall.ETHERTYPE_CRONUS": "syscall",
"syscall.ETHERTYPE_CRONUSVLN": "syscall",
"syscall.ETHERTYPE_DCA": "syscall",
"syscall.ETHERTYPE_DDE": "syscall",
"syscall.ETHERTYPE_DEBNI": "syscall",
"syscall.ETHERTYPE_DECAM": "syscall",
"syscall.ETHERTYPE_DECCUST": "syscall",
"syscall.ETHERTYPE_DECDIAG": "syscall",
"syscall.ETHERTYPE_DECDNS": "syscall",
"syscall.ETHERTYPE_DECDTS": "syscall",
"syscall.ETHERTYPE_DECEXPER": "syscall",
"syscall.ETHERTYPE_DECLAST": "syscall",
"syscall.ETHERTYPE_DECLTM": "syscall",
"syscall.ETHERTYPE_DECMUMPS": "syscall",
"syscall.ETHERTYPE_DECNETBIOS": "syscall",
"syscall.ETHERTYPE_DELTACON": "syscall",
"syscall.ETHERTYPE_DIDDLE": "syscall",
"syscall.ETHERTYPE_DLOG1": "syscall",
"syscall.ETHERTYPE_DLOG2": "syscall",
"syscall.ETHERTYPE_DN": "syscall",
"syscall.ETHERTYPE_DOGFIGHT": "syscall",
"syscall.ETHERTYPE_DSMD": "syscall",
"syscall.ETHERTYPE_ECMA": "syscall",
"syscall.ETHERTYPE_ENCRYPT": "syscall",
"syscall.ETHERTYPE_ES": "syscall",
"syscall.ETHERTYPE_EXCELAN": "syscall",
"syscall.ETHERTYPE_EXPERDATA": "syscall",
"syscall.ETHERTYPE_FLIP": "syscall",
"syscall.ETHERTYPE_FLOWCONTROL": "syscall",
"syscall.ETHERTYPE_FRARP": "syscall",
"syscall.ETHERTYPE_GENDYN": "syscall",
"syscall.ETHERTYPE_HAYES": "syscall",
"syscall.ETHERTYPE_HIPPI_FP": "syscall",
"syscall.ETHERTYPE_HITACHI": "syscall",
"syscall.ETHERTYPE_HP": "syscall",
"syscall.ETHERTYPE_IEEEPUP": "syscall",
"syscall.ETHERTYPE_IEEEPUPAT": "syscall",
"syscall.ETHERTYPE_IMLBL": "syscall",
"syscall.ETHERTYPE_IMLBLDIAG": "syscall",
"syscall.ETHERTYPE_IP": "syscall",
"syscall.ETHERTYPE_IPAS": "syscall",
"syscall.ETHERTYPE_IPV6": "syscall",
"syscall.ETHERTYPE_IPX": "syscall",
"syscall.ETHERTYPE_IPXNEW": "syscall",
"syscall.ETHERTYPE_KALPANA": "syscall",
"syscall.ETHERTYPE_LANBRIDGE": "syscall",
"syscall.ETHERTYPE_LANPROBE": "syscall",
"syscall.ETHERTYPE_LAT": "syscall",
"syscall.ETHERTYPE_LBACK": "syscall",
"syscall.ETHERTYPE_LITTLE": "syscall",
"syscall.ETHERTYPE_LLDP": "syscall",
"syscall.ETHERTYPE_LOGICRAFT": "syscall",
"syscall.ETHERTYPE_LOOPBACK": "syscall",
"syscall.ETHERTYPE_MATRA": "syscall",
"syscall.ETHERTYPE_MAX": "syscall",
"syscall.ETHERTYPE_MERIT": "syscall",
"syscall.ETHERTYPE_MICP": "syscall",
"syscall.ETHERTYPE_MOPDL": "syscall",
"syscall.ETHERTYPE_MOPRC": "syscall",
"syscall.ETHERTYPE_MOTOROLA": "syscall",
"syscall.ETHERTYPE_MPLS": "syscall",
"syscall.ETHERTYPE_MPLS_MCAST": "syscall",
"syscall.ETHERTYPE_MUMPS": "syscall",
"syscall.ETHERTYPE_NBPCC": "syscall",
"syscall.ETHERTYPE_NBPCLAIM": "syscall",
"syscall.ETHERTYPE_NBPCLREQ": "syscall",
"syscall.ETHERTYPE_NBPCLRSP": "syscall",
"syscall.ETHERTYPE_NBPCREQ": "syscall",
"syscall.ETHERTYPE_NBPCRSP": "syscall",
"syscall.ETHERTYPE_NBPDG": "syscall",
"syscall.ETHERTYPE_NBPDGB": "syscall",
"syscall.ETHERTYPE_NBPDLTE": "syscall",
"syscall.ETHERTYPE_NBPRAR": "syscall",
"syscall.ETHERTYPE_NBPRAS": "syscall",
"syscall.ETHERTYPE_NBPRST": "syscall",
"syscall.ETHERTYPE_NBPSCD": "syscall",
"syscall.ETHERTYPE_NBPVCD": "syscall",
"syscall.ETHERTYPE_NBS": "syscall",
"syscall.ETHERTYPE_NCD": "syscall",
"syscall.ETHERTYPE_NESTAR": "syscall",
"syscall.ETHERTYPE_NETBEUI": "syscall",
"syscall.ETHERTYPE_NOVELL": "syscall",
"syscall.ETHERTYPE_NS": "syscall",
"syscall.ETHERTYPE_NSAT": "syscall",
"syscall.ETHERTYPE_NSCOMPAT": "syscall",
"syscall.ETHERTYPE_NTRAILER": "syscall",
"syscall.ETHERTYPE_OS9": "syscall",
"syscall.ETHERTYPE_OS9NET": "syscall",
"syscall.ETHERTYPE_PACER": "syscall",
"syscall.ETHERTYPE_PAE": "syscall",
"syscall.ETHERTYPE_PCS": "syscall",
"syscall.ETHERTYPE_PLANNING": "syscall",
"syscall.ETHERTYPE_PPP": "syscall",
"syscall.ETHERTYPE_PPPOE": "syscall",
"syscall.ETHERTYPE_PPPOEDISC": "syscall",
"syscall.ETHERTYPE_PRIMENTS": "syscall",
"syscall.ETHERTYPE_PUP": "syscall",
"syscall.ETHERTYPE_PUPAT": "syscall",
"syscall.ETHERTYPE_QINQ": "syscall",
"syscall.ETHERTYPE_RACAL": "syscall",
"syscall.ETHERTYPE_RATIONAL": "syscall",
"syscall.ETHERTYPE_RAWFR": "syscall",
"syscall.ETHERTYPE_RCL": "syscall",
"syscall.ETHERTYPE_RDP": "syscall",
"syscall.ETHERTYPE_RETIX": "syscall",
"syscall.ETHERTYPE_REVARP": "syscall",
"syscall.ETHERTYPE_SCA": "syscall",
"syscall.ETHERTYPE_SECTRA": "syscall",
"syscall.ETHERTYPE_SECUREDATA": "syscall",
"syscall.ETHERTYPE_SGITW": "syscall",
"syscall.ETHERTYPE_SG_BOUNCE": "syscall",
"syscall.ETHERTYPE_SG_DIAG": "syscall",
"syscall.ETHERTYPE_SG_NETGAMES": "syscall",
"syscall.ETHERTYPE_SG_RESV": "syscall",
"syscall.ETHERTYPE_SIMNET": "syscall",
"syscall.ETHERTYPE_SLOW": "syscall",
"syscall.ETHERTYPE_SLOWPROTOCOLS": "syscall",
"syscall.ETHERTYPE_SNA": "syscall",
"syscall.ETHERTYPE_SNMP": "syscall",
"syscall.ETHERTYPE_SONIX": "syscall",
"syscall.ETHERTYPE_SPIDER": "syscall",
"syscall.ETHERTYPE_SPRITE": "syscall",
"syscall.ETHERTYPE_STP": "syscall",
"syscall.ETHERTYPE_TALARIS": "syscall",
"syscall.ETHERTYPE_TALARISMC": "syscall",
"syscall.ETHERTYPE_TCPCOMP": "syscall",
"syscall.ETHERTYPE_TCPSM": "syscall",
"syscall.ETHERTYPE_TEC": "syscall",
"syscall.ETHERTYPE_TIGAN": "syscall",
"syscall.ETHERTYPE_TRAIL": "syscall",
"syscall.ETHERTYPE_TRANSETHER": "syscall",
"syscall.ETHERTYPE_TYMSHARE": "syscall",
"syscall.ETHERTYPE_UBBST": "syscall",
"syscall.ETHERTYPE_UBDEBUG": "syscall",
"syscall.ETHERTYPE_UBDIAGLOOP": "syscall",
"syscall.ETHERTYPE_UBDL": "syscall",
"syscall.ETHERTYPE_UBNIU": "syscall",
"syscall.ETHERTYPE_UBNMC": "syscall",
"syscall.ETHERTYPE_VALID": "syscall",
"syscall.ETHERTYPE_VARIAN": "syscall",
"syscall.ETHERTYPE_VAXELN": "syscall",
"syscall.ETHERTYPE_VEECO": "syscall",
"syscall.ETHERTYPE_VEXP": "syscall",
"syscall.ETHERTYPE_VGLAB": "syscall",
"syscall.ETHERTYPE_VINES": "syscall",
"syscall.ETHERTYPE_VINESECHO": "syscall",
"syscall.ETHERTYPE_VINESLOOP": "syscall",
"syscall.ETHERTYPE_VITAL": "syscall",
"syscall.ETHERTYPE_VLAN": "syscall",
"syscall.ETHERTYPE_VLTLMAN": "syscall",
"syscall.ETHERTYPE_VPROD": "syscall",
"syscall.ETHERTYPE_VURESERVED": "syscall",
"syscall.ETHERTYPE_WATERLOO": "syscall",
"syscall.ETHERTYPE_WELLFLEET": "syscall",
"syscall.ETHERTYPE_X25": "syscall",
"syscall.ETHERTYPE_X75": "syscall",
"syscall.ETHERTYPE_XNSSM": "syscall",
"syscall.ETHERTYPE_XTP": "syscall",
"syscall.ETHER_ADDR_LEN": "syscall",
"syscall.ETHER_ALIGN": "syscall",
"syscall.ETHER_CRC_LEN": "syscall",
"syscall.ETHER_CRC_POLY_BE": "syscall",
"syscall.ETHER_CRC_POLY_LE": "syscall",
"syscall.ETHER_HDR_LEN": "syscall",
"syscall.ETHER_MAX_DIX_LEN": "syscall",
"syscall.ETHER_MAX_LEN": "syscall",
"syscall.ETHER_MAX_LEN_JUMBO": "syscall",
"syscall.ETHER_MIN_LEN": "syscall",
"syscall.ETHER_PPPOE_ENCAP_LEN": "syscall",
"syscall.ETHER_TYPE_LEN": "syscall",
"syscall.ETHER_VLAN_ENCAP_LEN": "syscall",
"syscall.ETH_P_1588": "syscall",
"syscall.ETH_P_8021Q": "syscall",
"syscall.ETH_P_802_2": "syscall",
"syscall.ETH_P_802_3": "syscall",
"syscall.ETH_P_AARP": "syscall",
"syscall.ETH_P_ALL": "syscall",
"syscall.ETH_P_AOE": "syscall",
"syscall.ETH_P_ARCNET": "syscall",
"syscall.ETH_P_ARP": "syscall",
"syscall.ETH_P_ATALK": "syscall",
"syscall.ETH_P_ATMFATE": "syscall",
"syscall.ETH_P_ATMMPOA": "syscall",
"syscall.ETH_P_AX25": "syscall",
"syscall.ETH_P_BPQ": "syscall",
"syscall.ETH_P_CAIF": "syscall",
"syscall.ETH_P_CAN": "syscall",
"syscall.ETH_P_CONTROL": "syscall",
"syscall.ETH_P_CUST": "syscall",
"syscall.ETH_P_DDCMP": "syscall",
"syscall.ETH_P_DEC": "syscall",
"syscall.ETH_P_DIAG": "syscall",
"syscall.ETH_P_DNA_DL": "syscall",
"syscall.ETH_P_DNA_RC": "syscall",
"syscall.ETH_P_DNA_RT": "syscall",
"syscall.ETH_P_DSA": "syscall",
"syscall.ETH_P_ECONET": "syscall",
"syscall.ETH_P_EDSA": "syscall",
"syscall.ETH_P_FCOE": "syscall",
"syscall.ETH_P_FIP": "syscall",
"syscall.ETH_P_HDLC": "syscall",
"syscall.ETH_P_IEEE802154": "syscall",
"syscall.ETH_P_IEEEPUP": "syscall",
"syscall.ETH_P_IEEEPUPAT": "syscall",
"syscall.ETH_P_IP": "syscall",
"syscall.ETH_P_IPV6": "syscall",
"syscall.ETH_P_IPX": "syscall",
"syscall.ETH_P_IRDA": "syscall",
"syscall.ETH_P_LAT": "syscall",
"syscall.ETH_P_LINK_CTL": "syscall",
"syscall.ETH_P_LOCALTALK": "syscall",
"syscall.ETH_P_LOOP": "syscall",
"syscall.ETH_P_MOBITEX": "syscall",
"syscall.ETH_P_MPLS_MC": "syscall",
"syscall.ETH_P_MPLS_UC": "syscall",
"syscall.ETH_P_PAE": "syscall",
"syscall.ETH_P_PAUSE": "syscall",
"syscall.ETH_P_PHONET": "syscall",
"syscall.ETH_P_PPPTALK": "syscall",
"syscall.ETH_P_PPP_DISC": "syscall",
"syscall.ETH_P_PPP_MP": "syscall",
"syscall.ETH_P_PPP_SES": "syscall",
"syscall.ETH_P_PUP": "syscall",
"syscall.ETH_P_PUPAT": "syscall",
"syscall.ETH_P_RARP": "syscall",
"syscall.ETH_P_SCA": "syscall",
"syscall.ETH_P_SLOW": "syscall",
"syscall.ETH_P_SNAP": "syscall",
"syscall.ETH_P_TEB": "syscall",
"syscall.ETH_P_TIPC": "syscall",
"syscall.ETH_P_TRAILER": "syscall",
"syscall.ETH_P_TR_802_2": "syscall",
"syscall.ETH_P_WAN_PPP": "syscall",
"syscall.ETH_P_WCCP": "syscall",
"syscall.ETH_P_X25": "syscall",
"syscall.ETIME": "syscall",
"syscall.ETIMEDOUT": "syscall",
"syscall.ETOOMANYREFS": "syscall",
"syscall.ETXTBSY": "syscall",
"syscall.EUCLEAN": "syscall",
"syscall.EUNATCH": "syscall",
"syscall.EUSERS": "syscall",
"syscall.EVFILT_AIO": "syscall",
"syscall.EVFILT_FS": "syscall",
"syscall.EVFILT_LIO": "syscall",
"syscall.EVFILT_MACHPORT": "syscall",
"syscall.EVFILT_PROC": "syscall",
"syscall.EVFILT_READ": "syscall",
"syscall.EVFILT_SIGNAL": "syscall",
"syscall.EVFILT_SYSCOUNT": "syscall",
"syscall.EVFILT_THREADMARKER": "syscall",
"syscall.EVFILT_TIMER": "syscall",
"syscall.EVFILT_USER": "syscall",
"syscall.EVFILT_VM": "syscall",
"syscall.EVFILT_VNODE": "syscall",
"syscall.EVFILT_WRITE": "syscall",
"syscall.EV_ADD": "syscall",
"syscall.EV_CLEAR": "syscall",
"syscall.EV_DELETE": "syscall",
"syscall.EV_DISABLE": "syscall",
"syscall.EV_DISPATCH": "syscall",
"syscall.EV_DROP": "syscall",
"syscall.EV_ENABLE": "syscall",
"syscall.EV_EOF": "syscall",
"syscall.EV_ERROR": "syscall",
"syscall.EV_FLAG0": "syscall",
"syscall.EV_FLAG1": "syscall",
"syscall.EV_ONESHOT": "syscall",
"syscall.EV_OOBAND": "syscall",
"syscall.EV_POLL": "syscall",
"syscall.EV_RECEIPT": "syscall",
"syscall.EV_SYSFLAGS": "syscall",
"syscall.EWINDOWS": "syscall",
"syscall.EWOULDBLOCK": "syscall",
"syscall.EXDEV": "syscall",
"syscall.EXFULL": "syscall",
"syscall.EXTA": "syscall",
"syscall.EXTB": "syscall",
"syscall.EXTPROC": "syscall",
"syscall.Environ": "syscall",
"syscall.EpollCreate": "syscall",
"syscall.EpollCreate1": "syscall",
"syscall.EpollCtl": "syscall",
"syscall.EpollEvent": "syscall",
"syscall.EpollWait": "syscall",
"syscall.Errno": "syscall",
"syscall.EscapeArg": "syscall",
"syscall.Exchangedata": "syscall",
"syscall.Exec": "syscall",
"syscall.Exit": "syscall",
"syscall.ExitProcess": "syscall",
"syscall.FD_CLOEXEC": "syscall",
"syscall.FD_SETSIZE": "syscall",
"syscall.FILE_ACTION_ADDED": "syscall",
"syscall.FILE_ACTION_MODIFIED": "syscall",
"syscall.FILE_ACTION_REMOVED": "syscall",
"syscall.FILE_ACTION_RENAMED_NEW_NAME": "syscall",
"syscall.FILE_ACTION_RENAMED_OLD_NAME": "syscall",
"syscall.FILE_APPEND_DATA": "syscall",
"syscall.FILE_ATTRIBUTE_ARCHIVE": "syscall",
"syscall.FILE_ATTRIBUTE_DIRECTORY": "syscall",
"syscall.FILE_ATTRIBUTE_HIDDEN": "syscall",
"syscall.FILE_ATTRIBUTE_NORMAL": "syscall",
"syscall.FILE_ATTRIBUTE_READONLY": "syscall",
"syscall.FILE_ATTRIBUTE_REPARSE_POINT": "syscall",
"syscall.FILE_ATTRIBUTE_SYSTEM": "syscall",
"syscall.FILE_BEGIN": "syscall",
"syscall.FILE_CURRENT": "syscall",
"syscall.FILE_END": "syscall",
"syscall.FILE_FLAG_BACKUP_SEMANTICS": "syscall",
"syscall.FILE_FLAG_OPEN_REPARSE_POINT": "syscall",
"syscall.FILE_FLAG_OVERLAPPED": "syscall",
"syscall.FILE_LIST_DIRECTORY": "syscall",
"syscall.FILE_MAP_COPY": "syscall",
"syscall.FILE_MAP_EXECUTE": "syscall",
"syscall.FILE_MAP_READ": "syscall",
"syscall.FILE_MAP_WRITE": "syscall",
"syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES": "syscall",
"syscall.FILE_NOTIFY_CHANGE_CREATION": "syscall",
"syscall.FILE_NOTIFY_CHANGE_DIR_NAME": "syscall",
"syscall.FILE_NOTIFY_CHANGE_FILE_NAME": "syscall",
"syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS": "syscall",
"syscall.FILE_NOTIFY_CHANGE_LAST_WRITE": "syscall",
"syscall.FILE_NOTIFY_CHANGE_SIZE": "syscall",
"syscall.FILE_SHARE_DELETE": "syscall",
"syscall.FILE_SHARE_READ": "syscall",
"syscall.FILE_SHARE_WRITE": "syscall",
"syscall.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS": "syscall",
"syscall.FILE_SKIP_SET_EVENT_ON_HANDLE": "syscall",
"syscall.FILE_TYPE_CHAR": "syscall",
"syscall.FILE_TYPE_DISK": "syscall",
"syscall.FILE_TYPE_PIPE": "syscall",
"syscall.FILE_TYPE_REMOTE": "syscall",
"syscall.FILE_TYPE_UNKNOWN": "syscall",
"syscall.FILE_WRITE_ATTRIBUTES": "syscall",
"syscall.FLUSHO": "syscall",
"syscall.FORMAT_MESSAGE_ALLOCATE_BUFFER": "syscall",
"syscall.FORMAT_MESSAGE_ARGUMENT_ARRAY": "syscall",
"syscall.FORMAT_MESSAGE_FROM_HMODULE": "syscall",
"syscall.FORMAT_MESSAGE_FROM_STRING": "syscall",
"syscall.FORMAT_MESSAGE_FROM_SYSTEM": "syscall",
"syscall.FORMAT_MESSAGE_IGNORE_INSERTS": "syscall",
"syscall.FORMAT_MESSAGE_MAX_WIDTH_MASK": "syscall",
"syscall.FSCTL_GET_REPARSE_POINT": "syscall",
"syscall.F_ADDFILESIGS": "syscall",
"syscall.F_ADDSIGS": "syscall",
"syscall.F_ALLOCATEALL": "syscall",
"syscall.F_ALLOCATECONTIG": "syscall",
"syscall.F_CANCEL": "syscall",
"syscall.F_CHKCLEAN": "syscall",
"syscall.F_CLOSEM": "syscall",
"syscall.F_DUP2FD": "syscall",
"syscall.F_DUP2FD_CLOEXEC": "syscall",
"syscall.F_DUPFD": "syscall",
"syscall.F_DUPFD_CLOEXEC": "syscall",
"syscall.F_EXLCK": "syscall",
"syscall.F_FLUSH_DATA": "syscall",
"syscall.F_FREEZE_FS": "syscall",
"syscall.F_FSCTL": "syscall",
"syscall.F_FSDIRMASK": "syscall",
"syscall.F_FSIN": "syscall",
"syscall.F_FSINOUT": "syscall",
"syscall.F_FSOUT": "syscall",
"syscall.F_FSPRIV": "syscall",
"syscall.F_FSVOID": "syscall",
"syscall.F_FULLFSYNC": "syscall",
"syscall.F_GETFD": "syscall",
"syscall.F_GETFL": "syscall",
"syscall.F_GETLEASE": "syscall",
"syscall.F_GETLK": "syscall",
"syscall.F_GETLK64": "syscall",
"syscall.F_GETLKPID": "syscall",
"syscall.F_GETNOSIGPIPE": "syscall",
"syscall.F_GETOWN": "syscall",
"syscall.F_GETOWN_EX": "syscall",
"syscall.F_GETPATH": "syscall",
"syscall.F_GETPATH_MTMINFO": "syscall",
"syscall.F_GETPIPE_SZ": "syscall",
"syscall.F_GETPROTECTIONCLASS": "syscall",
"syscall.F_GETSIG": "syscall",
"syscall.F_GLOBAL_NOCACHE": "syscall",
"syscall.F_LOCK": "syscall",
"syscall.F_LOG2PHYS": "syscall",
"syscall.F_LOG2PHYS_EXT": "syscall",
"syscall.F_MARKDEPENDENCY": "syscall",
"syscall.F_MAXFD": "syscall",
"syscall.F_NOCACHE": "syscall",
"syscall.F_NODIRECT": "syscall",
"syscall.F_NOTIFY": "syscall",
"syscall.F_OGETLK": "syscall",
"syscall.F_OK": "syscall",
"syscall.F_OSETLK": "syscall",
"syscall.F_OSETLKW": "syscall",
"syscall.F_PARAM_MASK": "syscall",
"syscall.F_PARAM_MAX": "syscall",
"syscall.F_PATHPKG_CHECK": "syscall",
"syscall.F_PEOFPOSMODE": "syscall",
"syscall.F_PREALLOCATE": "syscall",
"syscall.F_RDADVISE": "syscall",
"syscall.F_RDAHEAD": "syscall",
"syscall.F_RDLCK": "syscall",
"syscall.F_READAHEAD": "syscall",
"syscall.F_READBOOTSTRAP": "syscall",
"syscall.F_SETBACKINGSTORE": "syscall",
"syscall.F_SETFD": "syscall",
"syscall.F_SETFL": "syscall",
"syscall.F_SETLEASE": "syscall",
"syscall.F_SETLK": "syscall",
"syscall.F_SETLK64": "syscall",
"syscall.F_SETLKW": "syscall",
"syscall.F_SETLKW64": "syscall",
"syscall.F_SETLK_REMOTE": "syscall",
"syscall.F_SETNOSIGPIPE": "syscall",
"syscall.F_SETOWN": "syscall",
"syscall.F_SETOWN_EX": "syscall",
"syscall.F_SETPIPE_SZ": "syscall",
"syscall.F_SETPROTECTIONCLASS": "syscall",
"syscall.F_SETSIG": "syscall",
"syscall.F_SETSIZE": "syscall",
"syscall.F_SHLCK": "syscall",
"syscall.F_TEST": "syscall",
"syscall.F_THAW_FS": "syscall",
"syscall.F_TLOCK": "syscall",
"syscall.F_ULOCK": "syscall",
"syscall.F_UNLCK": "syscall",
"syscall.F_UNLCKSYS": "syscall",
"syscall.F_VOLPOSMODE": "syscall",
"syscall.F_WRITEBOOTSTRAP": "syscall",
"syscall.F_WRLCK": "syscall",
"syscall.Faccessat": "syscall",
"syscall.Fallocate": "syscall",
"syscall.Fbootstraptransfer_t": "syscall",
"syscall.Fchdir": "syscall",
"syscall.Fchflags": "syscall",
"syscall.Fchmod": "syscall",
"syscall.Fchmodat": "syscall",
"syscall.Fchown": "syscall",
"syscall.Fchownat": "syscall",
"syscall.FcntlFlock": "syscall",
"syscall.FdSet": "syscall",
"syscall.Fdatasync": "syscall",
"syscall.FileNotifyInformation": "syscall",
"syscall.Filetime": "syscall",
"syscall.FindClose": "syscall",
"syscall.FindFirstFile": "syscall",
"syscall.FindNextFile": "syscall",
"syscall.Flock": "syscall",
"syscall.Flock_t": "syscall",
"syscall.FlushBpf": "syscall",
"syscall.FlushFileBuffers": "syscall",
"syscall.FlushViewOfFile": "syscall",
"syscall.ForkExec": "syscall",
"syscall.ForkLock": "syscall",
"syscall.FormatMessage": "syscall",
"syscall.Fpathconf": "syscall",
"syscall.FreeAddrInfoW": "syscall",
"syscall.FreeEnvironmentStrings": "syscall",
"syscall.FreeLibrary": "syscall",
"syscall.Fsid": "syscall",
"syscall.Fstat": "syscall",
"syscall.Fstatfs": "syscall",
"syscall.Fstore_t": "syscall",
"syscall.Fsync": "syscall",
"syscall.Ftruncate": "syscall",
"syscall.FullPath": "syscall",
"syscall.Futimes": "syscall",
"syscall.Futimesat": "syscall",
"syscall.GENERIC_ALL": "syscall",
"syscall.GENERIC_EXECUTE": "syscall",
"syscall.GENERIC_READ": "syscall",
"syscall.GENERIC_WRITE": "syscall",
"syscall.GUID": "syscall",
"syscall.GetAcceptExSockaddrs": "syscall",
"syscall.GetAdaptersInfo": "syscall",
"syscall.GetAddrInfoW": "syscall",
"syscall.GetCommandLine": "syscall",
"syscall.GetComputerName": "syscall",
"syscall.GetConsoleMode": "syscall",
"syscall.GetCurrentDirectory": "syscall",
"syscall.GetCurrentProcess": "syscall",
"syscall.GetEnvironmentStrings": "syscall",
"syscall.GetEnvironmentVariable": "syscall",
"syscall.GetExitCodeProcess": "syscall",
"syscall.GetFileAttributes": "syscall",
"syscall.GetFileAttributesEx": "syscall",
"syscall.GetFileExInfoStandard": "syscall",
"syscall.GetFileExMaxInfoLevel": "syscall",
"syscall.GetFileInformationByHandle": "syscall",
"syscall.GetFileType": "syscall",
"syscall.GetFullPathName": "syscall",
"syscall.GetHostByName": "syscall",
"syscall.GetIfEntry": "syscall",
"syscall.GetLastError": "syscall",
"syscall.GetLengthSid": "syscall",
"syscall.GetLongPathName": "syscall",
"syscall.GetProcAddress": "syscall",
"syscall.GetProcessTimes": "syscall",
"syscall.GetProtoByName": "syscall",
"syscall.GetQueuedCompletionStatus": "syscall",
"syscall.GetServByName": "syscall",
"syscall.GetShortPathName": "syscall",
"syscall.GetStartupInfo": "syscall",
"syscall.GetStdHandle": "syscall",
"syscall.GetSystemTimeAsFileTime": "syscall",
"syscall.GetTempPath": "syscall",
"syscall.GetTimeZoneInformation": "syscall",
"syscall.GetTokenInformation": "syscall",
"syscall.GetUserNameEx": "syscall",
"syscall.GetUserProfileDirectory": "syscall",
"syscall.GetVersion": "syscall",
"syscall.Getcwd": "syscall",
"syscall.Getdents": "syscall",
"syscall.Getdirentries": "syscall",
"syscall.Getdtablesize": "syscall",
"syscall.Getegid": "syscall",
"syscall.Getenv": "syscall",
"syscall.Geteuid": "syscall",
"syscall.Getfsstat": "syscall",
"syscall.Getgid": "syscall",
"syscall.Getgroups": "syscall",
"syscall.Getpagesize": "syscall",
"syscall.Getpeername": "syscall",
"syscall.Getpgid": "syscall",
"syscall.Getpgrp": "syscall",
"syscall.Getpid": "syscall",
"syscall.Getppid": "syscall",
"syscall.Getpriority": "syscall",
"syscall.Getrlimit": "syscall",
"syscall.Getrusage": "syscall",
"syscall.Getsid": "syscall",
"syscall.Getsockname": "syscall",
"syscall.Getsockopt": "syscall",
"syscall.GetsockoptByte": "syscall",
"syscall.GetsockoptICMPv6Filter": "syscall",
"syscall.GetsockoptIPMreq": "syscall",
"syscall.GetsockoptIPMreqn": "syscall",
"syscall.GetsockoptIPv6MTUInfo": "syscall",
"syscall.GetsockoptIPv6Mreq": "syscall",
"syscall.GetsockoptInet4Addr": "syscall",
"syscall.GetsockoptInt": "syscall",
"syscall.GetsockoptUcred": "syscall",
"syscall.Gettid": "syscall",
"syscall.Gettimeofday": "syscall",
"syscall.Getuid": "syscall",
"syscall.Getwd": "syscall",
"syscall.Getxattr": "syscall",
"syscall.HANDLE_FLAG_INHERIT": "syscall",
"syscall.HKEY_CLASSES_ROOT": "syscall",
"syscall.HKEY_CURRENT_CONFIG": "syscall",
"syscall.HKEY_CURRENT_USER": "syscall",
"syscall.HKEY_DYN_DATA": "syscall",
"syscall.HKEY_LOCAL_MACHINE": "syscall",
"syscall.HKEY_PERFORMANCE_DATA": "syscall",
"syscall.HKEY_USERS": "syscall",
"syscall.HUPCL": "syscall",
"syscall.Handle": "syscall",
"syscall.Hostent": "syscall",
"syscall.ICANON": "syscall",
"syscall.ICMP6_FILTER": "syscall",
"syscall.ICMPV6_FILTER": "syscall",
"syscall.ICMPv6Filter": "syscall",
"syscall.ICRNL": "syscall",
"syscall.IEXTEN": "syscall",
"syscall.IFAN_ARRIVAL": "syscall",
"syscall.IFAN_DEPARTURE": "syscall",
"syscall.IFA_ADDRESS": "syscall",
"syscall.IFA_ANYCAST": "syscall",
"syscall.IFA_BROADCAST": "syscall",
"syscall.IFA_CACHEINFO": "syscall",
"syscall.IFA_F_DADFAILED": "syscall",
"syscall.IFA_F_DEPRECATED": "syscall",
"syscall.IFA_F_HOMEADDRESS": "syscall",
"syscall.IFA_F_NODAD": "syscall",
"syscall.IFA_F_OPTIMISTIC": "syscall",
"syscall.IFA_F_PERMANENT": "syscall",
"syscall.IFA_F_SECONDARY": "syscall",
"syscall.IFA_F_TEMPORARY": "syscall",
"syscall.IFA_F_TENTATIVE": "syscall",
"syscall.IFA_LABEL": "syscall",
"syscall.IFA_LOCAL": "syscall",
"syscall.IFA_MAX": "syscall",
"syscall.IFA_MULTICAST": "syscall",
"syscall.IFA_ROUTE": "syscall",
"syscall.IFA_UNSPEC": "syscall",
"syscall.IFF_ALLMULTI": "syscall",
"syscall.IFF_ALTPHYS": "syscall",
"syscall.IFF_AUTOMEDIA": "syscall",
"syscall.IFF_BROADCAST": "syscall",
"syscall.IFF_CANTCHANGE": "syscall",
"syscall.IFF_CANTCONFIG": "syscall",
"syscall.IFF_DEBUG": "syscall",
"syscall.IFF_DRV_OACTIVE": "syscall",
"syscall.IFF_DRV_RUNNING": "syscall",
"syscall.IFF_DYING": "syscall",
"syscall.IFF_DYNAMIC": "syscall",
"syscall.IFF_LINK0": "syscall",
"syscall.IFF_LINK1": "syscall",
"syscall.IFF_LINK2": "syscall",
"syscall.IFF_LOOPBACK": "syscall",
"syscall.IFF_MASTER": "syscall",
"syscall.IFF_MONITOR": "syscall",
"syscall.IFF_MULTICAST": "syscall",
"syscall.IFF_NOARP": "syscall",
"syscall.IFF_NOTRAILERS": "syscall",
"syscall.IFF_NO_PI": "syscall",
"syscall.IFF_OACTIVE": "syscall",
"syscall.IFF_ONE_QUEUE": "syscall",
"syscall.IFF_POINTOPOINT": "syscall",
"syscall.IFF_POINTTOPOINT": "syscall",
"syscall.IFF_PORTSEL": "syscall",
"syscall.IFF_PPROMISC": "syscall",
"syscall.IFF_PROMISC": "syscall",
"syscall.IFF_RENAMING": "syscall",
"syscall.IFF_RUNNING": "syscall",
"syscall.IFF_SIMPLEX": "syscall",
"syscall.IFF_SLAVE": "syscall",
"syscall.IFF_SMART": "syscall",
"syscall.IFF_STATICARP": "syscall",
"syscall.IFF_TAP": "syscall",
"syscall.IFF_TUN": "syscall",
"syscall.IFF_TUN_EXCL": "syscall",
"syscall.IFF_UP": "syscall",
"syscall.IFF_VNET_HDR": "syscall",
"syscall.IFLA_ADDRESS": "syscall",
"syscall.IFLA_BROADCAST": "syscall",
"syscall.IFLA_COST": "syscall",
"syscall.IFLA_IFALIAS": "syscall",
"syscall.IFLA_IFNAME": "syscall",
"syscall.IFLA_LINK": "syscall",
"syscall.IFLA_LINKINFO": "syscall",
"syscall.IFLA_LINKMODE": "syscall",
"syscall.IFLA_MAP": "syscall",
"syscall.IFLA_MASTER": "syscall",
"syscall.IFLA_MAX": "syscall",
"syscall.IFLA_MTU": "syscall",
"syscall.IFLA_NET_NS_PID": "syscall",
"syscall.IFLA_OPERSTATE": "syscall",
"syscall.IFLA_PRIORITY": "syscall",
"syscall.IFLA_PROTINFO": "syscall",
"syscall.IFLA_QDISC": "syscall",
"syscall.IFLA_STATS": "syscall",
"syscall.IFLA_TXQLEN": "syscall",
"syscall.IFLA_UNSPEC": "syscall",
"syscall.IFLA_WEIGHT": "syscall",
"syscall.IFLA_WIRELESS": "syscall",
"syscall.IFNAMSIZ": "syscall",
"syscall.IFT_1822": "syscall",
"syscall.IFT_A12MPPSWITCH": "syscall",
"syscall.IFT_AAL2": "syscall",
"syscall.IFT_AAL5": "syscall",
"syscall.IFT_ADSL": "syscall",
"syscall.IFT_AFLANE8023": "syscall",
"syscall.IFT_AFLANE8025": "syscall",
"syscall.IFT_ARAP": "syscall",
"syscall.IFT_ARCNET": "syscall",
"syscall.IFT_ARCNETPLUS": "syscall",
"syscall.IFT_ASYNC": "syscall",
"syscall.IFT_ATM": "syscall",
"syscall.IFT_ATMDXI": "syscall",
"syscall.IFT_ATMFUNI": "syscall",
"syscall.IFT_ATMIMA": "syscall",
"syscall.IFT_ATMLOGICAL": "syscall",
"syscall.IFT_ATMRADIO": "syscall",
"syscall.IFT_ATMSUBINTERFACE": "syscall",
"syscall.IFT_ATMVCIENDPT": "syscall",
"syscall.IFT_ATMVIRTUAL": "syscall",
"syscall.IFT_BGPPOLICYACCOUNTING": "syscall",
"syscall.IFT_BLUETOOTH": "syscall",
"syscall.IFT_BRIDGE": "syscall",
"syscall.IFT_BSC": "syscall",
"syscall.IFT_CARP": "syscall",
"syscall.IFT_CCTEMUL": "syscall",
"syscall.IFT_CELLULAR": "syscall",
"syscall.IFT_CEPT": "syscall",
"syscall.IFT_CES": "syscall",
"syscall.IFT_CHANNEL": "syscall",
"syscall.IFT_CNR": "syscall",
"syscall.IFT_COFFEE": "syscall",
"syscall.IFT_COMPOSITELINK": "syscall",
"syscall.IFT_DCN": "syscall",
"syscall.IFT_DIGITALPOWERLINE": "syscall",
"syscall.IFT_DIGITALWRAPPEROVERHEADCHANNEL": "syscall",
"syscall.IFT_DLSW": "syscall",
"syscall.IFT_DOCSCABLEDOWNSTREAM": "syscall",
"syscall.IFT_DOCSCABLEMACLAYER": "syscall",
"syscall.IFT_DOCSCABLEUPSTREAM": "syscall",
"syscall.IFT_DOCSCABLEUPSTREAMCHANNEL": "syscall",
"syscall.IFT_DS0": "syscall",
"syscall.IFT_DS0BUNDLE": "syscall",
"syscall.IFT_DS1FDL": "syscall",
"syscall.IFT_DS3": "syscall",
"syscall.IFT_DTM": "syscall",
"syscall.IFT_DUMMY": "syscall",
"syscall.IFT_DVBASILN": "syscall",
"syscall.IFT_DVBASIOUT": "syscall",
"syscall.IFT_DVBRCCDOWNSTREAM": "syscall",
"syscall.IFT_DVBRCCMACLAYER": "syscall",
"syscall.IFT_DVBRCCUPSTREAM": "syscall",
"syscall.IFT_ECONET": "syscall",
"syscall.IFT_ENC": "syscall",
"syscall.IFT_EON": "syscall",
"syscall.IFT_EPLRS": "syscall",
"syscall.IFT_ESCON": "syscall",
"syscall.IFT_ETHER": "syscall",
"syscall.IFT_FAITH": "syscall",
"syscall.IFT_FAST": "syscall",
"syscall.IFT_FASTETHER": "syscall",
"syscall.IFT_FASTETHERFX": "syscall",
"syscall.IFT_FDDI": "syscall",
"syscall.IFT_FIBRECHANNEL": "syscall",
"syscall.IFT_FRAMERELAYINTERCONNECT": "syscall",
"syscall.IFT_FRAMERELAYMPI": "syscall",
"syscall.IFT_FRDLCIENDPT": "syscall",
"syscall.IFT_FRELAY": "syscall",
"syscall.IFT_FRELAYDCE": "syscall",
"syscall.IFT_FRF16MFRBUNDLE": "syscall",
"syscall.IFT_FRFORWARD": "syscall",
"syscall.IFT_G703AT2MB": "syscall",
"syscall.IFT_G703AT64K": "syscall",
"syscall.IFT_GIF": "syscall",
"syscall.IFT_GIGABITETHERNET": "syscall",
"syscall.IFT_GR303IDT": "syscall",
"syscall.IFT_GR303RDT": "syscall",
"syscall.IFT_H323GATEKEEPER": "syscall",
"syscall.IFT_H323PROXY": "syscall",
"syscall.IFT_HDH1822": "syscall",
"syscall.IFT_HDLC": "syscall",
"syscall.IFT_HDSL2": "syscall",
"syscall.IFT_HIPERLAN2": "syscall",
"syscall.IFT_HIPPI": "syscall",
"syscall.IFT_HIPPIINTERFACE": "syscall",
"syscall.IFT_HOSTPAD": "syscall",
"syscall.IFT_HSSI": "syscall",
"syscall.IFT_HY": "syscall",
"syscall.IFT_IBM370PARCHAN": "syscall",
"syscall.IFT_IDSL": "syscall",
"syscall.IFT_IEEE1394": "syscall",
"syscall.IFT_IEEE80211": "syscall",
"syscall.IFT_IEEE80212": "syscall",
"syscall.IFT_IEEE8023ADLAG": "syscall",
"syscall.IFT_IFGSN": "syscall",
"syscall.IFT_IMT": "syscall",
"syscall.IFT_INFINIBAND": "syscall",
"syscall.IFT_INTERLEAVE": "syscall",
"syscall.IFT_IP": "syscall",
"syscall.IFT_IPFORWARD": "syscall",
"syscall.IFT_IPOVERATM": "syscall",
"syscall.IFT_IPOVERCDLC": "syscall",
"syscall.IFT_IPOVERCLAW": "syscall",
"syscall.IFT_IPSWITCH": "syscall",
"syscall.IFT_IPXIP": "syscall",
"syscall.IFT_ISDN": "syscall",
"syscall.IFT_ISDNBASIC": "syscall",
"syscall.IFT_ISDNPRIMARY": "syscall",
"syscall.IFT_ISDNS": "syscall",
"syscall.IFT_ISDNU": "syscall",
"syscall.IFT_ISO88022LLC": "syscall",
"syscall.IFT_ISO88023": "syscall",
"syscall.IFT_ISO88024": "syscall",
"syscall.IFT_ISO88025": "syscall",
"syscall.IFT_ISO88025CRFPINT": "syscall",
"syscall.IFT_ISO88025DTR": "syscall",
"syscall.IFT_ISO88025FIBER": "syscall",
"syscall.IFT_ISO88026": "syscall",
"syscall.IFT_ISUP": "syscall",
"syscall.IFT_L2VLAN": "syscall",
"syscall.IFT_L3IPVLAN": "syscall",
"syscall.IFT_L3IPXVLAN": "syscall",
"syscall.IFT_LAPB": "syscall",
"syscall.IFT_LAPD": "syscall",
"syscall.IFT_LAPF": "syscall",
"syscall.IFT_LINEGROUP": "syscall",
"syscall.IFT_LOCALTALK": "syscall",
"syscall.IFT_LOOP": "syscall",
"syscall.IFT_MEDIAMAILOVERIP": "syscall",
"syscall.IFT_MFSIGLINK": "syscall",
"syscall.IFT_MIOX25": "syscall",
"syscall.IFT_MODEM": "syscall",
"syscall.IFT_MPC": "syscall",
"syscall.IFT_MPLS": "syscall",
"syscall.IFT_MPLSTUNNEL": "syscall",
"syscall.IFT_MSDSL": "syscall",
"syscall.IFT_MVL": "syscall",
"syscall.IFT_MYRINET": "syscall",
"syscall.IFT_NFAS": "syscall",
"syscall.IFT_NSIP": "syscall",
"syscall.IFT_OPTICALCHANNEL": "syscall",
"syscall.IFT_OPTICALTRANSPORT": "syscall",
"syscall.IFT_OTHER": "syscall",
"syscall.IFT_P10": "syscall",
"syscall.IFT_P80": "syscall",
"syscall.IFT_PARA": "syscall",
"syscall.IFT_PDP": "syscall",
"syscall.IFT_PFLOG": "syscall",
"syscall.IFT_PFLOW": "syscall",
"syscall.IFT_PFSYNC": "syscall",
"syscall.IFT_PLC": "syscall",
"syscall.IFT_PON155": "syscall",
"syscall.IFT_PON622": "syscall",
"syscall.IFT_POS": "syscall",
"syscall.IFT_PPP": "syscall",
"syscall.IFT_PPPMULTILINKBUNDLE": "syscall",
"syscall.IFT_PROPATM": "syscall",
"syscall.IFT_PROPBWAP2MP": "syscall",
"syscall.IFT_PROPCNLS": "syscall",
"syscall.IFT_PROPDOCSWIRELESSDOWNSTREAM": "syscall",
"syscall.IFT_PROPDOCSWIRELESSMACLAYER": "syscall",
"syscall.IFT_PROPDOCSWIRELESSUPSTREAM": "syscall",
"syscall.IFT_PROPMUX": "syscall",
"syscall.IFT_PROPVIRTUAL": "syscall",
"syscall.IFT_PROPWIRELESSP2P": "syscall",
"syscall.IFT_PTPSERIAL": "syscall",
"syscall.IFT_PVC": "syscall",
"syscall.IFT_Q2931": "syscall",
"syscall.IFT_QLLC": "syscall",
"syscall.IFT_RADIOMAC": "syscall",
"syscall.IFT_RADSL": "syscall",
"syscall.IFT_REACHDSL": "syscall",
"syscall.IFT_RFC1483": "syscall",
"syscall.IFT_RS232": "syscall",
"syscall.IFT_RSRB": "syscall",
"syscall.IFT_SDLC": "syscall",
"syscall.IFT_SDSL": "syscall",
"syscall.IFT_SHDSL": "syscall",
"syscall.IFT_SIP": "syscall",
"syscall.IFT_SIPSIG": "syscall",
"syscall.IFT_SIPTG": "syscall",
"syscall.IFT_SLIP": "syscall",
"syscall.IFT_SMDSDXI": "syscall",
"syscall.IFT_SMDSICIP": "syscall",
"syscall.IFT_SONET": "syscall",
"syscall.IFT_SONETOVERHEADCHANNEL": "syscall",
"syscall.IFT_SONETPATH": "syscall",
"syscall.IFT_SONETVT": "syscall",
"syscall.IFT_SRP": "syscall",
"syscall.IFT_SS7SIGLINK": "syscall",
"syscall.IFT_STACKTOSTACK": "syscall",
"syscall.IFT_STARLAN": "syscall",
"syscall.IFT_STF": "syscall",
"syscall.IFT_T1": "syscall",
"syscall.IFT_TDLC": "syscall",
"syscall.IFT_TELINK": "syscall",
"syscall.IFT_TERMPAD": "syscall",
"syscall.IFT_TR008": "syscall",
"syscall.IFT_TRANSPHDLC": "syscall",
"syscall.IFT_TUNNEL": "syscall",
"syscall.IFT_ULTRA": "syscall",
"syscall.IFT_USB": "syscall",
"syscall.IFT_V11": "syscall",
"syscall.IFT_V35": "syscall",
"syscall.IFT_V36": "syscall",
"syscall.IFT_V37": "syscall",
"syscall.IFT_VDSL": "syscall",
"syscall.IFT_VIRTUALIPADDRESS": "syscall",
"syscall.IFT_VIRTUALTG": "syscall",
"syscall.IFT_VOICEDID": "syscall",
"syscall.IFT_VOICEEM": "syscall",
"syscall.IFT_VOICEEMFGD": "syscall",
"syscall.IFT_VOICEENCAP": "syscall",
"syscall.IFT_VOICEFGDEANA": "syscall",
"syscall.IFT_VOICEFXO": "syscall",
"syscall.IFT_VOICEFXS": "syscall",
"syscall.IFT_VOICEOVERATM": "syscall",
"syscall.IFT_VOICEOVERCABLE": "syscall",
"syscall.IFT_VOICEOVERFRAMERELAY": "syscall",
"syscall.IFT_VOICEOVERIP": "syscall",
"syscall.IFT_X213": "syscall",
"syscall.IFT_X25": "syscall",
"syscall.IFT_X25DDN": "syscall",
"syscall.IFT_X25HUNTGROUP": "syscall",
"syscall.IFT_X25MLP": "syscall",
"syscall.IFT_X25PLE": "syscall",
"syscall.IFT_XETHER": "syscall",
"syscall.IGNBRK": "syscall",
"syscall.IGNCR": "syscall",
"syscall.IGNORE": "syscall",
"syscall.IGNPAR": "syscall",
"syscall.IMAXBEL": "syscall",
"syscall.INFINITE": "syscall",
"syscall.INLCR": "syscall",
"syscall.INPCK": "syscall",
"syscall.INVALID_FILE_ATTRIBUTES": "syscall",
"syscall.IN_ACCESS": "syscall",
"syscall.IN_ALL_EVENTS": "syscall",
"syscall.IN_ATTRIB": "syscall",
"syscall.IN_CLASSA_HOST": "syscall",
"syscall.IN_CLASSA_MAX": "syscall",
"syscall.IN_CLASSA_NET": "syscall",
"syscall.IN_CLASSA_NSHIFT": "syscall",
"syscall.IN_CLASSB_HOST": "syscall",
"syscall.IN_CLASSB_MAX": "syscall",
"syscall.IN_CLASSB_NET": "syscall",
"syscall.IN_CLASSB_NSHIFT": "syscall",
"syscall.IN_CLASSC_HOST": "syscall",
"syscall.IN_CLASSC_NET": "syscall",
"syscall.IN_CLASSC_NSHIFT": "syscall",
"syscall.IN_CLASSD_HOST": "syscall",
"syscall.IN_CLASSD_NET": "syscall",
"syscall.IN_CLASSD_NSHIFT": "syscall",
"syscall.IN_CLOEXEC": "syscall",
"syscall.IN_CLOSE": "syscall",
"syscall.IN_CLOSE_NOWRITE": "syscall",
"syscall.IN_CLOSE_WRITE": "syscall",
"syscall.IN_CREATE": "syscall",
"syscall.IN_DELETE": "syscall",
"syscall.IN_DELETE_SELF": "syscall",
"syscall.IN_DONT_FOLLOW": "syscall",
"syscall.IN_EXCL_UNLINK": "syscall",
"syscall.IN_IGNORED": "syscall",
"syscall.IN_ISDIR": "syscall",
"syscall.IN_LINKLOCALNETNUM": "syscall",
"syscall.IN_LOOPBACKNET": "syscall",
"syscall.IN_MASK_ADD": "syscall",
"syscall.IN_MODIFY": "syscall",
"syscall.IN_MOVE": "syscall",
"syscall.IN_MOVED_FROM": "syscall",
"syscall.IN_MOVED_TO": "syscall",
"syscall.IN_MOVE_SELF": "syscall",
"syscall.IN_NONBLOCK": "syscall",
"syscall.IN_ONESHOT": "syscall",
"syscall.IN_ONLYDIR": "syscall",
"syscall.IN_OPEN": "syscall",
"syscall.IN_Q_OVERFLOW": "syscall",
"syscall.IN_RFC3021_HOST": "syscall",
"syscall.IN_RFC3021_MASK": "syscall",
"syscall.IN_RFC3021_NET": "syscall",
"syscall.IN_RFC3021_NSHIFT": "syscall",
"syscall.IN_UNMOUNT": "syscall",
"syscall.IOC_IN": "syscall",
"syscall.IOC_INOUT": "syscall",
"syscall.IOC_OUT": "syscall",
"syscall.IOC_VENDOR": "syscall",
"syscall.IOC_WS2": "syscall",
"syscall.IO_REPARSE_TAG_SYMLINK": "syscall",
"syscall.IPMreq": "syscall",
"syscall.IPMreqn": "syscall",
"syscall.IPPROTO_3PC": "syscall",
"syscall.IPPROTO_ADFS": "syscall",
"syscall.IPPROTO_AH": "syscall",
"syscall.IPPROTO_AHIP": "syscall",
"syscall.IPPROTO_APES": "syscall",
"syscall.IPPROTO_ARGUS": "syscall",
"syscall.IPPROTO_AX25": "syscall",
"syscall.IPPROTO_BHA": "syscall",
"syscall.IPPROTO_BLT": "syscall",
"syscall.IPPROTO_BRSATMON": "syscall",
"syscall.IPPROTO_CARP": "syscall",
"syscall.IPPROTO_CFTP": "syscall",
"syscall.IPPROTO_CHAOS": "syscall",
"syscall.IPPROTO_CMTP": "syscall",
"syscall.IPPROTO_COMP": "syscall",
"syscall.IPPROTO_CPHB": "syscall",
"syscall.IPPROTO_CPNX": "syscall",
"syscall.IPPROTO_DCCP": "syscall",
"syscall.IPPROTO_DDP": "syscall",
"syscall.IPPROTO_DGP": "syscall",
"syscall.IPPROTO_DIVERT": "syscall",
"syscall.IPPROTO_DIVERT_INIT": "syscall",
"syscall.IPPROTO_DIVERT_RESP": "syscall",
"syscall.IPPROTO_DONE": "syscall",
"syscall.IPPROTO_DSTOPTS": "syscall",
"syscall.IPPROTO_EGP": "syscall",
"syscall.IPPROTO_EMCON": "syscall",
"syscall.IPPROTO_ENCAP": "syscall",
"syscall.IPPROTO_EON": "syscall",
"syscall.IPPROTO_ESP": "syscall",
"syscall.IPPROTO_ETHERIP": "syscall",
"syscall.IPPROTO_FRAGMENT": "syscall",
"syscall.IPPROTO_GGP": "syscall",
"syscall.IPPROTO_GMTP": "syscall",
"syscall.IPPROTO_GRE": "syscall",
"syscall.IPPROTO_HELLO": "syscall",
"syscall.IPPROTO_HMP": "syscall",
"syscall.IPPROTO_HOPOPTS": "syscall",
"syscall.IPPROTO_ICMP": "syscall",
"syscall.IPPROTO_ICMPV6": "syscall",
"syscall.IPPROTO_IDP": "syscall",
"syscall.IPPROTO_IDPR": "syscall",
"syscall.IPPROTO_IDRP": "syscall",
"syscall.IPPROTO_IGMP": "syscall",
"syscall.IPPROTO_IGP": "syscall",
"syscall.IPPROTO_IGRP": "syscall",
"syscall.IPPROTO_IL": "syscall",
"syscall.IPPROTO_INLSP": "syscall",
"syscall.IPPROTO_INP": "syscall",
"syscall.IPPROTO_IP": "syscall",
"syscall.IPPROTO_IPCOMP": "syscall",
"syscall.IPPROTO_IPCV": "syscall",
"syscall.IPPROTO_IPEIP": "syscall",
"syscall.IPPROTO_IPIP": "syscall",
"syscall.IPPROTO_IPPC": "syscall",
"syscall.IPPROTO_IPV4": "syscall",
"syscall.IPPROTO_IPV6": "syscall",
"syscall.IPPROTO_IPV6_ICMP": "syscall",
"syscall.IPPROTO_IRTP": "syscall",
"syscall.IPPROTO_KRYPTOLAN": "syscall",
"syscall.IPPROTO_LARP": "syscall",
"syscall.IPPROTO_LEAF1": "syscall",
"syscall.IPPROTO_LEAF2": "syscall",
"syscall.IPPROTO_MAX": "syscall",
"syscall.IPPROTO_MAXID": "syscall",
"syscall.IPPROTO_MEAS": "syscall",
"syscall.IPPROTO_MH": "syscall",
"syscall.IPPROTO_MHRP": "syscall",
"syscall.IPPROTO_MICP": "syscall",
"syscall.IPPROTO_MOBILE": "syscall",
"syscall.IPPROTO_MPLS": "syscall",
"syscall.IPPROTO_MTP": "syscall",
"syscall.IPPROTO_MUX": "syscall",
"syscall.IPPROTO_ND": "syscall",
"syscall.IPPROTO_NHRP": "syscall",
"syscall.IPPROTO_NONE": "syscall",
"syscall.IPPROTO_NSP": "syscall",
"syscall.IPPROTO_NVPII": "syscall",
"syscall.IPPROTO_OLD_DIVERT": "syscall",
"syscall.IPPROTO_OSPFIGP": "syscall",
"syscall.IPPROTO_PFSYNC": "syscall",
"syscall.IPPROTO_PGM": "syscall",
"syscall.IPPROTO_PIGP": "syscall",
"syscall.IPPROTO_PIM": "syscall",
"syscall.IPPROTO_PRM": "syscall",
"syscall.IPPROTO_PUP": "syscall",
"syscall.IPPROTO_PVP": "syscall",
"syscall.IPPROTO_RAW": "syscall",
"syscall.IPPROTO_RCCMON": "syscall",
"syscall.IPPROTO_RDP": "syscall",
"syscall.IPPROTO_ROUTING": "syscall",
"syscall.IPPROTO_RSVP": "syscall",
"syscall.IPPROTO_RVD": "syscall",
"syscall.IPPROTO_SATEXPAK": "syscall",
"syscall.IPPROTO_SATMON": "syscall",
"syscall.IPPROTO_SCCSP": "syscall",
"syscall.IPPROTO_SCTP": "syscall",
"syscall.IPPROTO_SDRP": "syscall",
"syscall.IPPROTO_SEND": "syscall",
"syscall.IPPROTO_SEP": "syscall",
"syscall.IPPROTO_SKIP": "syscall",
"syscall.IPPROTO_SPACER": "syscall",
"syscall.IPPROTO_SRPC": "syscall",
"syscall.IPPROTO_ST": "syscall",
"syscall.IPPROTO_SVMTP": "syscall",
"syscall.IPPROTO_SWIPE": "syscall",
"syscall.IPPROTO_TCF": "syscall",
"syscall.IPPROTO_TCP": "syscall",
"syscall.IPPROTO_TLSP": "syscall",
"syscall.IPPROTO_TP": "syscall",
"syscall.IPPROTO_TPXX": "syscall",
"syscall.IPPROTO_TRUNK1": "syscall",
"syscall.IPPROTO_TRUNK2": "syscall",
"syscall.IPPROTO_TTP": "syscall",
"syscall.IPPROTO_UDP": "syscall",
"syscall.IPPROTO_UDPLITE": "syscall",
"syscall.IPPROTO_VINES": "syscall",
"syscall.IPPROTO_VISA": "syscall",
"syscall.IPPROTO_VMTP": "syscall",
"syscall.IPPROTO_VRRP": "syscall",
"syscall.IPPROTO_WBEXPAK": "syscall",
"syscall.IPPROTO_WBMON": "syscall",
"syscall.IPPROTO_WSN": "syscall",
"syscall.IPPROTO_XNET": "syscall",
"syscall.IPPROTO_XTP": "syscall",
"syscall.IPV6_2292DSTOPTS": "syscall",
"syscall.IPV6_2292HOPLIMIT": "syscall",
"syscall.IPV6_2292HOPOPTS": "syscall",
"syscall.IPV6_2292NEXTHOP": "syscall",
"syscall.IPV6_2292PKTINFO": "syscall",
"syscall.IPV6_2292PKTOPTIONS": "syscall",
"syscall.IPV6_2292RTHDR": "syscall",
"syscall.IPV6_ADDRFORM": "syscall",
"syscall.IPV6_ADD_MEMBERSHIP": "syscall",
"syscall.IPV6_AUTHHDR": "syscall",
"syscall.IPV6_AUTH_LEVEL": "syscall",
"syscall.IPV6_AUTOFLOWLABEL": "syscall",
"syscall.IPV6_BINDANY": "syscall",
"syscall.IPV6_BINDV6ONLY": "syscall",
"syscall.IPV6_BOUND_IF": "syscall",
"syscall.IPV6_CHECKSUM": "syscall",
"syscall.IPV6_DEFAULT_MULTICAST_HOPS": "syscall",
"syscall.IPV6_DEFAULT_MULTICAST_LOOP": "syscall",
"syscall.IPV6_DEFHLIM": "syscall",
"syscall.IPV6_DONTFRAG": "syscall",
"syscall.IPV6_DROP_MEMBERSHIP": "syscall",
"syscall.IPV6_DSTOPTS": "syscall",
"syscall.IPV6_ESP_NETWORK_LEVEL": "syscall",
"syscall.IPV6_ESP_TRANS_LEVEL": "syscall",
"syscall.IPV6_FAITH": "syscall",
"syscall.IPV6_FLOWINFO_MASK": "syscall",
"syscall.IPV6_FLOWLABEL_MASK": "syscall",
"syscall.IPV6_FRAGTTL": "syscall",
"syscall.IPV6_FW_ADD": "syscall",
"syscall.IPV6_FW_DEL": "syscall",
"syscall.IPV6_FW_FLUSH": "syscall",
"syscall.IPV6_FW_GET": "syscall",
"syscall.IPV6_FW_ZERO": "syscall",
"syscall.IPV6_HLIMDEC": "syscall",
"syscall.IPV6_HOPLIMIT": "syscall",
"syscall.IPV6_HOPOPTS": "syscall",
"syscall.IPV6_IPCOMP_LEVEL": "syscall",
"syscall.IPV6_IPSEC_POLICY": "syscall",
"syscall.IPV6_JOIN_ANYCAST": "syscall",
"syscall.IPV6_JOIN_GROUP": "syscall",
"syscall.IPV6_LEAVE_ANYCAST": "syscall",
"syscall.IPV6_LEAVE_GROUP": "syscall",
"syscall.IPV6_MAXHLIM": "syscall",
"syscall.IPV6_MAXOPTHDR": "syscall",
"syscall.IPV6_MAXPACKET": "syscall",
"syscall.IPV6_MAX_GROUP_SRC_FILTER": "syscall",
"syscall.IPV6_MAX_MEMBERSHIPS": "syscall",
"syscall.IPV6_MAX_SOCK_SRC_FILTER": "syscall",
"syscall.IPV6_MIN_MEMBERSHIPS": "syscall",
"syscall.IPV6_MMTU": "syscall",
"syscall.IPV6_MSFILTER": "syscall",
"syscall.IPV6_MTU": "syscall",
"syscall.IPV6_MTU_DISCOVER": "syscall",
"syscall.IPV6_MULTICAST_HOPS": "syscall",
"syscall.IPV6_MULTICAST_IF": "syscall",
"syscall.IPV6_MULTICAST_LOOP": "syscall",
"syscall.IPV6_NEXTHOP": "syscall",
"syscall.IPV6_OPTIONS": "syscall",
"syscall.IPV6_PATHMTU": "syscall",
"syscall.IPV6_PIPEX": "syscall",
"syscall.IPV6_PKTINFO": "syscall",
"syscall.IPV6_PMTUDISC_DO": "syscall",
"syscall.IPV6_PMTUDISC_DONT": "syscall",
"syscall.IPV6_PMTUDISC_PROBE": "syscall",
"syscall.IPV6_PMTUDISC_WANT": "syscall",
"syscall.IPV6_PORTRANGE": "syscall",
"syscall.IPV6_PORTRANGE_DEFAULT": "syscall",
"syscall.IPV6_PORTRANGE_HIGH": "syscall",
"syscall.IPV6_PORTRANGE_LOW": "syscall",
"syscall.IPV6_PREFER_TEMPADDR": "syscall",
"syscall.IPV6_RECVDSTOPTS": "syscall",
"syscall.IPV6_RECVDSTPORT": "syscall",
"syscall.IPV6_RECVERR": "syscall",
"syscall.IPV6_RECVHOPLIMIT": "syscall",
"syscall.IPV6_RECVHOPOPTS": "syscall",
"syscall.IPV6_RECVPATHMTU": "syscall",
"syscall.IPV6_RECVPKTINFO": "syscall",
"syscall.IPV6_RECVRTHDR": "syscall",
"syscall.IPV6_RECVTCLASS": "syscall",
"syscall.IPV6_ROUTER_ALERT": "syscall",
"syscall.IPV6_RTABLE": "syscall",
"syscall.IPV6_RTHDR": "syscall",
"syscall.IPV6_RTHDRDSTOPTS": "syscall",
"syscall.IPV6_RTHDR_LOOSE": "syscall",
"syscall.IPV6_RTHDR_STRICT": "syscall",
"syscall.IPV6_RTHDR_TYPE_0": "syscall",
"syscall.IPV6_RXDSTOPTS": "syscall",
"syscall.IPV6_RXHOPOPTS": "syscall",
"syscall.IPV6_SOCKOPT_RESERVED1": "syscall",
"syscall.IPV6_TCLASS": "syscall",
"syscall.IPV6_UNICAST_HOPS": "syscall",
"syscall.IPV6_USE_MIN_MTU": "syscall",
"syscall.IPV6_V6ONLY": "syscall",
"syscall.IPV6_VERSION": "syscall",
"syscall.IPV6_VERSION_MASK": "syscall",
"syscall.IPV6_XFRM_POLICY": "syscall",
"syscall.IP_ADD_MEMBERSHIP": "syscall",
"syscall.IP_ADD_SOURCE_MEMBERSHIP": "syscall",
"syscall.IP_AUTH_LEVEL": "syscall",
"syscall.IP_BINDANY": "syscall",
"syscall.IP_BLOCK_SOURCE": "syscall",
"syscall.IP_BOUND_IF": "syscall",
"syscall.IP_DEFAULT_MULTICAST_LOOP": "syscall",
"syscall.IP_DEFAULT_MULTICAST_TTL": "syscall",
"syscall.IP_DF": "syscall",
"syscall.IP_DIVERTFL": "syscall",
"syscall.IP_DONTFRAG": "syscall",
"syscall.IP_DROP_MEMBERSHIP": "syscall",
"syscall.IP_DROP_SOURCE_MEMBERSHIP": "syscall",
"syscall.IP_DUMMYNET3": "syscall",
"syscall.IP_DUMMYNET_CONFIGURE": "syscall",
"syscall.IP_DUMMYNET_DEL": "syscall",
"syscall.IP_DUMMYNET_FLUSH": "syscall",
"syscall.IP_DUMMYNET_GET": "syscall",
"syscall.IP_EF": "syscall",
"syscall.IP_ERRORMTU": "syscall",
"syscall.IP_ESP_NETWORK_LEVEL": "syscall",
"syscall.IP_ESP_TRANS_LEVEL": "syscall",
"syscall.IP_FAITH": "syscall",
"syscall.IP_FREEBIND": "syscall",
"syscall.IP_FW3": "syscall",
"syscall.IP_FW_ADD": "syscall",
"syscall.IP_FW_DEL": "syscall",
"syscall.IP_FW_FLUSH": "syscall",
"syscall.IP_FW_GET": "syscall",
"syscall.IP_FW_NAT_CFG": "syscall",
"syscall.IP_FW_NAT_DEL": "syscall",
"syscall.IP_FW_NAT_GET_CONFIG": "syscall",
"syscall.IP_FW_NAT_GET_LOG": "syscall",
"syscall.IP_FW_RESETLOG": "syscall",
"syscall.IP_FW_TABLE_ADD": "syscall",
"syscall.IP_FW_TABLE_DEL": "syscall",
"syscall.IP_FW_TABLE_FLUSH": "syscall",
"syscall.IP_FW_TABLE_GETSIZE": "syscall",
"syscall.IP_FW_TABLE_LIST": "syscall",
"syscall.IP_FW_ZERO": "syscall",
"syscall.IP_HDRINCL": "syscall",
"syscall.IP_IPCOMP_LEVEL": "syscall",
"syscall.IP_IPSECFLOWINFO": "syscall",
"syscall.IP_IPSEC_LOCAL_AUTH": "syscall",
"syscall.IP_IPSEC_LOCAL_CRED": "syscall",
"syscall.IP_IPSEC_LOCAL_ID": "syscall",
"syscall.IP_IPSEC_POLICY": "syscall",
"syscall.IP_IPSEC_REMOTE_AUTH": "syscall",
"syscall.IP_IPSEC_REMOTE_CRED": "syscall",
"syscall.IP_IPSEC_REMOTE_ID": "syscall",
"syscall.IP_MAXPACKET": "syscall",
"syscall.IP_MAX_GROUP_SRC_FILTER": "syscall",
"syscall.IP_MAX_MEMBERSHIPS": "syscall",
"syscall.IP_MAX_SOCK_MUTE_FILTER": "syscall",
"syscall.IP_MAX_SOCK_SRC_FILTER": "syscall",
"syscall.IP_MAX_SOURCE_FILTER": "syscall",
"syscall.IP_MF": "syscall",
"syscall.IP_MINFRAGSIZE": "syscall",
"syscall.IP_MINTTL": "syscall",
"syscall.IP_MIN_MEMBERSHIPS": "syscall",
"syscall.IP_MSFILTER": "syscall",
"syscall.IP_MSS": "syscall",
"syscall.IP_MTU": "syscall",
"syscall.IP_MTU_DISCOVER": "syscall",
"syscall.IP_MULTICAST_IF": "syscall",
"syscall.IP_MULTICAST_IFINDEX": "syscall",
"syscall.IP_MULTICAST_LOOP": "syscall",
"syscall.IP_MULTICAST_TTL": "syscall",
"syscall.IP_MULTICAST_VIF": "syscall",
"syscall.IP_NAT__XXX": "syscall",
"syscall.IP_OFFMASK": "syscall",
"syscall.IP_OLD_FW_ADD": "syscall",
"syscall.IP_OLD_FW_DEL": "syscall",
"syscall.IP_OLD_FW_FLUSH": "syscall",
"syscall.IP_OLD_FW_GET": "syscall",
"syscall.IP_OLD_FW_RESETLOG": "syscall",
"syscall.IP_OLD_FW_ZERO": "syscall",
"syscall.IP_ONESBCAST": "syscall",
"syscall.IP_OPTIONS": "syscall",
"syscall.IP_ORIGDSTADDR": "syscall",
"syscall.IP_PASSSEC": "syscall",
"syscall.IP_PIPEX": "syscall",
"syscall.IP_PKTINFO": "syscall",
"syscall.IP_PKTOPTIONS": "syscall",
"syscall.IP_PMTUDISC": "syscall",
"syscall.IP_PMTUDISC_DO": "syscall",
"syscall.IP_PMTUDISC_DONT": "syscall",
"syscall.IP_PMTUDISC_PROBE": "syscall",
"syscall.IP_PMTUDISC_WANT": "syscall",
"syscall.IP_PORTRANGE": "syscall",
"syscall.IP_PORTRANGE_DEFAULT": "syscall",
"syscall.IP_PORTRANGE_HIGH": "syscall",
"syscall.IP_PORTRANGE_LOW": "syscall",
"syscall.IP_RECVDSTADDR": "syscall",
"syscall.IP_RECVDSTPORT": "syscall",
"syscall.IP_RECVERR": "syscall",
"syscall.IP_RECVIF": "syscall",
"syscall.IP_RECVOPTS": "syscall",
"syscall.IP_RECVORIGDSTADDR": "syscall",
"syscall.IP_RECVPKTINFO": "syscall",
"syscall.IP_RECVRETOPTS": "syscall",
"syscall.IP_RECVRTABLE": "syscall",
"syscall.IP_RECVTOS": "syscall",
"syscall.IP_RECVTTL": "syscall",
"syscall.IP_RETOPTS": "syscall",
"syscall.IP_RF": "syscall",
"syscall.IP_ROUTER_ALERT": "syscall",
"syscall.IP_RSVP_OFF": "syscall",
"syscall.IP_RSVP_ON": "syscall",
"syscall.IP_RSVP_VIF_OFF": "syscall",
"syscall.IP_RSVP_VIF_ON": "syscall",
"syscall.IP_RTABLE": "syscall",
"syscall.IP_SENDSRCADDR": "syscall",
"syscall.IP_STRIPHDR": "syscall",
"syscall.IP_TOS": "syscall",
"syscall.IP_TRAFFIC_MGT_BACKGROUND": "syscall",
"syscall.IP_TRANSPARENT": "syscall",
"syscall.IP_TTL": "syscall",
"syscall.IP_UNBLOCK_SOURCE": "syscall",
"syscall.IP_XFRM_POLICY": "syscall",
"syscall.IPv6MTUInfo": "syscall",
"syscall.IPv6Mreq": "syscall",
"syscall.ISIG": "syscall",
"syscall.ISTRIP": "syscall",
"syscall.IUCLC": "syscall",
"syscall.IUTF8": "syscall",
"syscall.IXANY": "syscall",
"syscall.IXOFF": "syscall",
"syscall.IXON": "syscall",
"syscall.IfAddrmsg": "syscall",
"syscall.IfAnnounceMsghdr": "syscall",
"syscall.IfData": "syscall",
"syscall.IfInfomsg": "syscall",
"syscall.IfMsghdr": "syscall",
"syscall.IfaMsghdr": "syscall",
"syscall.IfmaMsghdr": "syscall",
"syscall.IfmaMsghdr2": "syscall",
"syscall.ImplementsGetwd": "syscall",
"syscall.Inet4Pktinfo": "syscall",
"syscall.Inet6Pktinfo": "syscall",
"syscall.InotifyAddWatch": "syscall",
"syscall.InotifyEvent": "syscall",
"syscall.InotifyInit": "syscall",
"syscall.InotifyInit1": "syscall",
"syscall.InotifyRmWatch": "syscall",
"syscall.InterfaceAddrMessage": "syscall",
"syscall.InterfaceAnnounceMessage": "syscall",
"syscall.InterfaceInfo": "syscall",
"syscall.InterfaceMessage": "syscall",
"syscall.InterfaceMulticastAddrMessage": "syscall",
"syscall.InvalidHandle": "syscall",
"syscall.Ioperm": "syscall",
"syscall.Iopl": "syscall",
"syscall.Iovec": "syscall",
"syscall.IpAdapterInfo": "syscall",
"syscall.IpAddrString": "syscall",
"syscall.IpAddressString": "syscall",
"syscall.IpMaskString": "syscall",
"syscall.Issetugid": "syscall",
"syscall.KEY_ALL_ACCESS": "syscall",
"syscall.KEY_CREATE_LINK": "syscall",
"syscall.KEY_CREATE_SUB_KEY": "syscall",
"syscall.KEY_ENUMERATE_SUB_KEYS": "syscall",
"syscall.KEY_EXECUTE": "syscall",
"syscall.KEY_NOTIFY": "syscall",
"syscall.KEY_QUERY_VALUE": "syscall",
"syscall.KEY_READ": "syscall",
"syscall.KEY_SET_VALUE": "syscall",
"syscall.KEY_WOW64_32KEY": "syscall",
"syscall.KEY_WOW64_64KEY": "syscall",
"syscall.KEY_WRITE": "syscall",
"syscall.Kevent": "syscall",
"syscall.Kevent_t": "syscall",
"syscall.Kill": "syscall",
"syscall.Klogctl": "syscall",
"syscall.Kqueue": "syscall",
"syscall.LANG_ENGLISH": "syscall",
"syscall.LAYERED_PROTOCOL": "syscall",
"syscall.LCNT_OVERLOAD_FLUSH": "syscall",
"syscall.LINUX_REBOOT_CMD_CAD_OFF": "syscall",
"syscall.LINUX_REBOOT_CMD_CAD_ON": "syscall",
"syscall.LINUX_REBOOT_CMD_HALT": "syscall",
"syscall.LINUX_REBOOT_CMD_KEXEC": "syscall",
"syscall.LINUX_REBOOT_CMD_POWER_OFF": "syscall",
"syscall.LINUX_REBOOT_CMD_RESTART": "syscall",
"syscall.LINUX_REBOOT_CMD_RESTART2": "syscall",
"syscall.LINUX_REBOOT_CMD_SW_SUSPEND": "syscall",
"syscall.LINUX_REBOOT_MAGIC1": "syscall",
"syscall.LINUX_REBOOT_MAGIC2": "syscall",
"syscall.LOCK_EX": "syscall",
"syscall.LOCK_NB": "syscall",
"syscall.LOCK_SH": "syscall",
"syscall.LOCK_UN": "syscall",
"syscall.LazyDLL": "syscall",
"syscall.LazyProc": "syscall",
"syscall.Lchown": "syscall",
"syscall.Linger": "syscall",
"syscall.Link": "syscall",
"syscall.Listen": "syscall",
"syscall.Listxattr": "syscall",
"syscall.LoadCancelIoEx": "syscall",
"syscall.LoadConnectEx": "syscall",
"syscall.LoadCreateSymbolicLink": "syscall",
"syscall.LoadDLL": "syscall",
"syscall.LoadGetAddrInfo": "syscall",
"syscall.LoadLibrary": "syscall",
"syscall.LoadSetFileCompletionNotificationModes": "syscall",
"syscall.LocalFree": "syscall",
"syscall.Log2phys_t": "syscall",
"syscall.LookupAccountName": "syscall",
"syscall.LookupAccountSid": "syscall",
"syscall.LookupSID": "syscall",
"syscall.LsfJump": "syscall",
"syscall.LsfSocket": "syscall",
"syscall.LsfStmt": "syscall",
"syscall.Lstat": "syscall",
"syscall.MADV_AUTOSYNC": "syscall",
"syscall.MADV_CAN_REUSE": "syscall",
"syscall.MADV_CORE": "syscall",
"syscall.MADV_DOFORK": "syscall",
"syscall.MADV_DONTFORK": "syscall",
"syscall.MADV_DONTNEED": "syscall",
"syscall.MADV_FREE": "syscall",
"syscall.MADV_FREE_REUSABLE": "syscall",
"syscall.MADV_FREE_REUSE": "syscall",
"syscall.MADV_HUGEPAGE": "syscall",
"syscall.MADV_HWPOISON": "syscall",
"syscall.MADV_MERGEABLE": "syscall",
"syscall.MADV_NOCORE": "syscall",
"syscall.MADV_NOHUGEPAGE": "syscall",
"syscall.MADV_NORMAL": "syscall",
"syscall.MADV_NOSYNC": "syscall",
"syscall.MADV_PROTECT": "syscall",
"syscall.MADV_RANDOM": "syscall",
"syscall.MADV_REMOVE": "syscall",
"syscall.MADV_SEQUENTIAL": "syscall",
"syscall.MADV_SPACEAVAIL": "syscall",
"syscall.MADV_UNMERGEABLE": "syscall",
"syscall.MADV_WILLNEED": "syscall",
"syscall.MADV_ZERO_WIRED_PAGES": "syscall",
"syscall.MAP_32BIT": "syscall",
"syscall.MAP_ALIGNED_SUPER": "syscall",
"syscall.MAP_ALIGNMENT_16MB": "syscall",
"syscall.MAP_ALIGNMENT_1TB": "syscall",
"syscall.MAP_ALIGNMENT_256TB": "syscall",
"syscall.MAP_ALIGNMENT_4GB": "syscall",
"syscall.MAP_ALIGNMENT_64KB": "syscall",
"syscall.MAP_ALIGNMENT_64PB": "syscall",
"syscall.MAP_ALIGNMENT_MASK": "syscall",
"syscall.MAP_ALIGNMENT_SHIFT": "syscall",
"syscall.MAP_ANON": "syscall",
"syscall.MAP_ANONYMOUS": "syscall",
"syscall.MAP_COPY": "syscall",
"syscall.MAP_DENYWRITE": "syscall",
"syscall.MAP_EXECUTABLE": "syscall",
"syscall.MAP_FILE": "syscall",
"syscall.MAP_FIXED": "syscall",
"syscall.MAP_FLAGMASK": "syscall",
"syscall.MAP_GROWSDOWN": "syscall",
"syscall.MAP_HASSEMAPHORE": "syscall",
"syscall.MAP_HUGETLB": "syscall",
"syscall.MAP_INHERIT": "syscall",
"syscall.MAP_INHERIT_COPY": "syscall",
"syscall.MAP_INHERIT_DEFAULT": "syscall",
"syscall.MAP_INHERIT_DONATE_COPY": "syscall",
"syscall.MAP_INHERIT_NONE": "syscall",
"syscall.MAP_INHERIT_SHARE": "syscall",
"syscall.MAP_JIT": "syscall",
"syscall.MAP_LOCKED": "syscall",
"syscall.MAP_NOCACHE": "syscall",
"syscall.MAP_NOCORE": "syscall",
"syscall.MAP_NOEXTEND": "syscall",
"syscall.MAP_NONBLOCK": "syscall",
"syscall.MAP_NORESERVE": "syscall",
"syscall.MAP_NOSYNC": "syscall",
"syscall.MAP_POPULATE": "syscall",
"syscall.MAP_PREFAULT_READ": "syscall",
"syscall.MAP_PRIVATE": "syscall",
"syscall.MAP_RENAME": "syscall",
"syscall.MAP_RESERVED0080": "syscall",
"syscall.MAP_RESERVED0100": "syscall",
"syscall.MAP_SHARED": "syscall",
"syscall.MAP_STACK": "syscall",
"syscall.MAP_TRYFIXED": "syscall",
"syscall.MAP_TYPE": "syscall",
"syscall.MAP_WIRED": "syscall",
"syscall.MAXIMUM_REPARSE_DATA_BUFFER_SIZE": "syscall",
"syscall.MAXLEN_IFDESCR": "syscall",
"syscall.MAXLEN_PHYSADDR": "syscall",
"syscall.MAX_ADAPTER_ADDRESS_LENGTH": "syscall",
"syscall.MAX_ADAPTER_DESCRIPTION_LENGTH": "syscall",
"syscall.MAX_ADAPTER_NAME_LENGTH": "syscall",
"syscall.MAX_COMPUTERNAME_LENGTH": "syscall",
"syscall.MAX_INTERFACE_NAME_LEN": "syscall",
"syscall.MAX_LONG_PATH": "syscall",
"syscall.MAX_PATH": "syscall",
"syscall.MAX_PROTOCOL_CHAIN": "syscall",
"syscall.MCL_CURRENT": "syscall",
"syscall.MCL_FUTURE": "syscall",
"syscall.MNT_DETACH": "syscall",
"syscall.MNT_EXPIRE": "syscall",
"syscall.MNT_FORCE": "syscall",
"syscall.MSG_BCAST": "syscall",
"syscall.MSG_CMSG_CLOEXEC": "syscall",
"syscall.MSG_COMPAT": "syscall",
"syscall.MSG_CONFIRM": "syscall",
"syscall.MSG_CONTROLMBUF": "syscall",
"syscall.MSG_CTRUNC": "syscall",
"syscall.MSG_DONTROUTE": "syscall",
"syscall.MSG_DONTWAIT": "syscall",
"syscall.MSG_EOF": "syscall",
"syscall.MSG_EOR": "syscall",
"syscall.MSG_ERRQUEUE": "syscall",
"syscall.MSG_FASTOPEN": "syscall",
"syscall.MSG_FIN": "syscall",
"syscall.MSG_FLUSH": "syscall",
"syscall.MSG_HAVEMORE": "syscall",
"syscall.MSG_HOLD": "syscall",
"syscall.MSG_IOVUSRSPACE": "syscall",
"syscall.MSG_LENUSRSPACE": "syscall",
"syscall.MSG_MCAST": "syscall",
"syscall.MSG_MORE": "syscall",
"syscall.MSG_NAMEMBUF": "syscall",
"syscall.MSG_NBIO": "syscall",
"syscall.MSG_NEEDSA": "syscall",
"syscall.MSG_NOSIGNAL": "syscall",
"syscall.MSG_NOTIFICATION": "syscall",
"syscall.MSG_OOB": "syscall",
"syscall.MSG_PEEK": "syscall",
"syscall.MSG_PROXY": "syscall",
"syscall.MSG_RCVMORE": "syscall",
"syscall.MSG_RST": "syscall",
"syscall.MSG_SEND": "syscall",
"syscall.MSG_SYN": "syscall",
"syscall.MSG_TRUNC": "syscall",
"syscall.MSG_TRYHARD": "syscall",
"syscall.MSG_USERFLAGS": "syscall",
"syscall.MSG_WAITALL": "syscall",
"syscall.MSG_WAITFORONE": "syscall",
"syscall.MSG_WAITSTREAM": "syscall",
"syscall.MS_ACTIVE": "syscall",
"syscall.MS_ASYNC": "syscall",
"syscall.MS_BIND": "syscall",
"syscall.MS_DEACTIVATE": "syscall",
"syscall.MS_DIRSYNC": "syscall",
"syscall.MS_INVALIDATE": "syscall",
"syscall.MS_I_VERSION": "syscall",
"syscall.MS_KERNMOUNT": "syscall",
"syscall.MS_KILLPAGES": "syscall",
"syscall.MS_MANDLOCK": "syscall",
"syscall.MS_MGC_MSK": "syscall",
"syscall.MS_MGC_VAL": "syscall",
"syscall.MS_MOVE": "syscall",
"syscall.MS_NOATIME": "syscall",
"syscall.MS_NODEV": "syscall",
"syscall.MS_NODIRATIME": "syscall",
"syscall.MS_NOEXEC": "syscall",
"syscall.MS_NOSUID": "syscall",
"syscall.MS_NOUSER": "syscall",
"syscall.MS_POSIXACL": "syscall",
"syscall.MS_PRIVATE": "syscall",
"syscall.MS_RDONLY": "syscall",
"syscall.MS_REC": "syscall",
"syscall.MS_RELATIME": "syscall",
"syscall.MS_REMOUNT": "syscall",
"syscall.MS_RMT_MASK": "syscall",
"syscall.MS_SHARED": "syscall",
"syscall.MS_SILENT": "syscall",
"syscall.MS_SLAVE": "syscall",
"syscall.MS_STRICTATIME": "syscall",
"syscall.MS_SYNC": "syscall",
"syscall.MS_SYNCHRONOUS": "syscall",
"syscall.MS_UNBINDABLE": "syscall",
"syscall.Madvise": "syscall",
"syscall.MapViewOfFile": "syscall",
"syscall.MaxTokenInfoClass": "syscall",
"syscall.Mclpool": "syscall",
"syscall.MibIfRow": "syscall",
"syscall.Mkdir": "syscall",
"syscall.Mkdirat": "syscall",
"syscall.Mkfifo": "syscall",
"syscall.Mknod": "syscall",
"syscall.Mknodat": "syscall",
"syscall.Mlock": "syscall",
"syscall.Mlockall": "syscall",
"syscall.Mmap": "syscall",
"syscall.Mount": "syscall",
"syscall.MoveFile": "syscall",
"syscall.Mprotect": "syscall",
"syscall.Msghdr": "syscall",
"syscall.Munlock": "syscall",
"syscall.Munlockall": "syscall",
"syscall.Munmap": "syscall",
"syscall.MustLoadDLL": "syscall",
"syscall.NAME_MAX": "syscall",
"syscall.NETLINK_ADD_MEMBERSHIP": "syscall",
"syscall.NETLINK_AUDIT": "syscall",
"syscall.NETLINK_BROADCAST_ERROR": "syscall",
"syscall.NETLINK_CONNECTOR": "syscall",
"syscall.NETLINK_DNRTMSG": "syscall",
"syscall.NETLINK_DROP_MEMBERSHIP": "syscall",
"syscall.NETLINK_ECRYPTFS": "syscall",
"syscall.NETLINK_FIB_LOOKUP": "syscall",
"syscall.NETLINK_FIREWALL": "syscall",
"syscall.NETLINK_GENERIC": "syscall",
"syscall.NETLINK_INET_DIAG": "syscall",
"syscall.NETLINK_IP6_FW": "syscall",
"syscall.NETLINK_ISCSI": "syscall",
"syscall.NETLINK_KOBJECT_UEVENT": "syscall",
"syscall.NETLINK_NETFILTER": "syscall",
"syscall.NETLINK_NFLOG": "syscall",
"syscall.NETLINK_NO_ENOBUFS": "syscall",
"syscall.NETLINK_PKTINFO": "syscall",
"syscall.NETLINK_RDMA": "syscall",
"syscall.NETLINK_ROUTE": "syscall",
"syscall.NETLINK_SCSITRANSPORT": "syscall",
"syscall.NETLINK_SELINUX": "syscall",
"syscall.NETLINK_UNUSED": "syscall",
"syscall.NETLINK_USERSOCK": "syscall",
"syscall.NETLINK_XFRM": "syscall",
"syscall.NET_RT_DUMP": "syscall",
"syscall.NET_RT_DUMP2": "syscall",
"syscall.NET_RT_FLAGS": "syscall",
"syscall.NET_RT_IFLIST": "syscall",
"syscall.NET_RT_IFLIST2": "syscall",
"syscall.NET_RT_IFLISTL": "syscall",
"syscall.NET_RT_IFMALIST": "syscall",
"syscall.NET_RT_MAXID": "syscall",
"syscall.NET_RT_OIFLIST": "syscall",
"syscall.NET_RT_OOIFLIST": "syscall",
"syscall.NET_RT_STAT": "syscall",
"syscall.NET_RT_STATS": "syscall",
"syscall.NET_RT_TABLE": "syscall",
"syscall.NET_RT_TRASH": "syscall",
"syscall.NLA_ALIGNTO": "syscall",
"syscall.NLA_F_NESTED": "syscall",
"syscall.NLA_F_NET_BYTEORDER": "syscall",
"syscall.NLA_HDRLEN": "syscall",
"syscall.NLMSG_ALIGNTO": "syscall",
"syscall.NLMSG_DONE": "syscall",
"syscall.NLMSG_ERROR": "syscall",
"syscall.NLMSG_HDRLEN": "syscall",
"syscall.NLMSG_MIN_TYPE": "syscall",
"syscall.NLMSG_NOOP": "syscall",
"syscall.NLMSG_OVERRUN": "syscall",
"syscall.NLM_F_ACK": "syscall",
"syscall.NLM_F_APPEND": "syscall",
"syscall.NLM_F_ATOMIC": "syscall",
"syscall.NLM_F_CREATE": "syscall",
"syscall.NLM_F_DUMP": "syscall",
"syscall.NLM_F_ECHO": "syscall",
"syscall.NLM_F_EXCL": "syscall",
"syscall.NLM_F_MATCH": "syscall",
"syscall.NLM_F_MULTI": "syscall",
"syscall.NLM_F_REPLACE": "syscall",
"syscall.NLM_F_REQUEST": "syscall",
"syscall.NLM_F_ROOT": "syscall",
"syscall.NOFLSH": "syscall",
"syscall.NOTE_ABSOLUTE": "syscall",
"syscall.NOTE_ATTRIB": "syscall",
"syscall.NOTE_CHILD": "syscall",
"syscall.NOTE_DELETE": "syscall",
"syscall.NOTE_EOF": "syscall",
"syscall.NOTE_EXEC": "syscall",
"syscall.NOTE_EXIT": "syscall",
"syscall.NOTE_EXITSTATUS": "syscall",
"syscall.NOTE_EXTEND": "syscall",
"syscall.NOTE_FFAND": "syscall",
"syscall.NOTE_FFCOPY": "syscall",
"syscall.NOTE_FFCTRLMASK": "syscall",
"syscall.NOTE_FFLAGSMASK": "syscall",
"syscall.NOTE_FFNOP": "syscall",
"syscall.NOTE_FFOR": "syscall",
"syscall.NOTE_FORK": "syscall",
"syscall.NOTE_LINK": "syscall",
"syscall.NOTE_LOWAT": "syscall",
"syscall.NOTE_NONE": "syscall",
"syscall.NOTE_NSECONDS": "syscall",
"syscall.NOTE_PCTRLMASK": "syscall",
"syscall.NOTE_PDATAMASK": "syscall",
"syscall.NOTE_REAP": "syscall",
"syscall.NOTE_RENAME": "syscall",
"syscall.NOTE_RESOURCEEND": "syscall",
"syscall.NOTE_REVOKE": "syscall",
"syscall.NOTE_SECONDS": "syscall",
"syscall.NOTE_SIGNAL": "syscall",
"syscall.NOTE_TRACK": "syscall",
"syscall.NOTE_TRACKERR": "syscall",
"syscall.NOTE_TRIGGER": "syscall",
"syscall.NOTE_TRUNCATE": "syscall",
"syscall.NOTE_USECONDS": "syscall",
"syscall.NOTE_VM_ERROR": "syscall",
"syscall.NOTE_VM_PRESSURE": "syscall",
"syscall.NOTE_VM_PRESSURE_SUDDEN_TERMINATE": "syscall",
"syscall.NOTE_VM_PRESSURE_TERMINATE": "syscall",
"syscall.NOTE_WRITE": "syscall",
"syscall.NameCanonical": "syscall",
"syscall.NameCanonicalEx": "syscall",
"syscall.NameDisplay": "syscall",
"syscall.NameDnsDomain": "syscall",
"syscall.NameFullyQualifiedDN": "syscall",
"syscall.NameSamCompatible": "syscall",
"syscall.NameServicePrincipal": "syscall",
"syscall.NameUniqueId": "syscall",
"syscall.NameUnknown": "syscall",
"syscall.NameUserPrincipal": "syscall",
"syscall.Nanosleep": "syscall",
"syscall.NetApiBufferFree": "syscall",
"syscall.NetGetJoinInformation": "syscall",
"syscall.NetSetupDomainName": "syscall",
"syscall.NetSetupUnjoined": "syscall",
"syscall.NetSetupUnknownStatus": "syscall",
"syscall.NetSetupWorkgroupName": "syscall",
"syscall.NetUserGetInfo": "syscall",
"syscall.NetlinkMessage": "syscall",
"syscall.NetlinkRIB": "syscall",
"syscall.NetlinkRouteAttr": "syscall",
"syscall.NetlinkRouteRequest": "syscall",
"syscall.NewCallback": "syscall",
"syscall.NewCallbackCDecl": "syscall",
"syscall.NewLazyDLL": "syscall",
"syscall.NlAttr": "syscall",
"syscall.NlMsgerr": "syscall",
"syscall.NlMsghdr": "syscall",
"syscall.NsecToFiletime": "syscall",
"syscall.NsecToTimespec": "syscall",
"syscall.NsecToTimeval": "syscall",
"syscall.Ntohs": "syscall",
"syscall.OCRNL": "syscall",
"syscall.OFDEL": "syscall",
"syscall.OFILL": "syscall",
"syscall.OFIOGETBMAP": "syscall",
"syscall.OID_PKIX_KP_SERVER_AUTH": "syscall",
"syscall.OID_SERVER_GATED_CRYPTO": "syscall",
"syscall.OID_SGC_NETSCAPE": "syscall",
"syscall.OLCUC": "syscall",
"syscall.ONLCR": "syscall",
"syscall.ONLRET": "syscall",
"syscall.ONOCR": "syscall",
"syscall.ONOEOT": "syscall",
"syscall.OPEN_ALWAYS": "syscall",
"syscall.OPEN_EXISTING": "syscall",
"syscall.OPOST": "syscall",
"syscall.O_ACCMODE": "syscall",
"syscall.O_ALERT": "syscall",
"syscall.O_ALT_IO": "syscall",
"syscall.O_APPEND": "syscall",
"syscall.O_ASYNC": "syscall",
"syscall.O_CLOEXEC": "syscall",
"syscall.O_CREAT": "syscall",
"syscall.O_DIRECT": "syscall",
"syscall.O_DIRECTORY": "syscall",
"syscall.O_DSYNC": "syscall",
"syscall.O_EVTONLY": "syscall",
"syscall.O_EXCL": "syscall",
"syscall.O_EXEC": "syscall",
"syscall.O_EXLOCK": "syscall",
"syscall.O_FSYNC": "syscall",
"syscall.O_LARGEFILE": "syscall",
"syscall.O_NDELAY": "syscall",
"syscall.O_NOATIME": "syscall",
"syscall.O_NOCTTY": "syscall",
"syscall.O_NOFOLLOW": "syscall",
"syscall.O_NONBLOCK": "syscall",
"syscall.O_NOSIGPIPE": "syscall",
"syscall.O_POPUP": "syscall",
"syscall.O_RDONLY": "syscall",
"syscall.O_RDWR": "syscall",
"syscall.O_RSYNC": "syscall",
"syscall.O_SHLOCK": "syscall",
"syscall.O_SYMLINK": "syscall",
"syscall.O_SYNC": "syscall",
"syscall.O_TRUNC": "syscall",
"syscall.O_TTY_INIT": "syscall",
"syscall.O_WRONLY": "syscall",
"syscall.Open": "syscall",
"syscall.OpenCurrentProcessToken": "syscall",
"syscall.OpenProcess": "syscall",
"syscall.OpenProcessToken": "syscall",
"syscall.Openat": "syscall",
"syscall.Overlapped": "syscall",
"syscall.PACKET_ADD_MEMBERSHIP": "syscall",
"syscall.PACKET_BROADCAST": "syscall",
"syscall.PACKET_DROP_MEMBERSHIP": "syscall",
"syscall.PACKET_FASTROUTE": "syscall",
"syscall.PACKET_HOST": "syscall",
"syscall.PACKET_LOOPBACK": "syscall",
"syscall.PACKET_MR_ALLMULTI": "syscall",
"syscall.PACKET_MR_MULTICAST": "syscall",
"syscall.PACKET_MR_PROMISC": "syscall",
"syscall.PACKET_MULTICAST": "syscall",
"syscall.PACKET_OTHERHOST": "syscall",
"syscall.PACKET_OUTGOING": "syscall",
"syscall.PACKET_RECV_OUTPUT": "syscall",
"syscall.PACKET_RX_RING": "syscall",
"syscall.PACKET_STATISTICS": "syscall",
"syscall.PAGE_EXECUTE_READ": "syscall",
"syscall.PAGE_EXECUTE_READWRITE": "syscall",
"syscall.PAGE_EXECUTE_WRITECOPY": "syscall",
"syscall.PAGE_READONLY": "syscall",
"syscall.PAGE_READWRITE": "syscall",
"syscall.PAGE_WRITECOPY": "syscall",
"syscall.PARENB": "syscall",
"syscall.PARMRK": "syscall",
"syscall.PARODD": "syscall",
"syscall.PENDIN": "syscall",
"syscall.PFL_HIDDEN": "syscall",
"syscall.PFL_MATCHES_PROTOCOL_ZERO": "syscall",
"syscall.PFL_MULTIPLE_PROTO_ENTRIES": "syscall",
"syscall.PFL_NETWORKDIRECT_PROVIDER": "syscall",
"syscall.PFL_RECOMMENDED_PROTO_ENTRY": "syscall",
"syscall.PF_FLUSH": "syscall",
"syscall.PKCS_7_ASN_ENCODING": "syscall",
"syscall.PMC5_PIPELINE_FLUSH": "syscall",
"syscall.PRIO_PGRP": "syscall",
"syscall.PRIO_PROCESS": "syscall",
"syscall.PRIO_USER": "syscall",
"syscall.PRI_IOFLUSH": "syscall",
"syscall.PROCESS_QUERY_INFORMATION": "syscall",
"syscall.PROCESS_TERMINATE": "syscall",
"syscall.PROT_EXEC": "syscall",
"syscall.PROT_GROWSDOWN": "syscall",
"syscall.PROT_GROWSUP": "syscall",
"syscall.PROT_NONE": "syscall",
"syscall.PROT_READ": "syscall",
"syscall.PROT_WRITE": "syscall",
"syscall.PROV_DH_SCHANNEL": "syscall",
"syscall.PROV_DSS": "syscall",
"syscall.PROV_DSS_DH": "syscall",
"syscall.PROV_EC_ECDSA_FULL": "syscall",
"syscall.PROV_EC_ECDSA_SIG": "syscall",
"syscall.PROV_EC_ECNRA_FULL": "syscall",
"syscall.PROV_EC_ECNRA_SIG": "syscall",
"syscall.PROV_FORTEZZA": "syscall",
"syscall.PROV_INTEL_SEC": "syscall",
"syscall.PROV_MS_EXCHANGE": "syscall",
"syscall.PROV_REPLACE_OWF": "syscall",
"syscall.PROV_RNG": "syscall",
"syscall.PROV_RSA_AES": "syscall",
"syscall.PROV_RSA_FULL": "syscall",
"syscall.PROV_RSA_SCHANNEL": "syscall",
"syscall.PROV_RSA_SIG": "syscall",
"syscall.PROV_SPYRUS_LYNKS": "syscall",
"syscall.PROV_SSL": "syscall",
"syscall.PR_CAPBSET_DROP": "syscall",
"syscall.PR_CAPBSET_READ": "syscall",
"syscall.PR_CLEAR_SECCOMP_FILTER": "syscall",
"syscall.PR_ENDIAN_BIG": "syscall",
"syscall.PR_ENDIAN_LITTLE": "syscall",
"syscall.PR_ENDIAN_PPC_LITTLE": "syscall",
"syscall.PR_FPEMU_NOPRINT": "syscall",
"syscall.PR_FPEMU_SIGFPE": "syscall",
"syscall.PR_FP_EXC_ASYNC": "syscall",
"syscall.PR_FP_EXC_DISABLED": "syscall",
"syscall.PR_FP_EXC_DIV": "syscall",
"syscall.PR_FP_EXC_INV": "syscall",
"syscall.PR_FP_EXC_NONRECOV": "syscall",
"syscall.PR_FP_EXC_OVF": "syscall",
"syscall.PR_FP_EXC_PRECISE": "syscall",
"syscall.PR_FP_EXC_RES": "syscall",
"syscall.PR_FP_EXC_SW_ENABLE": "syscall",
"syscall.PR_FP_EXC_UND": "syscall",
"syscall.PR_GET_DUMPABLE": "syscall",
"syscall.PR_GET_ENDIAN": "syscall",
"syscall.PR_GET_FPEMU": "syscall",
"syscall.PR_GET_FPEXC": "syscall",
"syscall.PR_GET_KEEPCAPS": "syscall",
"syscall.PR_GET_NAME": "syscall",
"syscall.PR_GET_PDEATHSIG": "syscall",
"syscall.PR_GET_SECCOMP": "syscall",
"syscall.PR_GET_SECCOMP_FILTER": "syscall",
"syscall.PR_GET_SECUREBITS": "syscall",
"syscall.PR_GET_TIMERSLACK": "syscall",
"syscall.PR_GET_TIMING": "syscall",
"syscall.PR_GET_TSC": "syscall",
"syscall.PR_GET_UNALIGN": "syscall",
"syscall.PR_MCE_KILL": "syscall",
"syscall.PR_MCE_KILL_CLEAR": "syscall",
"syscall.PR_MCE_KILL_DEFAULT": "syscall",
"syscall.PR_MCE_KILL_EARLY": "syscall",
"syscall.PR_MCE_KILL_GET": "syscall",
"syscall.PR_MCE_KILL_LATE": "syscall",
"syscall.PR_MCE_KILL_SET": "syscall",
"syscall.PR_SECCOMP_FILTER_EVENT": "syscall",
"syscall.PR_SECCOMP_FILTER_SYSCALL": "syscall",
"syscall.PR_SET_DUMPABLE": "syscall",
"syscall.PR_SET_ENDIAN": "syscall",
"syscall.PR_SET_FPEMU": "syscall",
"syscall.PR_SET_FPEXC": "syscall",
"syscall.PR_SET_KEEPCAPS": "syscall",
"syscall.PR_SET_NAME": "syscall",
"syscall.PR_SET_PDEATHSIG": "syscall",
"syscall.PR_SET_PTRACER": "syscall",
"syscall.PR_SET_SECCOMP": "syscall",
"syscall.PR_SET_SECCOMP_FILTER": "syscall",
"syscall.PR_SET_SECUREBITS": "syscall",
"syscall.PR_SET_TIMERSLACK": "syscall",
"syscall.PR_SET_TIMING": "syscall",
"syscall.PR_SET_TSC": "syscall",
"syscall.PR_SET_UNALIGN": "syscall",
"syscall.PR_TASK_PERF_EVENTS_DISABLE": "syscall",
"syscall.PR_TASK_PERF_EVENTS_ENABLE": "syscall",
"syscall.PR_TIMING_STATISTICAL": "syscall",
"syscall.PR_TIMING_TIMESTAMP": "syscall",
"syscall.PR_TSC_ENABLE": "syscall",
"syscall.PR_TSC_SIGSEGV": "syscall",
"syscall.PR_UNALIGN_NOPRINT": "syscall",
"syscall.PR_UNALIGN_SIGBUS": "syscall",
"syscall.PTRACE_ARCH_PRCTL": "syscall",
"syscall.PTRACE_ATTACH": "syscall",
"syscall.PTRACE_CONT": "syscall",
"syscall.PTRACE_DETACH": "syscall",
"syscall.PTRACE_EVENT_CLONE": "syscall",
"syscall.PTRACE_EVENT_EXEC": "syscall",
"syscall.PTRACE_EVENT_EXIT": "syscall",
"syscall.PTRACE_EVENT_FORK": "syscall",
"syscall.PTRACE_EVENT_VFORK": "syscall",
"syscall.PTRACE_EVENT_VFORK_DONE": "syscall",
"syscall.PTRACE_GETCRUNCHREGS": "syscall",
"syscall.PTRACE_GETEVENTMSG": "syscall",
"syscall.PTRACE_GETFPREGS": "syscall",
"syscall.PTRACE_GETFPXREGS": "syscall",
"syscall.PTRACE_GETHBPREGS": "syscall",
"syscall.PTRACE_GETREGS": "syscall",
"syscall.PTRACE_GETREGSET": "syscall",
"syscall.PTRACE_GETSIGINFO": "syscall",
"syscall.PTRACE_GETVFPREGS": "syscall",
"syscall.PTRACE_GETWMMXREGS": "syscall",
"syscall.PTRACE_GET_THREAD_AREA": "syscall",
"syscall.PTRACE_KILL": "syscall",
"syscall.PTRACE_OLDSETOPTIONS": "syscall",
"syscall.PTRACE_O_MASK": "syscall",
"syscall.PTRACE_O_TRACECLONE": "syscall",
"syscall.PTRACE_O_TRACEEXEC": "syscall",
"syscall.PTRACE_O_TRACEEXIT": "syscall",
"syscall.PTRACE_O_TRACEFORK": "syscall",
"syscall.PTRACE_O_TRACESYSGOOD": "syscall",
"syscall.PTRACE_O_TRACEVFORK": "syscall",
"syscall.PTRACE_O_TRACEVFORKDONE": "syscall",
"syscall.PTRACE_PEEKDATA": "syscall",
"syscall.PTRACE_PEEKTEXT": "syscall",
"syscall.PTRACE_PEEKUSR": "syscall",
"syscall.PTRACE_POKEDATA": "syscall",
"syscall.PTRACE_POKETEXT": "syscall",
"syscall.PTRACE_POKEUSR": "syscall",
"syscall.PTRACE_SETCRUNCHREGS": "syscall",
"syscall.PTRACE_SETFPREGS": "syscall",
"syscall.PTRACE_SETFPXREGS": "syscall",
"syscall.PTRACE_SETHBPREGS": "syscall",
"syscall.PTRACE_SETOPTIONS": "syscall",
"syscall.PTRACE_SETREGS": "syscall",
"syscall.PTRACE_SETREGSET": "syscall",
"syscall.PTRACE_SETSIGINFO": "syscall",
"syscall.PTRACE_SETVFPREGS": "syscall",
"syscall.PTRACE_SETWMMXREGS": "syscall",
"syscall.PTRACE_SET_SYSCALL": "syscall",
"syscall.PTRACE_SET_THREAD_AREA": "syscall",
"syscall.PTRACE_SINGLEBLOCK": "syscall",
"syscall.PTRACE_SINGLESTEP": "syscall",
"syscall.PTRACE_SYSCALL": "syscall",
"syscall.PTRACE_SYSEMU": "syscall",
"syscall.PTRACE_SYSEMU_SINGLESTEP": "syscall",
"syscall.PTRACE_TRACEME": "syscall",
"syscall.PT_ATTACH": "syscall",
"syscall.PT_ATTACHEXC": "syscall",
"syscall.PT_CONTINUE": "syscall",
"syscall.PT_DATA_ADDR": "syscall",
"syscall.PT_DENY_ATTACH": "syscall",
"syscall.PT_DETACH": "syscall",
"syscall.PT_FIRSTMACH": "syscall",
"syscall.PT_FORCEQUOTA": "syscall",
"syscall.PT_KILL": "syscall",
"syscall.PT_MASK": "syscall",
"syscall.PT_READ_D": "syscall",
"syscall.PT_READ_I": "syscall",
"syscall.PT_READ_U": "syscall",
"syscall.PT_SIGEXC": "syscall",
"syscall.PT_STEP": "syscall",
"syscall.PT_TEXT_ADDR": "syscall",
"syscall.PT_TEXT_END_ADDR": "syscall",
"syscall.PT_THUPDATE": "syscall",
"syscall.PT_TRACE_ME": "syscall",
"syscall.PT_WRITE_D": "syscall",
"syscall.PT_WRITE_I": "syscall",
"syscall.PT_WRITE_U": "syscall",
"syscall.ParseDirent": "syscall",
"syscall.ParseNetlinkMessage": "syscall",
"syscall.ParseNetlinkRouteAttr": "syscall",
"syscall.ParseRoutingMessage": "syscall",
"syscall.ParseRoutingSockaddr": "syscall",
"syscall.ParseSocketControlMessage": "syscall",
"syscall.ParseUnixCredentials": "syscall",
"syscall.ParseUnixRights": "syscall",
"syscall.PathMax": "syscall",
"syscall.Pathconf": "syscall",
"syscall.Pause": "syscall",
"syscall.Pipe": "syscall",
"syscall.Pipe2": "syscall",
"syscall.PivotRoot": "syscall",
"syscall.PostQueuedCompletionStatus": "syscall",
"syscall.Pread": "syscall",
"syscall.Proc": "syscall",
"syscall.ProcAttr": "syscall",
"syscall.Process32First": "syscall",
"syscall.Process32Next": "syscall",
"syscall.ProcessEntry32": "syscall",
"syscall.ProcessInformation": "syscall",
"syscall.Protoent": "syscall",
"syscall.PtraceAttach": "syscall",
"syscall.PtraceCont": "syscall",
"syscall.PtraceDetach": "syscall",
"syscall.PtraceGetEventMsg": "syscall",
"syscall.PtraceGetRegs": "syscall",
"syscall.PtracePeekData": "syscall",
"syscall.PtracePeekText": "syscall",
"syscall.PtracePokeData": "syscall",
"syscall.PtracePokeText": "syscall",
"syscall.PtraceRegs": "syscall",
"syscall.PtraceSetOptions": "syscall",
"syscall.PtraceSetRegs": "syscall",
"syscall.PtraceSingleStep": "syscall",
"syscall.PtraceSyscall": "syscall",
"syscall.Pwrite": "syscall",
"syscall.REG_BINARY": "syscall",
"syscall.REG_DWORD": "syscall",
"syscall.REG_DWORD_BIG_ENDIAN": "syscall",
"syscall.REG_DWORD_LITTLE_ENDIAN": "syscall",
"syscall.REG_EXPAND_SZ": "syscall",
"syscall.REG_FULL_RESOURCE_DESCRIPTOR": "syscall",
"syscall.REG_LINK": "syscall",
"syscall.REG_MULTI_SZ": "syscall",
"syscall.REG_NONE": "syscall",
"syscall.REG_QWORD": "syscall",
"syscall.REG_QWORD_LITTLE_ENDIAN": "syscall",
"syscall.REG_RESOURCE_LIST": "syscall",
"syscall.REG_RESOURCE_REQUIREMENTS_LIST": "syscall",
"syscall.REG_SZ": "syscall",
"syscall.RLIMIT_AS": "syscall",
"syscall.RLIMIT_CORE": "syscall",
"syscall.RLIMIT_CPU": "syscall",
"syscall.RLIMIT_DATA": "syscall",
"syscall.RLIMIT_FSIZE": "syscall",
"syscall.RLIMIT_NOFILE": "syscall",
"syscall.RLIMIT_STACK": "syscall",
"syscall.RLIM_INFINITY": "syscall",
"syscall.RTAX_ADVMSS": "syscall",
"syscall.RTAX_AUTHOR": "syscall",
"syscall.RTAX_BRD": "syscall",
"syscall.RTAX_CWND": "syscall",
"syscall.RTAX_DST": "syscall",
"syscall.RTAX_FEATURES": "syscall",
"syscall.RTAX_FEATURE_ALLFRAG": "syscall",
"syscall.RTAX_FEATURE_ECN": "syscall",
"syscall.RTAX_FEATURE_SACK": "syscall",
"syscall.RTAX_FEATURE_TIMESTAMP": "syscall",
"syscall.RTAX_GATEWAY": "syscall",
"syscall.RTAX_GENMASK": "syscall",
"syscall.RTAX_HOPLIMIT": "syscall",
"syscall.RTAX_IFA": "syscall",
"syscall.RTAX_IFP": "syscall",
"syscall.RTAX_INITCWND": "syscall",
"syscall.RTAX_INITRWND": "syscall",
"syscall.RTAX_LABEL": "syscall",
"syscall.RTAX_LOCK": "syscall",
"syscall.RTAX_MAX": "syscall",
"syscall.RTAX_MTU": "syscall",
"syscall.RTAX_NETMASK": "syscall",
"syscall.RTAX_REORDERING": "syscall",
"syscall.RTAX_RTO_MIN": "syscall",
"syscall.RTAX_RTT": "syscall",
"syscall.RTAX_RTTVAR": "syscall",
"syscall.RTAX_SRC": "syscall",
"syscall.RTAX_SRCMASK": "syscall",
"syscall.RTAX_SSTHRESH": "syscall",
"syscall.RTAX_TAG": "syscall",
"syscall.RTAX_UNSPEC": "syscall",
"syscall.RTAX_WINDOW": "syscall",
"syscall.RTA_ALIGNTO": "syscall",
"syscall.RTA_AUTHOR": "syscall",
"syscall.RTA_BRD": "syscall",
"syscall.RTA_CACHEINFO": "syscall",
"syscall.RTA_DST": "syscall",
"syscall.RTA_FLOW": "syscall",
"syscall.RTA_GATEWAY": "syscall",
"syscall.RTA_GENMASK": "syscall",
"syscall.RTA_IFA": "syscall",
"syscall.RTA_IFP": "syscall",
"syscall.RTA_IIF": "syscall",
"syscall.RTA_LABEL": "syscall",
"syscall.RTA_MAX": "syscall",
"syscall.RTA_METRICS": "syscall",
"syscall.RTA_MULTIPATH": "syscall",
"syscall.RTA_NETMASK": "syscall",
"syscall.RTA_OIF": "syscall",
"syscall.RTA_PREFSRC": "syscall",
"syscall.RTA_PRIORITY": "syscall",
"syscall.RTA_SRC": "syscall",
"syscall.RTA_SRCMASK": "syscall",
"syscall.RTA_TABLE": "syscall",
"syscall.RTA_TAG": "syscall",
"syscall.RTA_UNSPEC": "syscall",
"syscall.RTCF_DIRECTSRC": "syscall",
"syscall.RTCF_DOREDIRECT": "syscall",
"syscall.RTCF_LOG": "syscall",
"syscall.RTCF_MASQ": "syscall",
"syscall.RTCF_NAT": "syscall",
"syscall.RTCF_VALVE": "syscall",
"syscall.RTF_ADDRCLASSMASK": "syscall",
"syscall.RTF_ADDRCONF": "syscall",
"syscall.RTF_ALLONLINK": "syscall",
"syscall.RTF_ANNOUNCE": "syscall",
"syscall.RTF_BLACKHOLE": "syscall",
"syscall.RTF_BROADCAST": "syscall",
"syscall.RTF_CACHE": "syscall",
"syscall.RTF_CLONED": "syscall",
"syscall.RTF_CLONING": "syscall",
"syscall.RTF_CONDEMNED": "syscall",
"syscall.RTF_DEFAULT": "syscall",
"syscall.RTF_DELCLONE": "syscall",
"syscall.RTF_DONE": "syscall",
"syscall.RTF_DYNAMIC": "syscall",
"syscall.RTF_FLOW": "syscall",
"syscall.RTF_FMASK": "syscall",
"syscall.RTF_GATEWAY": "syscall",
"syscall.RTF_GWFLAG_COMPAT": "syscall",
"syscall.RTF_HOST": "syscall",
"syscall.RTF_IFREF": "syscall",
"syscall.RTF_IFSCOPE": "syscall",
"syscall.RTF_INTERFACE": "syscall",
"syscall.RTF_IRTT": "syscall",
"syscall.RTF_LINKRT": "syscall",
"syscall.RTF_LLDATA": "syscall",
"syscall.RTF_LLINFO": "syscall",
"syscall.RTF_LOCAL": "syscall",
"syscall.RTF_MASK": "syscall",
"syscall.RTF_MODIFIED": "syscall",
"syscall.RTF_MPATH": "syscall",
"syscall.RTF_MPLS": "syscall",
"syscall.RTF_MSS": "syscall",
"syscall.RTF_MTU": "syscall",
"syscall.RTF_MULTICAST": "syscall",
"syscall.RTF_NAT": "syscall",
"syscall.RTF_NOFORWARD": "syscall",
"syscall.RTF_NONEXTHOP": "syscall",
"syscall.RTF_NOPMTUDISC": "syscall",
"syscall.RTF_PERMANENT_ARP": "syscall",
"syscall.RTF_PINNED": "syscall",
"syscall.RTF_POLICY": "syscall",
"syscall.RTF_PRCLONING": "syscall",
"syscall.RTF_PROTO1": "syscall",
"syscall.RTF_PROTO2": "syscall",
"syscall.RTF_PROTO3": "syscall",
"syscall.RTF_REINSTATE": "syscall",
"syscall.RTF_REJECT": "syscall",
"syscall.RTF_RNH_LOCKED": "syscall",
"syscall.RTF_SOURCE": "syscall",
"syscall.RTF_SRC": "syscall",
"syscall.RTF_STATIC": "syscall",
"syscall.RTF_STICKY": "syscall",
"syscall.RTF_THROW": "syscall",
"syscall.RTF_TUNNEL": "syscall",
"syscall.RTF_UP": "syscall",
"syscall.RTF_USETRAILERS": "syscall",
"syscall.RTF_WASCLONED": "syscall",
"syscall.RTF_WINDOW": "syscall",
"syscall.RTF_XRESOLVE": "syscall",
"syscall.RTM_ADD": "syscall",
"syscall.RTM_BASE": "syscall",
"syscall.RTM_CHANGE": "syscall",
"syscall.RTM_CHGADDR": "syscall",
"syscall.RTM_DELACTION": "syscall",
"syscall.RTM_DELADDR": "syscall",
"syscall.RTM_DELADDRLABEL": "syscall",
"syscall.RTM_DELETE": "syscall",
"syscall.RTM_DELLINK": "syscall",
"syscall.RTM_DELMADDR": "syscall",
"syscall.RTM_DELNEIGH": "syscall",
"syscall.RTM_DELQDISC": "syscall",
"syscall.RTM_DELROUTE": "syscall",
"syscall.RTM_DELRULE": "syscall",
"syscall.RTM_DELTCLASS": "syscall",
"syscall.RTM_DELTFILTER": "syscall",
"syscall.RTM_DESYNC": "syscall",
"syscall.RTM_F_CLONED": "syscall",
"syscall.RTM_F_EQUALIZE": "syscall",
"syscall.RTM_F_NOTIFY": "syscall",
"syscall.RTM_F_PREFIX": "syscall",
"syscall.RTM_GET": "syscall",
"syscall.RTM_GET2": "syscall",
"syscall.RTM_GETACTION": "syscall",
"syscall.RTM_GETADDR": "syscall",
"syscall.RTM_GETADDRLABEL": "syscall",
"syscall.RTM_GETANYCAST": "syscall",
"syscall.RTM_GETDCB": "syscall",
"syscall.RTM_GETLINK": "syscall",
"syscall.RTM_GETMULTICAST": "syscall",
"syscall.RTM_GETNEIGH": "syscall",
"syscall.RTM_GETNEIGHTBL": "syscall",
"syscall.RTM_GETQDISC": "syscall",
"syscall.RTM_GETROUTE": "syscall",
"syscall.RTM_GETRULE": "syscall",
"syscall.RTM_GETTCLASS": "syscall",
"syscall.RTM_GETTFILTER": "syscall",
"syscall.RTM_IEEE80211": "syscall",
"syscall.RTM_IFANNOUNCE": "syscall",
"syscall.RTM_IFINFO": "syscall",
"syscall.RTM_IFINFO2": "syscall",
"syscall.RTM_LLINFO_UPD": "syscall",
"syscall.RTM_LOCK": "syscall",
"syscall.RTM_LOSING": "syscall",
"syscall.RTM_MAX": "syscall",
"syscall.RTM_MAXSIZE": "syscall",
"syscall.RTM_MISS": "syscall",
"syscall.RTM_NEWACTION": "syscall",
"syscall.RTM_NEWADDR": "syscall",
"syscall.RTM_NEWADDRLABEL": "syscall",
"syscall.RTM_NEWLINK": "syscall",
"syscall.RTM_NEWMADDR": "syscall",
"syscall.RTM_NEWMADDR2": "syscall",
"syscall.RTM_NEWNDUSEROPT": "syscall",
"syscall.RTM_NEWNEIGH": "syscall",
"syscall.RTM_NEWNEIGHTBL": "syscall",
"syscall.RTM_NEWPREFIX": "syscall",
"syscall.RTM_NEWQDISC": "syscall",
"syscall.RTM_NEWROUTE": "syscall",
"syscall.RTM_NEWRULE": "syscall",
"syscall.RTM_NEWTCLASS": "syscall",
"syscall.RTM_NEWTFILTER": "syscall",
"syscall.RTM_NR_FAMILIES": "syscall",
"syscall.RTM_NR_MSGTYPES": "syscall",
"syscall.RTM_OIFINFO": "syscall",
"syscall.RTM_OLDADD": "syscall",
"syscall.RTM_OLDDEL": "syscall",
"syscall.RTM_OOIFINFO": "syscall",
"syscall.RTM_REDIRECT": "syscall",
"syscall.RTM_RESOLVE": "syscall",
"syscall.RTM_RTTUNIT": "syscall",
"syscall.RTM_SETDCB": "syscall",
"syscall.RTM_SETGATE": "syscall",
"syscall.RTM_SETLINK": "syscall",
"syscall.RTM_SETNEIGHTBL": "syscall",
"syscall.RTM_VERSION": "syscall",
"syscall.RTNH_ALIGNTO": "syscall",
"syscall.RTNH_F_DEAD": "syscall",
"syscall.RTNH_F_ONLINK": "syscall",
"syscall.RTNH_F_PERVASIVE": "syscall",
"syscall.RTNLGRP_IPV4_IFADDR": "syscall",
"syscall.RTNLGRP_IPV4_MROUTE": "syscall",
"syscall.RTNLGRP_IPV4_ROUTE": "syscall",
"syscall.RTNLGRP_IPV4_RULE": "syscall",
"syscall.RTNLGRP_IPV6_IFADDR": "syscall",
"syscall.RTNLGRP_IPV6_IFINFO": "syscall",
"syscall.RTNLGRP_IPV6_MROUTE": "syscall",
"syscall.RTNLGRP_IPV6_PREFIX": "syscall",
"syscall.RTNLGRP_IPV6_ROUTE": "syscall",
"syscall.RTNLGRP_IPV6_RULE": "syscall",
"syscall.RTNLGRP_LINK": "syscall",
"syscall.RTNLGRP_ND_USEROPT": "syscall",
"syscall.RTNLGRP_NEIGH": "syscall",
"syscall.RTNLGRP_NONE": "syscall",
"syscall.RTNLGRP_NOTIFY": "syscall",
"syscall.RTNLGRP_TC": "syscall",
"syscall.RTN_ANYCAST": "syscall",
"syscall.RTN_BLACKHOLE": "syscall",
"syscall.RTN_BROADCAST": "syscall",
"syscall.RTN_LOCAL": "syscall",
"syscall.RTN_MAX": "syscall",
"syscall.RTN_MULTICAST": "syscall",
"syscall.RTN_NAT": "syscall",
"syscall.RTN_PROHIBIT": "syscall",
"syscall.RTN_THROW": "syscall",
"syscall.RTN_UNICAST": "syscall",
"syscall.RTN_UNREACHABLE": "syscall",
"syscall.RTN_UNSPEC": "syscall",
"syscall.RTN_XRESOLVE": "syscall",
"syscall.RTPROT_BIRD": "syscall",
"syscall.RTPROT_BOOT": "syscall",
"syscall.RTPROT_DHCP": "syscall",
"syscall.RTPROT_DNROUTED": "syscall",
"syscall.RTPROT_GATED": "syscall",
"syscall.RTPROT_KERNEL": "syscall",
"syscall.RTPROT_MRT": "syscall",
"syscall.RTPROT_NTK": "syscall",
"syscall.RTPROT_RA": "syscall",
"syscall.RTPROT_REDIRECT": "syscall",
"syscall.RTPROT_STATIC": "syscall",
"syscall.RTPROT_UNSPEC": "syscall",
"syscall.RTPROT_XORP": "syscall",
"syscall.RTPROT_ZEBRA": "syscall",
"syscall.RTV_EXPIRE": "syscall",
"syscall.RTV_HOPCOUNT": "syscall",
"syscall.RTV_MTU": "syscall",
"syscall.RTV_RPIPE": "syscall",
"syscall.RTV_RTT": "syscall",
"syscall.RTV_RTTVAR": "syscall",
"syscall.RTV_SPIPE": "syscall",
"syscall.RTV_SSTHRESH": "syscall",
"syscall.RTV_WEIGHT": "syscall",
"syscall.RT_CACHING_CONTEXT": "syscall",
"syscall.RT_CLASS_DEFAULT": "syscall",
"syscall.RT_CLASS_LOCAL": "syscall",
"syscall.RT_CLASS_MAIN": "syscall",
"syscall.RT_CLASS_MAX": "syscall",
"syscall.RT_CLASS_UNSPEC": "syscall",
"syscall.RT_DEFAULT_FIB": "syscall",
"syscall.RT_NORTREF": "syscall",
"syscall.RT_SCOPE_HOST": "syscall",
"syscall.RT_SCOPE_LINK": "syscall",
"syscall.RT_SCOPE_NOWHERE": "syscall",
"syscall.RT_SCOPE_SITE": "syscall",
"syscall.RT_SCOPE_UNIVERSE": "syscall",
"syscall.RT_TABLEID_MAX": "syscall",
"syscall.RT_TABLE_COMPAT": "syscall",
"syscall.RT_TABLE_DEFAULT": "syscall",
"syscall.RT_TABLE_LOCAL": "syscall",
"syscall.RT_TABLE_MAIN": "syscall",
"syscall.RT_TABLE_MAX": "syscall",
"syscall.RT_TABLE_UNSPEC": "syscall",
"syscall.RUSAGE_CHILDREN": "syscall",
"syscall.RUSAGE_SELF": "syscall",
"syscall.RUSAGE_THREAD": "syscall",
"syscall.Radvisory_t": "syscall",
"syscall.RawSockaddr": "syscall",
"syscall.RawSockaddrAny": "syscall",
"syscall.RawSockaddrDatalink": "syscall",
"syscall.RawSockaddrInet4": "syscall",
"syscall.RawSockaddrInet6": "syscall",
"syscall.RawSockaddrLinklayer": "syscall",
"syscall.RawSockaddrNetlink": "syscall",
"syscall.RawSockaddrUnix": "syscall",
"syscall.RawSyscall": "syscall",
"syscall.RawSyscall6": "syscall",
"syscall.Read": "syscall",
"syscall.ReadConsole": "syscall",
"syscall.ReadDirectoryChanges": "syscall",
"syscall.ReadDirent": "syscall",
"syscall.ReadFile": "syscall",
"syscall.Readlink": "syscall",
"syscall.Reboot": "syscall",
"syscall.Recvfrom": "syscall",
"syscall.Recvmsg": "syscall",
"syscall.RegCloseKey": "syscall",
"syscall.RegEnumKeyEx": "syscall",
"syscall.RegOpenKeyEx": "syscall",
"syscall.RegQueryInfoKey": "syscall",
"syscall.RegQueryValueEx": "syscall",
"syscall.RemoveDirectory": "syscall",
"syscall.Removexattr": "syscall",
"syscall.Rename": "syscall",
"syscall.Renameat": "syscall",
"syscall.Revoke": "syscall",
"syscall.Rlimit": "syscall",
"syscall.Rmdir": "syscall",
"syscall.RouteMessage": "syscall",
"syscall.RouteRIB": "syscall",
"syscall.RtAttr": "syscall",
"syscall.RtGenmsg": "syscall",
"syscall.RtMetrics": "syscall",
"syscall.RtMsg": "syscall",
"syscall.RtMsghdr": "syscall",
"syscall.RtNexthop": "syscall",
"syscall.Rusage": "syscall",
"syscall.SCM_BINTIME": "syscall",
"syscall.SCM_CREDENTIALS": "syscall",
"syscall.SCM_CREDS": "syscall",
"syscall.SCM_RIGHTS": "syscall",
"syscall.SCM_TIMESTAMP": "syscall",
"syscall.SCM_TIMESTAMPING": "syscall",
"syscall.SCM_TIMESTAMPNS": "syscall",
"syscall.SCM_TIMESTAMP_MONOTONIC": "syscall",
"syscall.SHUT_RD": "syscall",
"syscall.SHUT_RDWR": "syscall",
"syscall.SHUT_WR": "syscall",
"syscall.SID": "syscall",
"syscall.SIDAndAttributes": "syscall",
"syscall.SIGABRT": "syscall",
"syscall.SIGALRM": "syscall",
"syscall.SIGBUS": "syscall",
"syscall.SIGCHLD": "syscall",
"syscall.SIGCLD": "syscall",
"syscall.SIGCONT": "syscall",
"syscall.SIGEMT": "syscall",
"syscall.SIGFPE": "syscall",
"syscall.SIGHUP": "syscall",
"syscall.SIGILL": "syscall",
"syscall.SIGINFO": "syscall",
"syscall.SIGINT": "syscall",
"syscall.SIGIO": "syscall",
"syscall.SIGIOT": "syscall",
"syscall.SIGKILL": "syscall",
"syscall.SIGLIBRT": "syscall",
"syscall.SIGLWP": "syscall",
"syscall.SIGPIPE": "syscall",
"syscall.SIGPOLL": "syscall",
"syscall.SIGPROF": "syscall",
"syscall.SIGPWR": "syscall",
"syscall.SIGQUIT": "syscall",
"syscall.SIGSEGV": "syscall",
"syscall.SIGSTKFLT": "syscall",
"syscall.SIGSTOP": "syscall",
"syscall.SIGSYS": "syscall",
"syscall.SIGTERM": "syscall",
"syscall.SIGTHR": "syscall",
"syscall.SIGTRAP": "syscall",
"syscall.SIGTSTP": "syscall",
"syscall.SIGTTIN": "syscall",
"syscall.SIGTTOU": "syscall",
"syscall.SIGUNUSED": "syscall",
"syscall.SIGURG": "syscall",
"syscall.SIGUSR1": "syscall",
"syscall.SIGUSR2": "syscall",
"syscall.SIGVTALRM": "syscall",
"syscall.SIGWINCH": "syscall",
"syscall.SIGXCPU": "syscall",
"syscall.SIGXFSZ": "syscall",
"syscall.SIOCADDDLCI": "syscall",
"syscall.SIOCADDMULTI": "syscall",
"syscall.SIOCADDRT": "syscall",
"syscall.SIOCAIFADDR": "syscall",
"syscall.SIOCAIFGROUP": "syscall",
"syscall.SIOCALIFADDR": "syscall",
"syscall.SIOCARPIPLL": "syscall",
"syscall.SIOCATMARK": "syscall",
"syscall.SIOCAUTOADDR": "syscall",
"syscall.SIOCAUTONETMASK": "syscall",
"syscall.SIOCBRDGADD": "syscall",
"syscall.SIOCBRDGADDS": "syscall",
"syscall.SIOCBRDGARL": "syscall",
"syscall.SIOCBRDGDADDR": "syscall",
"syscall.SIOCBRDGDEL": "syscall",
"syscall.SIOCBRDGDELS": "syscall",
"syscall.SIOCBRDGFLUSH": "syscall",
"syscall.SIOCBRDGFRL": "syscall",
"syscall.SIOCBRDGGCACHE": "syscall",
"syscall.SIOCBRDGGFD": "syscall",
"syscall.SIOCBRDGGHT": "syscall",
"syscall.SIOCBRDGGIFFLGS": "syscall",
"syscall.SIOCBRDGGMA": "syscall",
"syscall.SIOCBRDGGPARAM": "syscall",
"syscall.SIOCBRDGGPRI": "syscall",
"syscall.SIOCBRDGGRL": "syscall",
"syscall.SIOCBRDGGSIFS": "syscall",
"syscall.SIOCBRDGGTO": "syscall",
"syscall.SIOCBRDGIFS": "syscall",
"syscall.SIOCBRDGRTS": "syscall",
"syscall.SIOCBRDGSADDR": "syscall",
"syscall.SIOCBRDGSCACHE": "syscall",
"syscall.SIOCBRDGSFD": "syscall",
"syscall.SIOCBRDGSHT": "syscall",
"syscall.SIOCBRDGSIFCOST": "syscall",
"syscall.SIOCBRDGSIFFLGS": "syscall",
"syscall.SIOCBRDGSIFPRIO": "syscall",
"syscall.SIOCBRDGSMA": "syscall",
"syscall.SIOCBRDGSPRI": "syscall",
"syscall.SIOCBRDGSPROTO": "syscall",
"syscall.SIOCBRDGSTO": "syscall",
"syscall.SIOCBRDGSTXHC": "syscall",
"syscall.SIOCDARP": "syscall",
"syscall.SIOCDELDLCI": "syscall",
"syscall.SIOCDELMULTI": "syscall",
"syscall.SIOCDELRT": "syscall",
"syscall.SIOCDEVPRIVATE": "syscall",
"syscall.SIOCDIFADDR": "syscall",
"syscall.SIOCDIFGROUP": "syscall",
"syscall.SIOCDIFPHYADDR": "syscall",
"syscall.SIOCDLIFADDR": "syscall",
"syscall.SIOCDRARP": "syscall",
"syscall.SIOCGARP": "syscall",
"syscall.SIOCGDRVSPEC": "syscall",
"syscall.SIOCGETKALIVE": "syscall",
"syscall.SIOCGETLABEL": "syscall",
"syscall.SIOCGETPFLOW": "syscall",
"syscall.SIOCGETPFSYNC": "syscall",
"syscall.SIOCGETSGCNT": "syscall",
"syscall.SIOCGETVIFCNT": "syscall",
"syscall.SIOCGETVLAN": "syscall",
"syscall.SIOCGHIWAT": "syscall",
"syscall.SIOCGIFADDR": "syscall",
"syscall.SIOCGIFADDRPREF": "syscall",
"syscall.SIOCGIFALIAS": "syscall",
"syscall.SIOCGIFALTMTU": "syscall",
"syscall.SIOCGIFASYNCMAP": "syscall",
"syscall.SIOCGIFBOND": "syscall",
"syscall.SIOCGIFBR": "syscall",
"syscall.SIOCGIFBRDADDR": "syscall",
"syscall.SIOCGIFCAP": "syscall",
"syscall.SIOCGIFCONF": "syscall",
"syscall.SIOCGIFCOUNT": "syscall",
"syscall.SIOCGIFDATA": "syscall",
"syscall.SIOCGIFDESCR": "syscall",
"syscall.SIOCGIFDEVMTU": "syscall",
"syscall.SIOCGIFDLT": "syscall",
"syscall.SIOCGIFDSTADDR": "syscall",
"syscall.SIOCGIFENCAP": "syscall",
"syscall.SIOCGIFFIB": "syscall",
"syscall.SIOCGIFFLAGS": "syscall",
"syscall.SIOCGIFGATTR": "syscall",
"syscall.SIOCGIFGENERIC": "syscall",
"syscall.SIOCGIFGMEMB": "syscall",
"syscall.SIOCGIFGROUP": "syscall",
"syscall.SIOCGIFHARDMTU": "syscall",
"syscall.SIOCGIFHWADDR": "syscall",
"syscall.SIOCGIFINDEX": "syscall",
"syscall.SIOCGIFKPI": "syscall",
"syscall.SIOCGIFMAC": "syscall",
"syscall.SIOCGIFMAP": "syscall",
"syscall.SIOCGIFMEDIA": "syscall",
"syscall.SIOCGIFMEM": "syscall",
"syscall.SIOCGIFMETRIC": "syscall",
"syscall.SIOCGIFMTU": "syscall",
"syscall.SIOCGIFNAME": "syscall",
"syscall.SIOCGIFNETMASK": "syscall",
"syscall.SIOCGIFPDSTADDR": "syscall",
"syscall.SIOCGIFPFLAGS": "syscall",
"syscall.SIOCGIFPHYS": "syscall",
"syscall.SIOCGIFPRIORITY": "syscall",
"syscall.SIOCGIFPSRCADDR": "syscall",
"syscall.SIOCGIFRDOMAIN": "syscall",
"syscall.SIOCGIFRTLABEL": "syscall",
"syscall.SIOCGIFSLAVE": "syscall",
"syscall.SIOCGIFSTATUS": "syscall",
"syscall.SIOCGIFTIMESLOT": "syscall",
"syscall.SIOCGIFTXQLEN": "syscall",
"syscall.SIOCGIFVLAN": "syscall",
"syscall.SIOCGIFWAKEFLAGS": "syscall",
"syscall.SIOCGIFXFLAGS": "syscall",
"syscall.SIOCGLIFADDR": "syscall",
"syscall.SIOCGLIFPHYADDR": "syscall",
"syscall.SIOCGLIFPHYRTABLE": "syscall",
"syscall.SIOCGLIFPHYTTL": "syscall",
"syscall.SIOCGLINKSTR": "syscall",
"syscall.SIOCGLOWAT": "syscall",
"syscall.SIOCGPGRP": "syscall",
"syscall.SIOCGPRIVATE_0": "syscall",
"syscall.SIOCGPRIVATE_1": "syscall",
"syscall.SIOCGRARP": "syscall",
"syscall.SIOCGSPPPPARAMS": "syscall",
"syscall.SIOCGSTAMP": "syscall",
"syscall.SIOCGSTAMPNS": "syscall",
"syscall.SIOCGVH": "syscall",
"syscall.SIOCGVNETID": "syscall",
"syscall.SIOCIFCREATE": "syscall",
"syscall.SIOCIFCREATE2": "syscall",
"syscall.SIOCIFDESTROY": "syscall",
"syscall.SIOCIFGCLONERS": "syscall",
"syscall.SIOCINITIFADDR": "syscall",
"syscall.SIOCPROTOPRIVATE": "syscall",
"syscall.SIOCRSLVMULTI": "syscall",
"syscall.SIOCRTMSG": "syscall",
"syscall.SIOCSARP": "syscall",
"syscall.SIOCSDRVSPEC": "syscall",
"syscall.SIOCSETKALIVE": "syscall",
"syscall.SIOCSETLABEL": "syscall",
"syscall.SIOCSETPFLOW": "syscall",
"syscall.SIOCSETPFSYNC": "syscall",
"syscall.SIOCSETVLAN": "syscall",
"syscall.SIOCSHIWAT": "syscall",
"syscall.SIOCSIFADDR": "syscall",
"syscall.SIOCSIFADDRPREF": "syscall",
"syscall.SIOCSIFALTMTU": "syscall",
"syscall.SIOCSIFASYNCMAP": "syscall",
"syscall.SIOCSIFBOND": "syscall",
"syscall.SIOCSIFBR": "syscall",
"syscall.SIOCSIFBRDADDR": "syscall",
"syscall.SIOCSIFCAP": "syscall",
"syscall.SIOCSIFDESCR": "syscall",
"syscall.SIOCSIFDSTADDR": "syscall",
"syscall.SIOCSIFENCAP": "syscall",
"syscall.SIOCSIFFIB": "syscall",
"syscall.SIOCSIFFLAGS": "syscall",
"syscall.SIOCSIFGATTR": "syscall",
"syscall.SIOCSIFGENERIC": "syscall",
"syscall.SIOCSIFHWADDR": "syscall",
"syscall.SIOCSIFHWBROADCAST": "syscall",
"syscall.SIOCSIFKPI": "syscall",
"syscall.SIOCSIFLINK": "syscall",
"syscall.SIOCSIFLLADDR": "syscall",
"syscall.SIOCSIFMAC": "syscall",
"syscall.SIOCSIFMAP": "syscall",
"syscall.SIOCSIFMEDIA": "syscall",
"syscall.SIOCSIFMEM": "syscall",
"syscall.SIOCSIFMETRIC": "syscall",
"syscall.SIOCSIFMTU": "syscall",
"syscall.SIOCSIFNAME": "syscall",
"syscall.SIOCSIFNETMASK": "syscall",
"syscall.SIOCSIFPFLAGS": "syscall",
"syscall.SIOCSIFPHYADDR": "syscall",
"syscall.SIOCSIFPHYS": "syscall",
"syscall.SIOCSIFPRIORITY": "syscall",
"syscall.SIOCSIFRDOMAIN": "syscall",
"syscall.SIOCSIFRTLABEL": "syscall",
"syscall.SIOCSIFRVNET": "syscall",
"syscall.SIOCSIFSLAVE": "syscall",
"syscall.SIOCSIFTIMESLOT": "syscall",
"syscall.SIOCSIFTXQLEN": "syscall",
"syscall.SIOCSIFVLAN": "syscall",
"syscall.SIOCSIFVNET": "syscall",
"syscall.SIOCSIFXFLAGS": "syscall",
"syscall.SIOCSLIFPHYADDR": "syscall",
"syscall.SIOCSLIFPHYRTABLE": "syscall",
"syscall.SIOCSLIFPHYTTL": "syscall",
"syscall.SIOCSLINKSTR": "syscall",
"syscall.SIOCSLOWAT": "syscall",
"syscall.SIOCSPGRP": "syscall",
"syscall.SIOCSRARP": "syscall",
"syscall.SIOCSSPPPPARAMS": "syscall",
"syscall.SIOCSVH": "syscall",
"syscall.SIOCSVNETID": "syscall",
"syscall.SIOCZIFDATA": "syscall",
"syscall.SIO_GET_EXTENSION_FUNCTION_POINTER": "syscall",
"syscall.SIO_GET_INTERFACE_LIST": "syscall",
"syscall.SIO_KEEPALIVE_VALS": "syscall",
"syscall.SIO_UDP_CONNRESET": "syscall",
"syscall.SOCK_CLOEXEC": "syscall",
"syscall.SOCK_DCCP": "syscall",
"syscall.SOCK_DGRAM": "syscall",
"syscall.SOCK_FLAGS_MASK": "syscall",
"syscall.SOCK_MAXADDRLEN": "syscall",
"syscall.SOCK_NONBLOCK": "syscall",
"syscall.SOCK_NOSIGPIPE": "syscall",
"syscall.SOCK_PACKET": "syscall",
"syscall.SOCK_RAW": "syscall",
"syscall.SOCK_RDM": "syscall",
"syscall.SOCK_SEQPACKET": "syscall",
"syscall.SOCK_STREAM": "syscall",
"syscall.SOL_AAL": "syscall",
"syscall.SOL_ATM": "syscall",
"syscall.SOL_DECNET": "syscall",
"syscall.SOL_ICMPV6": "syscall",
"syscall.SOL_IP": "syscall",
"syscall.SOL_IPV6": "syscall",
"syscall.SOL_IRDA": "syscall",
"syscall.SOL_PACKET": "syscall",
"syscall.SOL_RAW": "syscall",
"syscall.SOL_SOCKET": "syscall",
"syscall.SOL_TCP": "syscall",
"syscall.SOL_X25": "syscall",
"syscall.SOMAXCONN": "syscall",
"syscall.SO_ACCEPTCONN": "syscall",
"syscall.SO_ACCEPTFILTER": "syscall",
"syscall.SO_ATTACH_FILTER": "syscall",
"syscall.SO_BINDANY": "syscall",
"syscall.SO_BINDTODEVICE": "syscall",
"syscall.SO_BINTIME": "syscall",
"syscall.SO_BROADCAST": "syscall",
"syscall.SO_BSDCOMPAT": "syscall",
"syscall.SO_DEBUG": "syscall",
"syscall.SO_DETACH_FILTER": "syscall",
"syscall.SO_DOMAIN": "syscall",
"syscall.SO_DONTROUTE": "syscall",
"syscall.SO_DONTTRUNC": "syscall",
"syscall.SO_ERROR": "syscall",
"syscall.SO_KEEPALIVE": "syscall",
"syscall.SO_LABEL": "syscall",
"syscall.SO_LINGER": "syscall",
"syscall.SO_LINGER_SEC": "syscall",
"syscall.SO_LISTENINCQLEN": "syscall",
"syscall.SO_LISTENQLEN": "syscall",
"syscall.SO_LISTENQLIMIT": "syscall",
"syscall.SO_MARK": "syscall",
"syscall.SO_NETPROC": "syscall",
"syscall.SO_NKE": "syscall",
"syscall.SO_NOADDRERR": "syscall",
"syscall.SO_NOHEADER": "syscall",
"syscall.SO_NOSIGPIPE": "syscall",
"syscall.SO_NOTIFYCONFLICT": "syscall",
"syscall.SO_NO_CHECK": "syscall",
"syscall.SO_NO_DDP": "syscall",
"syscall.SO_NO_OFFLOAD": "syscall",
"syscall.SO_NP_EXTENSIONS": "syscall",
"syscall.SO_NREAD": "syscall",
"syscall.SO_NWRITE": "syscall",
"syscall.SO_OOBINLINE": "syscall",
"syscall.SO_OVERFLOWED": "syscall",
"syscall.SO_PASSCRED": "syscall",
"syscall.SO_PASSSEC": "syscall",
"syscall.SO_PEERCRED": "syscall",
"syscall.SO_PEERLABEL": "syscall",
"syscall.SO_PEERNAME": "syscall",
"syscall.SO_PEERSEC": "syscall",
"syscall.SO_PRIORITY": "syscall",
"syscall.SO_PROTOCOL": "syscall",
"syscall.SO_PROTOTYPE": "syscall",
"syscall.SO_RANDOMPORT": "syscall",
"syscall.SO_RCVBUF": "syscall",
"syscall.SO_RCVBUFFORCE": "syscall",
"syscall.SO_RCVLOWAT": "syscall",
"syscall.SO_RCVTIMEO": "syscall",
"syscall.SO_RESTRICTIONS": "syscall",
"syscall.SO_RESTRICT_DENYIN": "syscall",
"syscall.SO_RESTRICT_DENYOUT": "syscall",
"syscall.SO_RESTRICT_DENYSET": "syscall",
"syscall.SO_REUSEADDR": "syscall",
"syscall.SO_REUSEPORT": "syscall",
"syscall.SO_REUSESHAREUID": "syscall",
"syscall.SO_RTABLE": "syscall",
"syscall.SO_RXQ_OVFL": "syscall",
"syscall.SO_SECURITY_AUTHENTICATION": "syscall",
"syscall.SO_SECURITY_ENCRYPTION_NETWORK": "syscall",
"syscall.SO_SECURITY_ENCRYPTION_TRANSPORT": "syscall",
"syscall.SO_SETFIB": "syscall",
"syscall.SO_SNDBUF": "syscall",
"syscall.SO_SNDBUFFORCE": "syscall",
"syscall.SO_SNDLOWAT": "syscall",
"syscall.SO_SNDTIMEO": "syscall",
"syscall.SO_SPLICE": "syscall",
"syscall.SO_TIMESTAMP": "syscall",
"syscall.SO_TIMESTAMPING": "syscall",
"syscall.SO_TIMESTAMPNS": "syscall",
"syscall.SO_TIMESTAMP_MONOTONIC": "syscall",
"syscall.SO_TYPE": "syscall",
"syscall.SO_UPCALLCLOSEWAIT": "syscall",
"syscall.SO_UPDATE_ACCEPT_CONTEXT": "syscall",
"syscall.SO_UPDATE_CONNECT_CONTEXT": "syscall",
"syscall.SO_USELOOPBACK": "syscall",
"syscall.SO_USER_COOKIE": "syscall",
"syscall.SO_VENDOR": "syscall",
"syscall.SO_WANTMORE": "syscall",
"syscall.SO_WANTOOBFLAG": "syscall",
"syscall.SSLExtraCertChainPolicyPara": "syscall",
"syscall.STANDARD_RIGHTS_ALL": "syscall",
"syscall.STANDARD_RIGHTS_EXECUTE": "syscall",
"syscall.STANDARD_RIGHTS_READ": "syscall",
"syscall.STANDARD_RIGHTS_REQUIRED": "syscall",
"syscall.STANDARD_RIGHTS_WRITE": "syscall",
"syscall.STARTF_USESHOWWINDOW": "syscall",
"syscall.STARTF_USESTDHANDLES": "syscall",
"syscall.STD_ERROR_HANDLE": "syscall",
"syscall.STD_INPUT_HANDLE": "syscall",
"syscall.STD_OUTPUT_HANDLE": "syscall",
"syscall.SUBLANG_ENGLISH_US": "syscall",
"syscall.SW_FORCEMINIMIZE": "syscall",
"syscall.SW_HIDE": "syscall",
"syscall.SW_MAXIMIZE": "syscall",
"syscall.SW_MINIMIZE": "syscall",
"syscall.SW_NORMAL": "syscall",
"syscall.SW_RESTORE": "syscall",
"syscall.SW_SHOW": "syscall",
"syscall.SW_SHOWDEFAULT": "syscall",
"syscall.SW_SHOWMAXIMIZED": "syscall",
"syscall.SW_SHOWMINIMIZED": "syscall",
"syscall.SW_SHOWMINNOACTIVE": "syscall",
"syscall.SW_SHOWNA": "syscall",
"syscall.SW_SHOWNOACTIVATE": "syscall",
"syscall.SW_SHOWNORMAL": "syscall",
"syscall.SYMBOLIC_LINK_FLAG_DIRECTORY": "syscall",
"syscall.SYNCHRONIZE": "syscall",
"syscall.SYSCTL_VERSION": "syscall",
"syscall.SYSCTL_VERS_0": "syscall",
"syscall.SYSCTL_VERS_1": "syscall",
"syscall.SYSCTL_VERS_MASK": "syscall",
"syscall.SYS_ABORT2": "syscall",
"syscall.SYS_ACCEPT": "syscall",
"syscall.SYS_ACCEPT4": "syscall",
"syscall.SYS_ACCEPT_NOCANCEL": "syscall",
"syscall.SYS_ACCESS": "syscall",
"syscall.SYS_ACCESS_EXTENDED": "syscall",
"syscall.SYS_ACCT": "syscall",
"syscall.SYS_ADD_KEY": "syscall",
"syscall.SYS_ADD_PROFIL": "syscall",
"syscall.SYS_ADJFREQ": "syscall",
"syscall.SYS_ADJTIME": "syscall",
"syscall.SYS_ADJTIMEX": "syscall",
"syscall.SYS_AFS_SYSCALL": "syscall",
"syscall.SYS_AIO_CANCEL": "syscall",
"syscall.SYS_AIO_ERROR": "syscall",
"syscall.SYS_AIO_FSYNC": "syscall",
"syscall.SYS_AIO_READ": "syscall",
"syscall.SYS_AIO_RETURN": "syscall",
"syscall.SYS_AIO_SUSPEND": "syscall",
"syscall.SYS_AIO_SUSPEND_NOCANCEL": "syscall",
"syscall.SYS_AIO_WRITE": "syscall",
"syscall.SYS_ALARM": "syscall",
"syscall.SYS_ARCH_PRCTL": "syscall",
"syscall.SYS_ARM_FADVISE64_64": "syscall",
"syscall.SYS_ARM_SYNC_FILE_RANGE": "syscall",
"syscall.SYS_ATGETMSG": "syscall",
"syscall.SYS_ATPGETREQ": "syscall",
"syscall.SYS_ATPGETRSP": "syscall",
"syscall.SYS_ATPSNDREQ": "syscall",
"syscall.SYS_ATPSNDRSP": "syscall",
"syscall.SYS_ATPUTMSG": "syscall",
"syscall.SYS_ATSOCKET": "syscall",
"syscall.SYS_AUDIT": "syscall",
"syscall.SYS_AUDITCTL": "syscall",
"syscall.SYS_AUDITON": "syscall",
"syscall.SYS_AUDIT_SESSION_JOIN": "syscall",
"syscall.SYS_AUDIT_SESSION_PORT": "syscall",
"syscall.SYS_AUDIT_SESSION_SELF": "syscall",
"syscall.SYS_BDFLUSH": "syscall",
"syscall.SYS_BIND": "syscall",
"syscall.SYS_BINDAT": "syscall",
"syscall.SYS_BREAK": "syscall",
"syscall.SYS_BRK": "syscall",
"syscall.SYS_BSDTHREAD_CREATE": "syscall",
"syscall.SYS_BSDTHREAD_REGISTER": "syscall",
"syscall.SYS_BSDTHREAD_TERMINATE": "syscall",
"syscall.SYS_CAPGET": "syscall",
"syscall.SYS_CAPSET": "syscall",
"syscall.SYS_CAP_ENTER": "syscall",
"syscall.SYS_CAP_FCNTLS_GET": "syscall",
"syscall.SYS_CAP_FCNTLS_LIMIT": "syscall",
"syscall.SYS_CAP_GETMODE": "syscall",
"syscall.SYS_CAP_GETRIGHTS": "syscall",
"syscall.SYS_CAP_IOCTLS_GET": "syscall",
"syscall.SYS_CAP_IOCTLS_LIMIT": "syscall",
"syscall.SYS_CAP_NEW": "syscall",
"syscall.SYS_CAP_RIGHTS_GET": "syscall",
"syscall.SYS_CAP_RIGHTS_LIMIT": "syscall",
"syscall.SYS_CHDIR": "syscall",
"syscall.SYS_CHFLAGS": "syscall",
"syscall.SYS_CHFLAGSAT": "syscall",
"syscall.SYS_CHMOD": "syscall",
"syscall.SYS_CHMOD_EXTENDED": "syscall",
"syscall.SYS_CHOWN": "syscall",
"syscall.SYS_CHOWN32": "syscall",
"syscall.SYS_CHROOT": "syscall",
"syscall.SYS_CHUD": "syscall",
"syscall.SYS_CLOCK_ADJTIME": "syscall",
"syscall.SYS_CLOCK_GETCPUCLOCKID2": "syscall",
"syscall.SYS_CLOCK_GETRES": "syscall",
"syscall.SYS_CLOCK_GETTIME": "syscall",
"syscall.SYS_CLOCK_NANOSLEEP": "syscall",
"syscall.SYS_CLOCK_SETTIME": "syscall",
"syscall.SYS_CLONE": "syscall",
"syscall.SYS_CLOSE": "syscall",
"syscall.SYS_CLOSEFROM": "syscall",
"syscall.SYS_CLOSE_NOCANCEL": "syscall",
"syscall.SYS_CONNECT": "syscall",
"syscall.SYS_CONNECTAT": "syscall",
"syscall.SYS_CONNECT_NOCANCEL": "syscall",
"syscall.SYS_COPYFILE": "syscall",
"syscall.SYS_CPUSET": "syscall",
"syscall.SYS_CPUSET_GETAFFINITY": "syscall",
"syscall.SYS_CPUSET_GETID": "syscall",
"syscall.SYS_CPUSET_SETAFFINITY": "syscall",
"syscall.SYS_CPUSET_SETID": "syscall",
"syscall.SYS_CREAT": "syscall",
"syscall.SYS_CREATE_MODULE": "syscall",
"syscall.SYS_CSOPS": "syscall",
"syscall.SYS_DELETE": "syscall",
"syscall.SYS_DELETE_MODULE": "syscall",
"syscall.SYS_DUP": "syscall",
"syscall.SYS_DUP2": "syscall",
"syscall.SYS_DUP3": "syscall",
"syscall.SYS_EACCESS": "syscall",
"syscall.SYS_EPOLL_CREATE": "syscall",
"syscall.SYS_EPOLL_CREATE1": "syscall",
"syscall.SYS_EPOLL_CTL": "syscall",
"syscall.SYS_EPOLL_CTL_OLD": "syscall",
"syscall.SYS_EPOLL_PWAIT": "syscall",
"syscall.SYS_EPOLL_WAIT": "syscall",
"syscall.SYS_EPOLL_WAIT_OLD": "syscall",
"syscall.SYS_EVENTFD": "syscall",
"syscall.SYS_EVENTFD2": "syscall",
"syscall.SYS_EXCHANGEDATA": "syscall",
"syscall.SYS_EXECVE": "syscall",
"syscall.SYS_EXIT": "syscall",
"syscall.SYS_EXIT_GROUP": "syscall",
"syscall.SYS_EXTATTRCTL": "syscall",
"syscall.SYS_EXTATTR_DELETE_FD": "syscall",
"syscall.SYS_EXTATTR_DELETE_FILE": "syscall",
"syscall.SYS_EXTATTR_DELETE_LINK": "syscall",
"syscall.SYS_EXTATTR_GET_FD": "syscall",
"syscall.SYS_EXTATTR_GET_FILE": "syscall",
"syscall.SYS_EXTATTR_GET_LINK": "syscall",
"syscall.SYS_EXTATTR_LIST_FD": "syscall",
"syscall.SYS_EXTATTR_LIST_FILE": "syscall",
"syscall.SYS_EXTATTR_LIST_LINK": "syscall",
"syscall.SYS_EXTATTR_SET_FD": "syscall",
"syscall.SYS_EXTATTR_SET_FILE": "syscall",
"syscall.SYS_EXTATTR_SET_LINK": "syscall",
"syscall.SYS_FACCESSAT": "syscall",
"syscall.SYS_FADVISE64": "syscall",
"syscall.SYS_FADVISE64_64": "syscall",
"syscall.SYS_FALLOCATE": "syscall",
"syscall.SYS_FANOTIFY_INIT": "syscall",
"syscall.SYS_FANOTIFY_MARK": "syscall",
"syscall.SYS_FCHDIR": "syscall",
"syscall.SYS_FCHFLAGS": "syscall",
"syscall.SYS_FCHMOD": "syscall",
"syscall.SYS_FCHMODAT": "syscall",
"syscall.SYS_FCHMOD_EXTENDED": "syscall",
"syscall.SYS_FCHOWN": "syscall",
"syscall.SYS_FCHOWN32": "syscall",
"syscall.SYS_FCHOWNAT": "syscall",
"syscall.SYS_FCHROOT": "syscall",
"syscall.SYS_FCNTL": "syscall",
"syscall.SYS_FCNTL64": "syscall",
"syscall.SYS_FCNTL_NOCANCEL": "syscall",
"syscall.SYS_FDATASYNC": "syscall",
"syscall.SYS_FEXECVE": "syscall",
"syscall.SYS_FFCLOCK_GETCOUNTER": "syscall",
"syscall.SYS_FFCLOCK_GETESTIMATE": "syscall",
"syscall.SYS_FFCLOCK_SETESTIMATE": "syscall",
"syscall.SYS_FFSCTL": "syscall",
"syscall.SYS_FGETATTRLIST": "syscall",
"syscall.SYS_FGETXATTR": "syscall",
"syscall.SYS_FHOPEN": "syscall",
"syscall.SYS_FHSTAT": "syscall",
"syscall.SYS_FHSTATFS": "syscall",
"syscall.SYS_FILEPORT_MAKEFD": "syscall",
"syscall.SYS_FILEPORT_MAKEPORT": "syscall",
"syscall.SYS_FKTRACE": "syscall",
"syscall.SYS_FLISTXATTR": "syscall",
"syscall.SYS_FLOCK": "syscall",
"syscall.SYS_FORK": "syscall",
"syscall.SYS_FPATHCONF": "syscall",
"syscall.SYS_FREEBSD6_FTRUNCATE": "syscall",
"syscall.SYS_FREEBSD6_LSEEK": "syscall",
"syscall.SYS_FREEBSD6_MMAP": "syscall",
"syscall.SYS_FREEBSD6_PREAD": "syscall",
"syscall.SYS_FREEBSD6_PWRITE": "syscall",
"syscall.SYS_FREEBSD6_TRUNCATE": "syscall",
"syscall.SYS_FREMOVEXATTR": "syscall",
"syscall.SYS_FSCTL": "syscall",
"syscall.SYS_FSETATTRLIST": "syscall",
"syscall.SYS_FSETXATTR": "syscall",
"syscall.SYS_FSGETPATH": "syscall",
"syscall.SYS_FSTAT": "syscall",
"syscall.SYS_FSTAT64": "syscall",
"syscall.SYS_FSTAT64_EXTENDED": "syscall",
"syscall.SYS_FSTATAT": "syscall",
"syscall.SYS_FSTATAT64": "syscall",
"syscall.SYS_FSTATFS": "syscall",
"syscall.SYS_FSTATFS64": "syscall",
"syscall.SYS_FSTATV": "syscall",
"syscall.SYS_FSTATVFS1": "syscall",
"syscall.SYS_FSTAT_EXTENDED": "syscall",
"syscall.SYS_FSYNC": "syscall",
"syscall.SYS_FSYNC_NOCANCEL": "syscall",
"syscall.SYS_FSYNC_RANGE": "syscall",
"syscall.SYS_FTIME": "syscall",
"syscall.SYS_FTRUNCATE": "syscall",
"syscall.SYS_FTRUNCATE64": "syscall",
"syscall.SYS_FUTEX": "syscall",
"syscall.SYS_FUTIMENS": "syscall",
"syscall.SYS_FUTIMES": "syscall",
"syscall.SYS_FUTIMESAT": "syscall",
"syscall.SYS_GETATTRLIST": "syscall",
"syscall.SYS_GETAUDIT": "syscall",
"syscall.SYS_GETAUDIT_ADDR": "syscall",
"syscall.SYS_GETAUID": "syscall",
"syscall.SYS_GETCONTEXT": "syscall",
"syscall.SYS_GETCPU": "syscall",
"syscall.SYS_GETCWD": "syscall",
"syscall.SYS_GETDENTS": "syscall",
"syscall.SYS_GETDENTS64": "syscall",
"syscall.SYS_GETDIRENTRIES": "syscall",
"syscall.SYS_GETDIRENTRIES64": "syscall",
"syscall.SYS_GETDIRENTRIESATTR": "syscall",
"syscall.SYS_GETDTABLECOUNT": "syscall",
"syscall.SYS_GETDTABLESIZE": "syscall",
"syscall.SYS_GETEGID": "syscall",
"syscall.SYS_GETEGID32": "syscall",
"syscall.SYS_GETEUID": "syscall",
"syscall.SYS_GETEUID32": "syscall",
"syscall.SYS_GETFH": "syscall",
"syscall.SYS_GETFSSTAT": "syscall",
"syscall.SYS_GETFSSTAT64": "syscall",
"syscall.SYS_GETGID": "syscall",
"syscall.SYS_GETGID32": "syscall",
"syscall.SYS_GETGROUPS": "syscall",
"syscall.SYS_GETGROUPS32": "syscall",
"syscall.SYS_GETHOSTUUID": "syscall",
"syscall.SYS_GETITIMER": "syscall",
"syscall.SYS_GETLCID": "syscall",
"syscall.SYS_GETLOGIN": "syscall",
"syscall.SYS_GETLOGINCLASS": "syscall",
"syscall.SYS_GETPEERNAME": "syscall",
"syscall.SYS_GETPGID": "syscall",
"syscall.SYS_GETPGRP": "syscall",
"syscall.SYS_GETPID": "syscall",
"syscall.SYS_GETPMSG": "syscall",
"syscall.SYS_GETPPID": "syscall",
"syscall.SYS_GETPRIORITY": "syscall",
"syscall.SYS_GETRESGID": "syscall",
"syscall.SYS_GETRESGID32": "syscall",
"syscall.SYS_GETRESUID": "syscall",
"syscall.SYS_GETRESUID32": "syscall",
"syscall.SYS_GETRLIMIT": "syscall",
"syscall.SYS_GETRTABLE": "syscall",
"syscall.SYS_GETRUSAGE": "syscall",
"syscall.SYS_GETSGROUPS": "syscall",
"syscall.SYS_GETSID": "syscall",
"syscall.SYS_GETSOCKNAME": "syscall",
"syscall.SYS_GETSOCKOPT": "syscall",
"syscall.SYS_GETTHRID": "syscall",
"syscall.SYS_GETTID": "syscall",
"syscall.SYS_GETTIMEOFDAY": "syscall",
"syscall.SYS_GETUID": "syscall",
"syscall.SYS_GETUID32": "syscall",
"syscall.SYS_GETVFSSTAT": "syscall",
"syscall.SYS_GETWGROUPS": "syscall",
"syscall.SYS_GETXATTR": "syscall",
"syscall.SYS_GET_KERNEL_SYMS": "syscall",
"syscall.SYS_GET_MEMPOLICY": "syscall",
"syscall.SYS_GET_ROBUST_LIST": "syscall",
"syscall.SYS_GET_THREAD_AREA": "syscall",
"syscall.SYS_GTTY": "syscall",
"syscall.SYS_IDENTITYSVC": "syscall",
"syscall.SYS_IDLE": "syscall",
"syscall.SYS_INITGROUPS": "syscall",
"syscall.SYS_INIT_MODULE": "syscall",
"syscall.SYS_INOTIFY_ADD_WATCH": "syscall",
"syscall.SYS_INOTIFY_INIT": "syscall",
"syscall.SYS_INOTIFY_INIT1": "syscall",
"syscall.SYS_INOTIFY_RM_WATCH": "syscall",
"syscall.SYS_IOCTL": "syscall",
"syscall.SYS_IOPERM": "syscall",
"syscall.SYS_IOPL": "syscall",
"syscall.SYS_IOPOLICYSYS": "syscall",
"syscall.SYS_IOPRIO_GET": "syscall",
"syscall.SYS_IOPRIO_SET": "syscall",
"syscall.SYS_IO_CANCEL": "syscall",
"syscall.SYS_IO_DESTROY": "syscall",
"syscall.SYS_IO_GETEVENTS": "syscall",
"syscall.SYS_IO_SETUP": "syscall",
"syscall.SYS_IO_SUBMIT": "syscall",
"syscall.SYS_IPC": "syscall",
"syscall.SYS_ISSETUGID": "syscall",
"syscall.SYS_JAIL": "syscall",
"syscall.SYS_JAIL_ATTACH": "syscall",
"syscall.SYS_JAIL_GET": "syscall",
"syscall.SYS_JAIL_REMOVE": "syscall",
"syscall.SYS_JAIL_SET": "syscall",
"syscall.SYS_KDEBUG_TRACE": "syscall",
"syscall.SYS_KENV": "syscall",
"syscall.SYS_KEVENT": "syscall",
"syscall.SYS_KEVENT64": "syscall",
"syscall.SYS_KEXEC_LOAD": "syscall",
"syscall.SYS_KEYCTL": "syscall",
"syscall.SYS_KILL": "syscall",
"syscall.SYS_KLDFIND": "syscall",
"syscall.SYS_KLDFIRSTMOD": "syscall",
"syscall.SYS_KLDLOAD": "syscall",
"syscall.SYS_KLDNEXT": "syscall",
"syscall.SYS_KLDSTAT": "syscall",
"syscall.SYS_KLDSYM": "syscall",
"syscall.SYS_KLDUNLOAD": "syscall",
"syscall.SYS_KLDUNLOADF": "syscall",
"syscall.SYS_KQUEUE": "syscall",
"syscall.SYS_KQUEUE1": "syscall",
"syscall.SYS_KTIMER_CREATE": "syscall",
"syscall.SYS_KTIMER_DELETE": "syscall",
"syscall.SYS_KTIMER_GETOVERRUN": "syscall",
"syscall.SYS_KTIMER_GETTIME": "syscall",
"syscall.SYS_KTIMER_SETTIME": "syscall",
"syscall.SYS_KTRACE": "syscall",
"syscall.SYS_LCHFLAGS": "syscall",
"syscall.SYS_LCHMOD": "syscall",
"syscall.SYS_LCHOWN": "syscall",
"syscall.SYS_LCHOWN32": "syscall",
"syscall.SYS_LGETFH": "syscall",
"syscall.SYS_LGETXATTR": "syscall",
"syscall.SYS_LINK": "syscall",
"syscall.SYS_LINKAT": "syscall",
"syscall.SYS_LIO_LISTIO": "syscall",
"syscall.SYS_LISTEN": "syscall",
"syscall.SYS_LISTXATTR": "syscall",
"syscall.SYS_LLISTXATTR": "syscall",
"syscall.SYS_LOCK": "syscall",
"syscall.SYS_LOOKUP_DCOOKIE": "syscall",
"syscall.SYS_LPATHCONF": "syscall",
"syscall.SYS_LREMOVEXATTR": "syscall",
"syscall.SYS_LSEEK": "syscall",
"syscall.SYS_LSETXATTR": "syscall",
"syscall.SYS_LSTAT": "syscall",
"syscall.SYS_LSTAT64": "syscall",
"syscall.SYS_LSTAT64_EXTENDED": "syscall",
"syscall.SYS_LSTATV": "syscall",
"syscall.SYS_LSTAT_EXTENDED": "syscall",
"syscall.SYS_LUTIMES": "syscall",
"syscall.SYS_MAC_SYSCALL": "syscall",
"syscall.SYS_MADVISE": "syscall",
"syscall.SYS_MADVISE1": "syscall",
"syscall.SYS_MAXSYSCALL": "syscall",
"syscall.SYS_MBIND": "syscall",
"syscall.SYS_MIGRATE_PAGES": "syscall",
"syscall.SYS_MINCORE": "syscall",
"syscall.SYS_MINHERIT": "syscall",
"syscall.SYS_MKCOMPLEX": "syscall",
"syscall.SYS_MKDIR": "syscall",
"syscall.SYS_MKDIRAT": "syscall",
"syscall.SYS_MKDIR_EXTENDED": "syscall",
"syscall.SYS_MKFIFO": "syscall",
"syscall.SYS_MKFIFOAT": "syscall",
"syscall.SYS_MKFIFO_EXTENDED": "syscall",
"syscall.SYS_MKNOD": "syscall",
"syscall.SYS_MKNODAT": "syscall",
"syscall.SYS_MLOCK": "syscall",
"syscall.SYS_MLOCKALL": "syscall",
"syscall.SYS_MMAP": "syscall",
"syscall.SYS_MMAP2": "syscall",
"syscall.SYS_MODCTL": "syscall",
"syscall.SYS_MODFIND": "syscall",
"syscall.SYS_MODFNEXT": "syscall",
"syscall.SYS_MODIFY_LDT": "syscall",
"syscall.SYS_MODNEXT": "syscall",
"syscall.SYS_MODSTAT": "syscall",
"syscall.SYS_MODWATCH": "syscall",
"syscall.SYS_MOUNT": "syscall",
"syscall.SYS_MOVE_PAGES": "syscall",
"syscall.SYS_MPROTECT": "syscall",
"syscall.SYS_MPX": "syscall",
"syscall.SYS_MQUERY": "syscall",
"syscall.SYS_MQ_GETSETATTR": "syscall",
"syscall.SYS_MQ_NOTIFY": "syscall",
"syscall.SYS_MQ_OPEN": "syscall",
"syscall.SYS_MQ_TIMEDRECEIVE": "syscall",
"syscall.SYS_MQ_TIMEDSEND": "syscall",
"syscall.SYS_MQ_UNLINK": "syscall",
"syscall.SYS_MREMAP": "syscall",
"syscall.SYS_MSGCTL": "syscall",
"syscall.SYS_MSGGET": "syscall",
"syscall.SYS_MSGRCV": "syscall",
"syscall.SYS_MSGRCV_NOCANCEL": "syscall",
"syscall.SYS_MSGSND": "syscall",
"syscall.SYS_MSGSND_NOCANCEL": "syscall",
"syscall.SYS_MSGSYS": "syscall",
"syscall.SYS_MSYNC": "syscall",
"syscall.SYS_MSYNC_NOCANCEL": "syscall",
"syscall.SYS_MUNLOCK": "syscall",
"syscall.SYS_MUNLOCKALL": "syscall",
"syscall.SYS_MUNMAP": "syscall",
"syscall.SYS_NAME_TO_HANDLE_AT": "syscall",
"syscall.SYS_NANOSLEEP": "syscall",
"syscall.SYS_NEWFSTATAT": "syscall",
"syscall.SYS_NFSCLNT": "syscall",
"syscall.SYS_NFSSERVCTL": "syscall",
"syscall.SYS_NFSSVC": "syscall",
"syscall.SYS_NFSTAT": "syscall",
"syscall.SYS_NICE": "syscall",
"syscall.SYS_NLSTAT": "syscall",
"syscall.SYS_NMOUNT": "syscall",
"syscall.SYS_NSTAT": "syscall",
"syscall.SYS_NTP_ADJTIME": "syscall",
"syscall.SYS_NTP_GETTIME": "syscall",
"syscall.SYS_OABI_SYSCALL_BASE": "syscall",
"syscall.SYS_OBREAK": "syscall",
"syscall.SYS_OLDFSTAT": "syscall",
"syscall.SYS_OLDLSTAT": "syscall",
"syscall.SYS_OLDOLDUNAME": "syscall",
"syscall.SYS_OLDSTAT": "syscall",
"syscall.SYS_OLDUNAME": "syscall",
"syscall.SYS_OPEN": "syscall",
"syscall.SYS_OPENAT": "syscall",
"syscall.SYS_OPENBSD_POLL": "syscall",
"syscall.SYS_OPEN_BY_HANDLE_AT": "syscall",
"syscall.SYS_OPEN_EXTENDED": "syscall",
"syscall.SYS_OPEN_NOCANCEL": "syscall",
"syscall.SYS_OVADVISE": "syscall",
"syscall.SYS_PACCEPT": "syscall",
"syscall.SYS_PATHCONF": "syscall",
"syscall.SYS_PAUSE": "syscall",
"syscall.SYS_PCICONFIG_IOBASE": "syscall",
"syscall.SYS_PCICONFIG_READ": "syscall",
"syscall.SYS_PCICONFIG_WRITE": "syscall",
"syscall.SYS_PDFORK": "syscall",
"syscall.SYS_PDGETPID": "syscall",
"syscall.SYS_PDKILL": "syscall",
"syscall.SYS_PERF_EVENT_OPEN": "syscall",
"syscall.SYS_PERSONALITY": "syscall",
"syscall.SYS_PID_HIBERNATE": "syscall",
"syscall.SYS_PID_RESUME": "syscall",
"syscall.SYS_PID_SHUTDOWN_SOCKETS": "syscall",
"syscall.SYS_PID_SUSPEND": "syscall",
"syscall.SYS_PIPE": "syscall",
"syscall.SYS_PIPE2": "syscall",
"syscall.SYS_PIVOT_ROOT": "syscall",
"syscall.SYS_PMC_CONTROL": "syscall",
"syscall.SYS_PMC_GET_INFO": "syscall",
"syscall.SYS_POLL": "syscall",
"syscall.SYS_POLLTS": "syscall",
"syscall.SYS_POLL_NOCANCEL": "syscall",
"syscall.SYS_POSIX_FADVISE": "syscall",
"syscall.SYS_POSIX_FALLOCATE": "syscall",
"syscall.SYS_POSIX_OPENPT": "syscall",
"syscall.SYS_POSIX_SPAWN": "syscall",
"syscall.SYS_PPOLL": "syscall",
"syscall.SYS_PRCTL": "syscall",
"syscall.SYS_PREAD": "syscall",
"syscall.SYS_PREAD64": "syscall",
"syscall.SYS_PREADV": "syscall",
"syscall.SYS_PREAD_NOCANCEL": "syscall",
"syscall.SYS_PRLIMIT64": "syscall",
"syscall.SYS_PROCCTL": "syscall",
"syscall.SYS_PROCESS_POLICY": "syscall",
"syscall.SYS_PROCESS_VM_READV": "syscall",
"syscall.SYS_PROCESS_VM_WRITEV": "syscall",
"syscall.SYS_PROC_INFO": "syscall",
"syscall.SYS_PROF": "syscall",
"syscall.SYS_PROFIL": "syscall",
"syscall.SYS_PSELECT": "syscall",
"syscall.SYS_PSELECT6": "syscall",
"syscall.SYS_PSET_ASSIGN": "syscall",
"syscall.SYS_PSET_CREATE": "syscall",
"syscall.SYS_PSET_DESTROY": "syscall",
"syscall.SYS_PSYNCH_CVBROAD": "syscall",
"syscall.SYS_PSYNCH_CVCLRPREPOST": "syscall",
"syscall.SYS_PSYNCH_CVSIGNAL": "syscall",
"syscall.SYS_PSYNCH_CVWAIT": "syscall",
"syscall.SYS_PSYNCH_MUTEXDROP": "syscall",
"syscall.SYS_PSYNCH_MUTEXWAIT": "syscall",
"syscall.SYS_PSYNCH_RW_DOWNGRADE": "syscall",
"syscall.SYS_PSYNCH_RW_LONGRDLOCK": "syscall",
"syscall.SYS_PSYNCH_RW_RDLOCK": "syscall",
"syscall.SYS_PSYNCH_RW_UNLOCK": "syscall",
"syscall.SYS_PSYNCH_RW_UNLOCK2": "syscall",
"syscall.SYS_PSYNCH_RW_UPGRADE": "syscall",
"syscall.SYS_PSYNCH_RW_WRLOCK": "syscall",
"syscall.SYS_PSYNCH_RW_YIELDWRLOCK": "syscall",
"syscall.SYS_PTRACE": "syscall",
"syscall.SYS_PUTPMSG": "syscall",
"syscall.SYS_PWRITE": "syscall",
"syscall.SYS_PWRITE64": "syscall",
"syscall.SYS_PWRITEV": "syscall",
"syscall.SYS_PWRITE_NOCANCEL": "syscall",
"syscall.SYS_QUERY_MODULE": "syscall",
"syscall.SYS_QUOTACTL": "syscall",
"syscall.SYS_RASCTL": "syscall",
"syscall.SYS_RCTL_ADD_RULE": "syscall",
"syscall.SYS_RCTL_GET_LIMITS": "syscall",
"syscall.SYS_RCTL_GET_RACCT": "syscall",
"syscall.SYS_RCTL_GET_RULES": "syscall",
"syscall.SYS_RCTL_REMOVE_RULE": "syscall",
"syscall.SYS_READ": "syscall",
"syscall.SYS_READAHEAD": "syscall",
"syscall.SYS_READDIR": "syscall",
"syscall.SYS_READLINK": "syscall",
"syscall.SYS_READLINKAT": "syscall",
"syscall.SYS_READV": "syscall",
"syscall.SYS_READV_NOCANCEL": "syscall",
"syscall.SYS_READ_NOCANCEL": "syscall",
"syscall.SYS_REBOOT": "syscall",
"syscall.SYS_RECV": "syscall",
"syscall.SYS_RECVFROM": "syscall",
"syscall.SYS_RECVFROM_NOCANCEL": "syscall",
"syscall.SYS_RECVMMSG": "syscall",
"syscall.SYS_RECVMSG": "syscall",
"syscall.SYS_RECVMSG_NOCANCEL": "syscall",
"syscall.SYS_REMAP_FILE_PAGES": "syscall",
"syscall.SYS_REMOVEXATTR": "syscall",
"syscall.SYS_RENAME": "syscall",
"syscall.SYS_RENAMEAT": "syscall",
"syscall.SYS_REQUEST_KEY": "syscall",
"syscall.SYS_RESTART_SYSCALL": "syscall",
"syscall.SYS_REVOKE": "syscall",
"syscall.SYS_RFORK": "syscall",
"syscall.SYS_RMDIR": "syscall",
"syscall.SYS_RTPRIO": "syscall",
"syscall.SYS_RTPRIO_THREAD": "syscall",
"syscall.SYS_RT_SIGACTION": "syscall",
"syscall.SYS_RT_SIGPENDING": "syscall",
"syscall.SYS_RT_SIGPROCMASK": "syscall",
"syscall.SYS_RT_SIGQUEUEINFO": "syscall",
"syscall.SYS_RT_SIGRETURN": "syscall",
"syscall.SYS_RT_SIGSUSPEND": "syscall",
"syscall.SYS_RT_SIGTIMEDWAIT": "syscall",
"syscall.SYS_RT_TGSIGQUEUEINFO": "syscall",
"syscall.SYS_SBRK": "syscall",
"syscall.SYS_SCHED_GETAFFINITY": "syscall",
"syscall.SYS_SCHED_GETPARAM": "syscall",
"syscall.SYS_SCHED_GETSCHEDULER": "syscall",
"syscall.SYS_SCHED_GET_PRIORITY_MAX": "syscall",
"syscall.SYS_SCHED_GET_PRIORITY_MIN": "syscall",
"syscall.SYS_SCHED_RR_GET_INTERVAL": "syscall",
"syscall.SYS_SCHED_SETAFFINITY": "syscall",
"syscall.SYS_SCHED_SETPARAM": "syscall",
"syscall.SYS_SCHED_SETSCHEDULER": "syscall",
"syscall.SYS_SCHED_YIELD": "syscall",
"syscall.SYS_SCTP_GENERIC_RECVMSG": "syscall",
"syscall.SYS_SCTP_GENERIC_SENDMSG": "syscall",
"syscall.SYS_SCTP_GENERIC_SENDMSG_IOV": "syscall",
"syscall.SYS_SCTP_PEELOFF": "syscall",
"syscall.SYS_SEARCHFS": "syscall",
"syscall.SYS_SECURITY": "syscall",
"syscall.SYS_SELECT": "syscall",
"syscall.SYS_SELECT_NOCANCEL": "syscall",
"syscall.SYS_SEMCONFIG": "syscall",
"syscall.SYS_SEMCTL": "syscall",
"syscall.SYS_SEMGET": "syscall",
"syscall.SYS_SEMOP": "syscall",
"syscall.SYS_SEMSYS": "syscall",
"syscall.SYS_SEMTIMEDOP": "syscall",
"syscall.SYS_SEM_CLOSE": "syscall",
"syscall.SYS_SEM_DESTROY": "syscall",
"syscall.SYS_SEM_GETVALUE": "syscall",
"syscall.SYS_SEM_INIT": "syscall",
"syscall.SYS_SEM_OPEN": "syscall",
"syscall.SYS_SEM_POST": "syscall",
"syscall.SYS_SEM_TRYWAIT": "syscall",
"syscall.SYS_SEM_UNLINK": "syscall",
"syscall.SYS_SEM_WAIT": "syscall",
"syscall.SYS_SEM_WAIT_NOCANCEL": "syscall",
"syscall.SYS_SEND": "syscall",
"syscall.SYS_SENDFILE": "syscall",
"syscall.SYS_SENDFILE64": "syscall",
"syscall.SYS_SENDMMSG": "syscall",
"syscall.SYS_SENDMSG": "syscall",
"syscall.SYS_SENDMSG_NOCANCEL": "syscall",
"syscall.SYS_SENDTO": "syscall",
"syscall.SYS_SENDTO_NOCANCEL": "syscall",
"syscall.SYS_SETATTRLIST": "syscall",
"syscall.SYS_SETAUDIT": "syscall",
"syscall.SYS_SETAUDIT_ADDR": "syscall",
"syscall.SYS_SETAUID": "syscall",
"syscall.SYS_SETCONTEXT": "syscall",
"syscall.SYS_SETDOMAINNAME": "syscall",
"syscall.SYS_SETEGID": "syscall",
"syscall.SYS_SETEUID": "syscall",
"syscall.SYS_SETFIB": "syscall",
"syscall.SYS_SETFSGID": "syscall",
"syscall.SYS_SETFSGID32": "syscall",
"syscall.SYS_SETFSUID": "syscall",
"syscall.SYS_SETFSUID32": "syscall",
"syscall.SYS_SETGID": "syscall",
"syscall.SYS_SETGID32": "syscall",
"syscall.SYS_SETGROUPS": "syscall",
"syscall.SYS_SETGROUPS32": "syscall",
"syscall.SYS_SETHOSTNAME": "syscall",
"syscall.SYS_SETITIMER": "syscall",
"syscall.SYS_SETLCID": "syscall",
"syscall.SYS_SETLOGIN": "syscall",
"syscall.SYS_SETLOGINCLASS": "syscall",
"syscall.SYS_SETNS": "syscall",
"syscall.SYS_SETPGID": "syscall",
"syscall.SYS_SETPRIORITY": "syscall",
"syscall.SYS_SETPRIVEXEC": "syscall",
"syscall.SYS_SETREGID": "syscall",
"syscall.SYS_SETREGID32": "syscall",
"syscall.SYS_SETRESGID": "syscall",
"syscall.SYS_SETRESGID32": "syscall",
"syscall.SYS_SETRESUID": "syscall",
"syscall.SYS_SETRESUID32": "syscall",
"syscall.SYS_SETREUID": "syscall",
"syscall.SYS_SETREUID32": "syscall",
"syscall.SYS_SETRLIMIT": "syscall",
"syscall.SYS_SETRTABLE": "syscall",
"syscall.SYS_SETSGROUPS": "syscall",
"syscall.SYS_SETSID": "syscall",
"syscall.SYS_SETSOCKOPT": "syscall",
"syscall.SYS_SETTID": "syscall",
"syscall.SYS_SETTID_WITH_PID": "syscall",
"syscall.SYS_SETTIMEOFDAY": "syscall",
"syscall.SYS_SETUID": "syscall",
"syscall.SYS_SETUID32": "syscall",
"syscall.SYS_SETWGROUPS": "syscall",
"syscall.SYS_SETXATTR": "syscall",
"syscall.SYS_SET_MEMPOLICY": "syscall",
"syscall.SYS_SET_ROBUST_LIST": "syscall",
"syscall.SYS_SET_THREAD_AREA": "syscall",
"syscall.SYS_SET_TID_ADDRESS": "syscall",
"syscall.SYS_SGETMASK": "syscall",
"syscall.SYS_SHARED_REGION_CHECK_NP": "syscall",
"syscall.SYS_SHARED_REGION_MAP_AND_SLIDE_NP": "syscall",
"syscall.SYS_SHMAT": "syscall",
"syscall.SYS_SHMCTL": "syscall",
"syscall.SYS_SHMDT": "syscall",
"syscall.SYS_SHMGET": "syscall",
"syscall.SYS_SHMSYS": "syscall",
"syscall.SYS_SHM_OPEN": "syscall",
"syscall.SYS_SHM_UNLINK": "syscall",
"syscall.SYS_SHUTDOWN": "syscall",
"syscall.SYS_SIGACTION": "syscall",
"syscall.SYS_SIGALTSTACK": "syscall",
"syscall.SYS_SIGNAL": "syscall",
"syscall.SYS_SIGNALFD": "syscall",
"syscall.SYS_SIGNALFD4": "syscall",
"syscall.SYS_SIGPENDING": "syscall",
"syscall.SYS_SIGPROCMASK": "syscall",
"syscall.SYS_SIGQUEUE": "syscall",
"syscall.SYS_SIGQUEUEINFO": "syscall",
"syscall.SYS_SIGRETURN": "syscall",
"syscall.SYS_SIGSUSPEND": "syscall",
"syscall.SYS_SIGSUSPEND_NOCANCEL": "syscall",
"syscall.SYS_SIGTIMEDWAIT": "syscall",
"syscall.SYS_SIGWAIT": "syscall",
"syscall.SYS_SIGWAITINFO": "syscall",
"syscall.SYS_SOCKET": "syscall",
"syscall.SYS_SOCKETCALL": "syscall",
"syscall.SYS_SOCKETPAIR": "syscall",
"syscall.SYS_SPLICE": "syscall",
"syscall.SYS_SSETMASK": "syscall",
"syscall.SYS_SSTK": "syscall",
"syscall.SYS_STACK_SNAPSHOT": "syscall",
"syscall.SYS_STAT": "syscall",
"syscall.SYS_STAT64": "syscall",
"syscall.SYS_STAT64_EXTENDED": "syscall",
"syscall.SYS_STATFS": "syscall",
"syscall.SYS_STATFS64": "syscall",
"syscall.SYS_STATV": "syscall",
"syscall.SYS_STATVFS1": "syscall",
"syscall.SYS_STAT_EXTENDED": "syscall",
"syscall.SYS_STIME": "syscall",
"syscall.SYS_STTY": "syscall",
"syscall.SYS_SWAPCONTEXT": "syscall",
"syscall.SYS_SWAPCTL": "syscall",
"syscall.SYS_SWAPOFF": "syscall",
"syscall.SYS_SWAPON": "syscall",
"syscall.SYS_SYMLINK": "syscall",
"syscall.SYS_SYMLINKAT": "syscall",
"syscall.SYS_SYNC": "syscall",
"syscall.SYS_SYNCFS": "syscall",
"syscall.SYS_SYNC_FILE_RANGE": "syscall",
"syscall.SYS_SYSARCH": "syscall",
"syscall.SYS_SYSCALL": "syscall",
"syscall.SYS_SYSCALL_BASE": "syscall",
"syscall.SYS_SYSFS": "syscall",
"syscall.SYS_SYSINFO": "syscall",
"syscall.SYS_SYSLOG": "syscall",
"syscall.SYS_TEE": "syscall",
"syscall.SYS_TGKILL": "syscall",
"syscall.SYS_THREAD_SELFID": "syscall",
"syscall.SYS_THR_CREATE": "syscall",
"syscall.SYS_THR_EXIT": "syscall",
"syscall.SYS_THR_KILL": "syscall",
"syscall.SYS_THR_KILL2": "syscall",
"syscall.SYS_THR_NEW": "syscall",
"syscall.SYS_THR_SELF": "syscall",
"syscall.SYS_THR_SET_NAME": "syscall",
"syscall.SYS_THR_SUSPEND": "syscall",
"syscall.SYS_THR_WAKE": "syscall",
"syscall.SYS_TIME": "syscall",
"syscall.SYS_TIMERFD_CREATE": "syscall",
"syscall.SYS_TIMERFD_GETTIME": "syscall",
"syscall.SYS_TIMERFD_SETTIME": "syscall",
"syscall.SYS_TIMER_CREATE": "syscall",
"syscall.SYS_TIMER_DELETE": "syscall",
"syscall.SYS_TIMER_GETOVERRUN": "syscall",
"syscall.SYS_TIMER_GETTIME": "syscall",
"syscall.SYS_TIMER_SETTIME": "syscall",
"syscall.SYS_TIMES": "syscall",
"syscall.SYS_TKILL": "syscall",
"syscall.SYS_TRUNCATE": "syscall",
"syscall.SYS_TRUNCATE64": "syscall",
"syscall.SYS_TUXCALL": "syscall",
"syscall.SYS_UGETRLIMIT": "syscall",
"syscall.SYS_ULIMIT": "syscall",
"syscall.SYS_UMASK": "syscall",
"syscall.SYS_UMASK_EXTENDED": "syscall",
"syscall.SYS_UMOUNT": "syscall",
"syscall.SYS_UMOUNT2": "syscall",
"syscall.SYS_UNAME": "syscall",
"syscall.SYS_UNDELETE": "syscall",
"syscall.SYS_UNLINK": "syscall",
"syscall.SYS_UNLINKAT": "syscall",
"syscall.SYS_UNMOUNT": "syscall",
"syscall.SYS_UNSHARE": "syscall",
"syscall.SYS_USELIB": "syscall",
"syscall.SYS_USTAT": "syscall",
"syscall.SYS_UTIME": "syscall",
"syscall.SYS_UTIMENSAT": "syscall",
"syscall.SYS_UTIMES": "syscall",
"syscall.SYS_UTRACE": "syscall",
"syscall.SYS_UUIDGEN": "syscall",
"syscall.SYS_VADVISE": "syscall",
"syscall.SYS_VFORK": "syscall",
"syscall.SYS_VHANGUP": "syscall",
"syscall.SYS_VM86": "syscall",
"syscall.SYS_VM86OLD": "syscall",
"syscall.SYS_VMSPLICE": "syscall",
"syscall.SYS_VM_PRESSURE_MONITOR": "syscall",
"syscall.SYS_VSERVER": "syscall",
"syscall.SYS_WAIT4": "syscall",
"syscall.SYS_WAIT4_NOCANCEL": "syscall",
"syscall.SYS_WAIT6": "syscall",
"syscall.SYS_WAITEVENT": "syscall",
"syscall.SYS_WAITID": "syscall",
"syscall.SYS_WAITID_NOCANCEL": "syscall",
"syscall.SYS_WAITPID": "syscall",
"syscall.SYS_WATCHEVENT": "syscall",
"syscall.SYS_WORKQ_KERNRETURN": "syscall",
"syscall.SYS_WORKQ_OPEN": "syscall",
"syscall.SYS_WRITE": "syscall",
"syscall.SYS_WRITEV": "syscall",
"syscall.SYS_WRITEV_NOCANCEL": "syscall",
"syscall.SYS_WRITE_NOCANCEL": "syscall",
"syscall.SYS_YIELD": "syscall",
"syscall.SYS__LLSEEK": "syscall",
"syscall.SYS__LWP_CONTINUE": "syscall",
"syscall.SYS__LWP_CREATE": "syscall",
"syscall.SYS__LWP_CTL": "syscall",
"syscall.SYS__LWP_DETACH": "syscall",
"syscall.SYS__LWP_EXIT": "syscall",
"syscall.SYS__LWP_GETNAME": "syscall",
"syscall.SYS__LWP_GETPRIVATE": "syscall",
"syscall.SYS__LWP_KILL": "syscall",
"syscall.SYS__LWP_PARK": "syscall",
"syscall.SYS__LWP_SELF": "syscall",
"syscall.SYS__LWP_SETNAME": "syscall",
"syscall.SYS__LWP_SETPRIVATE": "syscall",
"syscall.SYS__LWP_SUSPEND": "syscall",
"syscall.SYS__LWP_UNPARK": "syscall",
"syscall.SYS__LWP_UNPARK_ALL": "syscall",
"syscall.SYS__LWP_WAIT": "syscall",
"syscall.SYS__LWP_WAKEUP": "syscall",
"syscall.SYS__NEWSELECT": "syscall",
"syscall.SYS__PSET_BIND": "syscall",
"syscall.SYS__SCHED_GETAFFINITY": "syscall",
"syscall.SYS__SCHED_GETPARAM": "syscall",
"syscall.SYS__SCHED_SETAFFINITY": "syscall",
"syscall.SYS__SCHED_SETPARAM": "syscall",
"syscall.SYS__SYSCTL": "syscall",
"syscall.SYS__UMTX_LOCK": "syscall",
"syscall.SYS__UMTX_OP": "syscall",
"syscall.SYS__UMTX_UNLOCK": "syscall",
"syscall.SYS___ACL_ACLCHECK_FD": "syscall",
"syscall.SYS___ACL_ACLCHECK_FILE": "syscall",
"syscall.SYS___ACL_ACLCHECK_LINK": "syscall",
"syscall.SYS___ACL_DELETE_FD": "syscall",
"syscall.SYS___ACL_DELETE_FILE": "syscall",
"syscall.SYS___ACL_DELETE_LINK": "syscall",
"syscall.SYS___ACL_GET_FD": "syscall",
"syscall.SYS___ACL_GET_FILE": "syscall",
"syscall.SYS___ACL_GET_LINK": "syscall",
"syscall.SYS___ACL_SET_FD": "syscall",
"syscall.SYS___ACL_SET_FILE": "syscall",
"syscall.SYS___ACL_SET_LINK": "syscall",
"syscall.SYS___CLONE": "syscall",
"syscall.SYS___DISABLE_THREADSIGNAL": "syscall",
"syscall.SYS___GETCWD": "syscall",
"syscall.SYS___GETLOGIN": "syscall",
"syscall.SYS___GET_TCB": "syscall",
"syscall.SYS___MAC_EXECVE": "syscall",
"syscall.SYS___MAC_GETFSSTAT": "syscall",
"syscall.SYS___MAC_GET_FD": "syscall",
"syscall.SYS___MAC_GET_FILE": "syscall",
"syscall.SYS___MAC_GET_LCID": "syscall",
"syscall.SYS___MAC_GET_LCTX": "syscall",
"syscall.SYS___MAC_GET_LINK": "syscall",
"syscall.SYS___MAC_GET_MOUNT": "syscall",
"syscall.SYS___MAC_GET_PID": "syscall",
"syscall.SYS___MAC_GET_PROC": "syscall",
"syscall.SYS___MAC_MOUNT": "syscall",
"syscall.SYS___MAC_SET_FD": "syscall",
"syscall.SYS___MAC_SET_FILE": "syscall",
"syscall.SYS___MAC_SET_LCTX": "syscall",
"syscall.SYS___MAC_SET_LINK": "syscall",
"syscall.SYS___MAC_SET_PROC": "syscall",
"syscall.SYS___MAC_SYSCALL": "syscall",
"syscall.SYS___OLD_SEMWAIT_SIGNAL": "syscall",
"syscall.SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL": "syscall",
"syscall.SYS___POSIX_CHOWN": "syscall",
"syscall.SYS___POSIX_FCHOWN": "syscall",
"syscall.SYS___POSIX_LCHOWN": "syscall",
"syscall.SYS___POSIX_RENAME": "syscall",
"syscall.SYS___PTHREAD_CANCELED": "syscall",
"syscall.SYS___PTHREAD_CHDIR": "syscall",
"syscall.SYS___PTHREAD_FCHDIR": "syscall",
"syscall.SYS___PTHREAD_KILL": "syscall",
"syscall.SYS___PTHREAD_MARKCANCEL": "syscall",
"syscall.SYS___PTHREAD_SIGMASK": "syscall",
"syscall.SYS___QUOTACTL": "syscall",
"syscall.SYS___SEMCTL": "syscall",
"syscall.SYS___SEMWAIT_SIGNAL": "syscall",
"syscall.SYS___SEMWAIT_SIGNAL_NOCANCEL": "syscall",
"syscall.SYS___SETLOGIN": "syscall",
"syscall.SYS___SETUGID": "syscall",
"syscall.SYS___SET_TCB": "syscall",
"syscall.SYS___SIGACTION_SIGTRAMP": "syscall",
"syscall.SYS___SIGTIMEDWAIT": "syscall",
"syscall.SYS___SIGWAIT": "syscall",
"syscall.SYS___SIGWAIT_NOCANCEL": "syscall",
"syscall.SYS___SYSCTL": "syscall",
"syscall.SYS___TFORK": "syscall",
"syscall.SYS___THREXIT": "syscall",
"syscall.SYS___THRSIGDIVERT": "syscall",
"syscall.SYS___THRSLEEP": "syscall",
"syscall.SYS___THRWAKEUP": "syscall",
"syscall.S_ARCH1": "syscall",
"syscall.S_ARCH2": "syscall",
"syscall.S_BLKSIZE": "syscall",
"syscall.S_IEXEC": "syscall",
"syscall.S_IFBLK": "syscall",
"syscall.S_IFCHR": "syscall",
"syscall.S_IFDIR": "syscall",
"syscall.S_IFIFO": "syscall",
"syscall.S_IFLNK": "syscall",
"syscall.S_IFMT": "syscall",
"syscall.S_IFREG": "syscall",
"syscall.S_IFSOCK": "syscall",
"syscall.S_IFWHT": "syscall",
"syscall.S_IREAD": "syscall",
"syscall.S_IRGRP": "syscall",
"syscall.S_IROTH": "syscall",
"syscall.S_IRUSR": "syscall",
"syscall.S_IRWXG": "syscall",
"syscall.S_IRWXO": "syscall",
"syscall.S_IRWXU": "syscall",
"syscall.S_ISGID": "syscall",
"syscall.S_ISTXT": "syscall",
"syscall.S_ISUID": "syscall",
"syscall.S_ISVTX": "syscall",
"syscall.S_IWGRP": "syscall",
"syscall.S_IWOTH": "syscall",
"syscall.S_IWRITE": "syscall",
"syscall.S_IWUSR": "syscall",
"syscall.S_IXGRP": "syscall",
"syscall.S_IXOTH": "syscall",
"syscall.S_IXUSR": "syscall",
"syscall.S_LOGIN_SET": "syscall",
"syscall.SecurityAttributes": "syscall",
"syscall.Seek": "syscall",
"syscall.Select": "syscall",
"syscall.Sendfile": "syscall",
"syscall.Sendmsg": "syscall",
"syscall.SendmsgN": "syscall",
"syscall.Sendto": "syscall",
"syscall.Servent": "syscall",
"syscall.SetBpf": "syscall",
"syscall.SetBpfBuflen": "syscall",
"syscall.SetBpfDatalink": "syscall",
"syscall.SetBpfHeadercmpl": "syscall",
"syscall.SetBpfImmediate": "syscall",
"syscall.SetBpfInterface": "syscall",
"syscall.SetBpfPromisc": "syscall",
"syscall.SetBpfTimeout": "syscall",
"syscall.SetCurrentDirectory": "syscall",
"syscall.SetEndOfFile": "syscall",
"syscall.SetEnvironmentVariable": "syscall",
"syscall.SetFileAttributes": "syscall",
"syscall.SetFileCompletionNotificationModes": "syscall",
"syscall.SetFilePointer": "syscall",
"syscall.SetFileTime": "syscall",
"syscall.SetHandleInformation": "syscall",
"syscall.SetKevent": "syscall",
"syscall.SetLsfPromisc": "syscall",
"syscall.SetNonblock": "syscall",
"syscall.Setdomainname": "syscall",
"syscall.Setegid": "syscall",
"syscall.Setenv": "syscall",
"syscall.Seteuid": "syscall",
"syscall.Setfsgid": "syscall",
"syscall.Setfsuid": "syscall",
"syscall.Setgid": "syscall",
"syscall.Setgroups": "syscall",
"syscall.Sethostname": "syscall",
"syscall.Setlogin": "syscall",
"syscall.Setpgid": "syscall",
"syscall.Setpriority": "syscall",
"syscall.Setprivexec": "syscall",
"syscall.Setregid": "syscall",
"syscall.Setresgid": "syscall",
"syscall.Setresuid": "syscall",
"syscall.Setreuid": "syscall",
"syscall.Setrlimit": "syscall",
"syscall.Setsid": "syscall",
"syscall.Setsockopt": "syscall",
"syscall.SetsockoptByte": "syscall",
"syscall.SetsockoptICMPv6Filter": "syscall",
"syscall.SetsockoptIPMreq": "syscall",
"syscall.SetsockoptIPMreqn": "syscall",
"syscall.SetsockoptIPv6Mreq": "syscall",
"syscall.SetsockoptInet4Addr": "syscall",
"syscall.SetsockoptInt": "syscall",
"syscall.SetsockoptLinger": "syscall",
"syscall.SetsockoptString": "syscall",
"syscall.SetsockoptTimeval": "syscall",
"syscall.Settimeofday": "syscall",
"syscall.Setuid": "syscall",
"syscall.Setxattr": "syscall",
"syscall.Shutdown": "syscall",
"syscall.SidTypeAlias": "syscall",
"syscall.SidTypeComputer": "syscall",
"syscall.SidTypeDeletedAccount": "syscall",
"syscall.SidTypeDomain": "syscall",
"syscall.SidTypeGroup": "syscall",
"syscall.SidTypeInvalid": "syscall",
"syscall.SidTypeLabel": "syscall",
"syscall.SidTypeUnknown": "syscall",
"syscall.SidTypeUser": "syscall",
"syscall.SidTypeWellKnownGroup": "syscall",
"syscall.Signal": "syscall",
"syscall.SizeofBpfHdr": "syscall",
"syscall.SizeofBpfInsn": "syscall",
"syscall.SizeofBpfProgram": "syscall",
"syscall.SizeofBpfStat": "syscall",
"syscall.SizeofBpfVersion": "syscall",
"syscall.SizeofBpfZbuf": "syscall",
"syscall.SizeofBpfZbufHeader": "syscall",
"syscall.SizeofCmsghdr": "syscall",
"syscall.SizeofICMPv6Filter": "syscall",
"syscall.SizeofIPMreq": "syscall",
"syscall.SizeofIPMreqn": "syscall",
"syscall.SizeofIPv6MTUInfo": "syscall",
"syscall.SizeofIPv6Mreq": "syscall",
"syscall.SizeofIfAddrmsg": "syscall",
"syscall.SizeofIfAnnounceMsghdr": "syscall",
"syscall.SizeofIfData": "syscall",
"syscall.SizeofIfInfomsg": "syscall",
"syscall.SizeofIfMsghdr": "syscall",
"syscall.SizeofIfaMsghdr": "syscall",
"syscall.SizeofIfmaMsghdr": "syscall",
"syscall.SizeofIfmaMsghdr2": "syscall",
"syscall.SizeofInet4Pktinfo": "syscall",
"syscall.SizeofInet6Pktinfo": "syscall",
"syscall.SizeofInotifyEvent": "syscall",
"syscall.SizeofLinger": "syscall",
"syscall.SizeofMsghdr": "syscall",
"syscall.SizeofNlAttr": "syscall",
"syscall.SizeofNlMsgerr": "syscall",
"syscall.SizeofNlMsghdr": "syscall",
"syscall.SizeofRtAttr": "syscall",
"syscall.SizeofRtGenmsg": "syscall",
"syscall.SizeofRtMetrics": "syscall",
"syscall.SizeofRtMsg": "syscall",
"syscall.SizeofRtMsghdr": "syscall",
"syscall.SizeofRtNexthop": "syscall",
"syscall.SizeofSockFilter": "syscall",
"syscall.SizeofSockFprog": "syscall",
"syscall.SizeofSockaddrAny": "syscall",
"syscall.SizeofSockaddrDatalink": "syscall",
"syscall.SizeofSockaddrInet4": "syscall",
"syscall.SizeofSockaddrInet6": "syscall",
"syscall.SizeofSockaddrLinklayer": "syscall",
"syscall.SizeofSockaddrNetlink": "syscall",
"syscall.SizeofSockaddrUnix": "syscall",
"syscall.SizeofTCPInfo": "syscall",
"syscall.SizeofUcred": "syscall",
"syscall.SlicePtrFromStrings": "syscall",
"syscall.SockFilter": "syscall",
"syscall.SockFprog": "syscall",
"syscall.SockaddrDatalink": "syscall",
"syscall.SockaddrGen": "syscall",
"syscall.SockaddrInet4": "syscall",
"syscall.SockaddrInet6": "syscall",
"syscall.SockaddrLinklayer": "syscall",
"syscall.SockaddrNetlink": "syscall",
"syscall.SockaddrUnix": "syscall",
"syscall.Socket": "syscall",
"syscall.SocketControlMessage": "syscall",
"syscall.SocketDisableIPv6": "syscall",
"syscall.Socketpair": "syscall",
"syscall.Splice": "syscall",
"syscall.StartProcess": "syscall",
"syscall.StartupInfo": "syscall",
"syscall.Stat": "syscall",
"syscall.Stat_t": "syscall",
"syscall.Statfs": "syscall",
"syscall.Statfs_t": "syscall",
"syscall.Stderr": "syscall",
"syscall.Stdin": "syscall",
"syscall.Stdout": "syscall",
"syscall.StringBytePtr": "syscall",
"syscall.StringByteSlice": "syscall",
"syscall.StringSlicePtr": "syscall",
"syscall.StringToSid": "syscall",
"syscall.StringToUTF16": "syscall",
"syscall.StringToUTF16Ptr": "syscall",
"syscall.Symlink": "syscall",
"syscall.Sync": "syscall",
"syscall.SyncFileRange": "syscall",
"syscall.SysProcAttr": "syscall",
"syscall.SysProcIDMap": "syscall",
"syscall.Syscall": "syscall",
"syscall.Syscall12": "syscall",
"syscall.Syscall15": "syscall",
"syscall.Syscall6": "syscall",
"syscall.Syscall9": "syscall",
"syscall.Sysctl": "syscall",
"syscall.SysctlUint32": "syscall",
"syscall.Sysctlnode": "syscall",
"syscall.Sysinfo": "syscall",
"syscall.Sysinfo_t": "syscall",
"syscall.Systemtime": "syscall",
"syscall.TCGETS": "syscall",
"syscall.TCIFLUSH": "syscall",
"syscall.TCIOFLUSH": "syscall",
"syscall.TCOFLUSH": "syscall",
"syscall.TCPInfo": "syscall",
"syscall.TCPKeepalive": "syscall",
"syscall.TCP_CA_NAME_MAX": "syscall",
"syscall.TCP_CONGCTL": "syscall",
"syscall.TCP_CONGESTION": "syscall",
"syscall.TCP_CONNECTIONTIMEOUT": "syscall",
"syscall.TCP_CORK": "syscall",
"syscall.TCP_DEFER_ACCEPT": "syscall",
"syscall.TCP_INFO": "syscall",
"syscall.TCP_KEEPALIVE": "syscall",
"syscall.TCP_KEEPCNT": "syscall",
"syscall.TCP_KEEPIDLE": "syscall",
"syscall.TCP_KEEPINIT": "syscall",
"syscall.TCP_KEEPINTVL": "syscall",
"syscall.TCP_LINGER2": "syscall",
"syscall.TCP_MAXBURST": "syscall",
"syscall.TCP_MAXHLEN": "syscall",
"syscall.TCP_MAXOLEN": "syscall",
"syscall.TCP_MAXSEG": "syscall",
"syscall.TCP_MAXWIN": "syscall",
"syscall.TCP_MAX_SACK": "syscall",
"syscall.TCP_MAX_WINSHIFT": "syscall",
"syscall.TCP_MD5SIG": "syscall",
"syscall.TCP_MD5SIG_MAXKEYLEN": "syscall",
"syscall.TCP_MINMSS": "syscall",
"syscall.TCP_MINMSSOVERLOAD": "syscall",
"syscall.TCP_MSS": "syscall",
"syscall.TCP_NODELAY": "syscall",
"syscall.TCP_NOOPT": "syscall",
"syscall.TCP_NOPUSH": "syscall",
"syscall.TCP_NSTATES": "syscall",
"syscall.TCP_QUICKACK": "syscall",
"syscall.TCP_RXT_CONNDROPTIME": "syscall",
"syscall.TCP_RXT_FINDROP": "syscall",
"syscall.TCP_SACK_ENABLE": "syscall",
"syscall.TCP_SYNCNT": "syscall",
"syscall.TCP_VENDOR": "syscall",
"syscall.TCP_WINDOW_CLAMP": "syscall",
"syscall.TCSAFLUSH": "syscall",
"syscall.TCSETS": "syscall",
"syscall.TF_DISCONNECT": "syscall",
"syscall.TF_REUSE_SOCKET": "syscall",
"syscall.TF_USE_DEFAULT_WORKER": "syscall",
"syscall.TF_USE_KERNEL_APC": "syscall",
"syscall.TF_USE_SYSTEM_THREAD": "syscall",
"syscall.TF_WRITE_BEHIND": "syscall",
"syscall.TH32CS_INHERIT": "syscall",
"syscall.TH32CS_SNAPALL": "syscall",
"syscall.TH32CS_SNAPHEAPLIST": "syscall",
"syscall.TH32CS_SNAPMODULE": "syscall",
"syscall.TH32CS_SNAPMODULE32": "syscall",
"syscall.TH32CS_SNAPPROCESS": "syscall",
"syscall.TH32CS_SNAPTHREAD": "syscall",
"syscall.TIME_ZONE_ID_DAYLIGHT": "syscall",
"syscall.TIME_ZONE_ID_STANDARD": "syscall",
"syscall.TIME_ZONE_ID_UNKNOWN": "syscall",
"syscall.TIOCCBRK": "syscall",
"syscall.TIOCCDTR": "syscall",
"syscall.TIOCCONS": "syscall",
"syscall.TIOCDCDTIMESTAMP": "syscall",
"syscall.TIOCDRAIN": "syscall",
"syscall.TIOCDSIMICROCODE": "syscall",
"syscall.TIOCEXCL": "syscall",
"syscall.TIOCEXT": "syscall",
"syscall.TIOCFLAG_CDTRCTS": "syscall",
"syscall.TIOCFLAG_CLOCAL": "syscall",
"syscall.TIOCFLAG_CRTSCTS": "syscall",
"syscall.TIOCFLAG_MDMBUF": "syscall",
"syscall.TIOCFLAG_PPS": "syscall",
"syscall.TIOCFLAG_SOFTCAR": "syscall",
"syscall.TIOCFLUSH": "syscall",
"syscall.TIOCGDEV": "syscall",
"syscall.TIOCGDRAINWAIT": "syscall",
"syscall.TIOCGETA": "syscall",
"syscall.TIOCGETD": "syscall",
"syscall.TIOCGFLAGS": "syscall",
"syscall.TIOCGICOUNT": "syscall",
"syscall.TIOCGLCKTRMIOS": "syscall",
"syscall.TIOCGLINED": "syscall",
"syscall.TIOCGPGRP": "syscall",
"syscall.TIOCGPTN": "syscall",
"syscall.TIOCGQSIZE": "syscall",
"syscall.TIOCGRANTPT": "syscall",
"syscall.TIOCGRS485": "syscall",
"syscall.TIOCGSERIAL": "syscall",
"syscall.TIOCGSID": "syscall",
"syscall.TIOCGSIZE": "syscall",
"syscall.TIOCGSOFTCAR": "syscall",
"syscall.TIOCGTSTAMP": "syscall",
"syscall.TIOCGWINSZ": "syscall",
"syscall.TIOCINQ": "syscall",
"syscall.TIOCIXOFF": "syscall",
"syscall.TIOCIXON": "syscall",
"syscall.TIOCLINUX": "syscall",
"syscall.TIOCMBIC": "syscall",
"syscall.TIOCMBIS": "syscall",
"syscall.TIOCMGDTRWAIT": "syscall",
"syscall.TIOCMGET": "syscall",
"syscall.TIOCMIWAIT": "syscall",
"syscall.TIOCMODG": "syscall",
"syscall.TIOCMODS": "syscall",
"syscall.TIOCMSDTRWAIT": "syscall",
"syscall.TIOCMSET": "syscall",
"syscall.TIOCM_CAR": "syscall",
"syscall.TIOCM_CD": "syscall",
"syscall.TIOCM_CTS": "syscall",
"syscall.TIOCM_DCD": "syscall",
"syscall.TIOCM_DSR": "syscall",
"syscall.TIOCM_DTR": "syscall",
"syscall.TIOCM_LE": "syscall",
"syscall.TIOCM_RI": "syscall",
"syscall.TIOCM_RNG": "syscall",
"syscall.TIOCM_RTS": "syscall",
"syscall.TIOCM_SR": "syscall",
"syscall.TIOCM_ST": "syscall",
"syscall.TIOCNOTTY": "syscall",
"syscall.TIOCNXCL": "syscall",
"syscall.TIOCOUTQ": "syscall",
"syscall.TIOCPKT": "syscall",
"syscall.TIOCPKT_DATA": "syscall",
"syscall.TIOCPKT_DOSTOP": "syscall",
"syscall.TIOCPKT_FLUSHREAD": "syscall",
"syscall.TIOCPKT_FLUSHWRITE": "syscall",
"syscall.TIOCPKT_IOCTL": "syscall",
"syscall.TIOCPKT_NOSTOP": "syscall",
"syscall.TIOCPKT_START": "syscall",
"syscall.TIOCPKT_STOP": "syscall",
"syscall.TIOCPTMASTER": "syscall",
"syscall.TIOCPTMGET": "syscall",
"syscall.TIOCPTSNAME": "syscall",
"syscall.TIOCPTYGNAME": "syscall",
"syscall.TIOCPTYGRANT": "syscall",
"syscall.TIOCPTYUNLK": "syscall",
"syscall.TIOCRCVFRAME": "syscall",
"syscall.TIOCREMOTE": "syscall",
"syscall.TIOCSBRK": "syscall",
"syscall.TIOCSCONS": "syscall",
"syscall.TIOCSCTTY": "syscall",
"syscall.TIOCSDRAINWAIT": "syscall",
"syscall.TIOCSDTR": "syscall",
"syscall.TIOCSERCONFIG": "syscall",
"syscall.TIOCSERGETLSR": "syscall",
"syscall.TIOCSERGETMULTI": "syscall",
"syscall.TIOCSERGSTRUCT": "syscall",
"syscall.TIOCSERGWILD": "syscall",
"syscall.TIOCSERSETMULTI": "syscall",
"syscall.TIOCSERSWILD": "syscall",
"syscall.TIOCSER_TEMT": "syscall",
"syscall.TIOCSETA": "syscall",
"syscall.TIOCSETAF": "syscall",
"syscall.TIOCSETAW": "syscall",
"syscall.TIOCSETD": "syscall",
"syscall.TIOCSFLAGS": "syscall",
"syscall.TIOCSIG": "syscall",
"syscall.TIOCSLCKTRMIOS": "syscall",
"syscall.TIOCSLINED": "syscall",
"syscall.TIOCSPGRP": "syscall",
"syscall.TIOCSPTLCK": "syscall",
"syscall.TIOCSQSIZE": "syscall",
"syscall.TIOCSRS485": "syscall",
"syscall.TIOCSSERIAL": "syscall",
"syscall.TIOCSSIZE": "syscall",
"syscall.TIOCSSOFTCAR": "syscall",
"syscall.TIOCSTART": "syscall",
"syscall.TIOCSTAT": "syscall",
"syscall.TIOCSTI": "syscall",
"syscall.TIOCSTOP": "syscall",
"syscall.TIOCSTSTAMP": "syscall",
"syscall.TIOCSWINSZ": "syscall",
"syscall.TIOCTIMESTAMP": "syscall",
"syscall.TIOCUCNTL": "syscall",
"syscall.TIOCVHANGUP": "syscall",
"syscall.TIOCXMTFRAME": "syscall",
"syscall.TOKEN_ADJUST_DEFAULT": "syscall",
"syscall.TOKEN_ADJUST_GROUPS": "syscall",
"syscall.TOKEN_ADJUST_PRIVILEGES": "syscall",
"syscall.TOKEN_ALL_ACCESS": "syscall",
"syscall.TOKEN_ASSIGN_PRIMARY": "syscall",
"syscall.TOKEN_DUPLICATE": "syscall",
"syscall.TOKEN_EXECUTE": "syscall",
"syscall.TOKEN_IMPERSONATE": "syscall",
"syscall.TOKEN_QUERY": "syscall",
"syscall.TOKEN_QUERY_SOURCE": "syscall",
"syscall.TOKEN_READ": "syscall",
"syscall.TOKEN_WRITE": "syscall",
"syscall.TOSTOP": "syscall",
"syscall.TRUNCATE_EXISTING": "syscall",
"syscall.TUNATTACHFILTER": "syscall",
"syscall.TUNDETACHFILTER": "syscall",
"syscall.TUNGETFEATURES": "syscall",
"syscall.TUNGETIFF": "syscall",
"syscall.TUNGETSNDBUF": "syscall",
"syscall.TUNGETVNETHDRSZ": "syscall",
"syscall.TUNSETDEBUG": "syscall",
"syscall.TUNSETGROUP": "syscall",
"syscall.TUNSETIFF": "syscall",
"syscall.TUNSETLINK": "syscall",
"syscall.TUNSETNOCSUM": "syscall",
"syscall.TUNSETOFFLOAD": "syscall",
"syscall.TUNSETOWNER": "syscall",
"syscall.TUNSETPERSIST": "syscall",
"syscall.TUNSETSNDBUF": "syscall",
"syscall.TUNSETTXFILTER": "syscall",
"syscall.TUNSETVNETHDRSZ": "syscall",
"syscall.Tee": "syscall",
"syscall.TerminateProcess": "syscall",
"syscall.Termios": "syscall",
"syscall.Tgkill": "syscall",
"syscall.Time": "syscall",
"syscall.Time_t": "syscall",
"syscall.Times": "syscall",
"syscall.Timespec": "syscall",
"syscall.TimespecToNsec": "syscall",
"syscall.Timeval": "syscall",
"syscall.Timeval32": "syscall",
"syscall.TimevalToNsec": "syscall",
"syscall.Timex": "syscall",
"syscall.Timezoneinformation": "syscall",
"syscall.Tms": "syscall",
"syscall.Token": "syscall",
"syscall.TokenAccessInformation": "syscall",
"syscall.TokenAuditPolicy": "syscall",
"syscall.TokenDefaultDacl": "syscall",
"syscall.TokenElevation": "syscall",
"syscall.TokenElevationType": "syscall",
"syscall.TokenGroups": "syscall",
"syscall.TokenGroupsAndPrivileges": "syscall",
"syscall.TokenHasRestrictions": "syscall",
"syscall.TokenImpersonationLevel": "syscall",
"syscall.TokenIntegrityLevel": "syscall",
"syscall.TokenLinkedToken": "syscall",
"syscall.TokenLogonSid": "syscall",
"syscall.TokenMandatoryPolicy": "syscall",
"syscall.TokenOrigin": "syscall",
"syscall.TokenOwner": "syscall",
"syscall.TokenPrimaryGroup": "syscall",
"syscall.TokenPrivileges": "syscall",
"syscall.TokenRestrictedSids": "syscall",
"syscall.TokenSandBoxInert": "syscall",
"syscall.TokenSessionId": "syscall",
"syscall.TokenSessionReference": "syscall",
"syscall.TokenSource": "syscall",
"syscall.TokenStatistics": "syscall",
"syscall.TokenType": "syscall",
"syscall.TokenUIAccess": "syscall",
"syscall.TokenUser": "syscall",
"syscall.TokenVirtualizationAllowed": "syscall",
"syscall.TokenVirtualizationEnabled": "syscall",
"syscall.Tokenprimarygroup": "syscall",
"syscall.Tokenuser": "syscall",
"syscall.TranslateAccountName": "syscall",
"syscall.TranslateName": "syscall",
"syscall.TransmitFile": "syscall",
"syscall.TransmitFileBuffers": "syscall",
"syscall.Truncate": "syscall",
"syscall.USAGE_MATCH_TYPE_AND": "syscall",
"syscall.USAGE_MATCH_TYPE_OR": "syscall",
"syscall.UTF16FromString": "syscall",
"syscall.UTF16PtrFromString": "syscall",
"syscall.UTF16ToString": "syscall",
"syscall.Ucred": "syscall",
"syscall.Umask": "syscall",
"syscall.Uname": "syscall",
"syscall.Undelete": "syscall",
"syscall.UnixCredentials": "syscall",
"syscall.UnixRights": "syscall",
"syscall.Unlink": "syscall",
"syscall.Unlinkat": "syscall",
"syscall.UnmapViewOfFile": "syscall",
"syscall.Unmount": "syscall",
"syscall.Unsetenv": "syscall",
"syscall.Unshare": "syscall",
"syscall.UserInfo10": "syscall",
"syscall.Ustat": "syscall",
"syscall.Ustat_t": "syscall",
"syscall.Utimbuf": "syscall",
"syscall.Utime": "syscall",
"syscall.Utimes": "syscall",
"syscall.UtimesNano": "syscall",
"syscall.Utsname": "syscall",
"syscall.VDISCARD": "syscall",
"syscall.VDSUSP": "syscall",
"syscall.VEOF": "syscall",
"syscall.VEOL": "syscall",
"syscall.VEOL2": "syscall",
"syscall.VERASE": "syscall",
"syscall.VERASE2": "syscall",
"syscall.VINTR": "syscall",
"syscall.VKILL": "syscall",
"syscall.VLNEXT": "syscall",
"syscall.VMIN": "syscall",
"syscall.VQUIT": "syscall",
"syscall.VREPRINT": "syscall",
"syscall.VSTART": "syscall",
"syscall.VSTATUS": "syscall",
"syscall.VSTOP": "syscall",
"syscall.VSUSP": "syscall",
"syscall.VSWTC": "syscall",
"syscall.VT0": "syscall",
"syscall.VT1": "syscall",
"syscall.VTDLY": "syscall",
"syscall.VTIME": "syscall",
"syscall.VWERASE": "syscall",
"syscall.VirtualLock": "syscall",
"syscall.VirtualUnlock": "syscall",
"syscall.WAIT_ABANDONED": "syscall",
"syscall.WAIT_FAILED": "syscall",
"syscall.WAIT_OBJECT_0": "syscall",
"syscall.WAIT_TIMEOUT": "syscall",
"syscall.WALL": "syscall",
"syscall.WALLSIG": "syscall",
"syscall.WALTSIG": "syscall",
"syscall.WCLONE": "syscall",
"syscall.WCONTINUED": "syscall",
"syscall.WCOREFLAG": "syscall",
"syscall.WEXITED": "syscall",
"syscall.WLINUXCLONE": "syscall",
"syscall.WNOHANG": "syscall",
"syscall.WNOTHREAD": "syscall",
"syscall.WNOWAIT": "syscall",
"syscall.WNOZOMBIE": "syscall",
"syscall.WOPTSCHECKED": "syscall",
"syscall.WORDSIZE": "syscall",
"syscall.WSABuf": "syscall",
"syscall.WSACleanup": "syscall",
"syscall.WSADESCRIPTION_LEN": "syscall",
"syscall.WSAData": "syscall",
"syscall.WSAEACCES": "syscall",
"syscall.WSAECONNRESET": "syscall",
"syscall.WSAEnumProtocols": "syscall",
"syscall.WSAID_CONNECTEX": "syscall",
"syscall.WSAIoctl": "syscall",
"syscall.WSAPROTOCOL_LEN": "syscall",
"syscall.WSAProtocolChain": "syscall",
"syscall.WSAProtocolInfo": "syscall",
"syscall.WSARecv": "syscall",
"syscall.WSARecvFrom": "syscall",
"syscall.WSASYS_STATUS_LEN": "syscall",
"syscall.WSASend": "syscall",
"syscall.WSASendTo": "syscall",
"syscall.WSASendto": "syscall",
"syscall.WSAStartup": "syscall",
"syscall.WSTOPPED": "syscall",
"syscall.WTRAPPED": "syscall",
"syscall.WUNTRACED": "syscall",
"syscall.Wait4": "syscall",
"syscall.WaitForSingleObject": "syscall",
"syscall.WaitStatus": "syscall",
"syscall.Win32FileAttributeData": "syscall",
"syscall.Win32finddata": "syscall",
"syscall.Write": "syscall",
"syscall.WriteConsole": "syscall",
"syscall.WriteFile": "syscall",
"syscall.X509_ASN_ENCODING": "syscall",
"syscall.XCASE": "syscall",
"syscall.XP1_CONNECTIONLESS": "syscall",
"syscall.XP1_CONNECT_DATA": "syscall",
"syscall.XP1_DISCONNECT_DATA": "syscall",
"syscall.XP1_EXPEDITED_DATA": "syscall",
"syscall.XP1_GRACEFUL_CLOSE": "syscall",
"syscall.XP1_GUARANTEED_DELIVERY": "syscall",
"syscall.XP1_GUARANTEED_ORDER": "syscall",
"syscall.XP1_IFS_HANDLES": "syscall",
"syscall.XP1_MESSAGE_ORIENTED": "syscall",
"syscall.XP1_MULTIPOINT_CONTROL_PLANE": "syscall",
"syscall.XP1_MULTIPOINT_DATA_PLANE": "syscall",
"syscall.XP1_PARTIAL_MESSAGE": "syscall",
"syscall.XP1_PSEUDO_STREAM": "syscall",
"syscall.XP1_QOS_SUPPORTED": "syscall",
"syscall.XP1_SAN_SUPPORT_SDP": "syscall",
"syscall.XP1_SUPPORT_BROADCAST": "syscall",
"syscall.XP1_SUPPORT_MULTIPOINT": "syscall",
"syscall.XP1_UNI_RECV": "syscall",
"syscall.XP1_UNI_SEND": "syscall",
"syslog.Dial": "log/syslog",
"syslog.LOG_ALERT": "log/syslog",
"syslog.LOG_AUTH": "log/syslog",
"syslog.LOG_AUTHPRIV": "log/syslog",
"syslog.LOG_CRIT": "log/syslog",
"syslog.LOG_CRON": "log/syslog",
"syslog.LOG_DAEMON": "log/syslog",
"syslog.LOG_DEBUG": "log/syslog",
"syslog.LOG_EMERG": "log/syslog",
"syslog.LOG_ERR": "log/syslog",
"syslog.LOG_FTP": "log/syslog",
"syslog.LOG_INFO": "log/syslog",
"syslog.LOG_KERN": "log/syslog",
"syslog.LOG_LOCAL0": "log/syslog",
"syslog.LOG_LOCAL1": "log/syslog",
"syslog.LOG_LOCAL2": "log/syslog",
"syslog.LOG_LOCAL3": "log/syslog",
"syslog.LOG_LOCAL4": "log/syslog",
"syslog.LOG_LOCAL5": "log/syslog",
"syslog.LOG_LOCAL6": "log/syslog",
"syslog.LOG_LOCAL7": "log/syslog",
"syslog.LOG_LPR": "log/syslog",
"syslog.LOG_MAIL": "log/syslog",
"syslog.LOG_NEWS": "log/syslog",
"syslog.LOG_NOTICE": "log/syslog",
"syslog.LOG_SYSLOG": "log/syslog",
"syslog.LOG_USER": "log/syslog",
"syslog.LOG_UUCP": "log/syslog",
"syslog.LOG_WARNING": "log/syslog",
"syslog.New": "log/syslog",
"syslog.NewLogger": "log/syslog",
"syslog.Priority": "log/syslog",
"syslog.Writer": "log/syslog",
"tabwriter.AlignRight": "text/tabwriter",
"tabwriter.Debug": "text/tabwriter",
"tabwriter.DiscardEmptyColumns": "text/tabwriter",
"tabwriter.Escape": "text/tabwriter",
"tabwriter.FilterHTML": "text/tabwriter",
"tabwriter.NewWriter": "text/tabwriter",
"tabwriter.StripEscape": "text/tabwriter",
"tabwriter.TabIndent": "text/tabwriter",
"tabwriter.Writer": "text/tabwriter",
"tar.ErrFieldTooLong": "archive/tar",
"tar.ErrHeader": "archive/tar",
"tar.ErrWriteAfterClose": "archive/tar",
"tar.ErrWriteTooLong": "archive/tar",
"tar.FileInfoHeader": "archive/tar",
"tar.Header": "archive/tar",
"tar.NewReader": "archive/tar",
"tar.NewWriter": "archive/tar",
"tar.Reader": "archive/tar",
"tar.TypeBlock": "archive/tar",
"tar.TypeChar": "archive/tar",
"tar.TypeCont": "archive/tar",
"tar.TypeDir": "archive/tar",
"tar.TypeFifo": "archive/tar",
"tar.TypeGNULongLink": "archive/tar",
"tar.TypeGNULongName": "archive/tar",
"tar.TypeGNUSparse": "archive/tar",
"tar.TypeLink": "archive/tar",
"tar.TypeReg": "archive/tar",
"tar.TypeRegA": "archive/tar",
"tar.TypeSymlink": "archive/tar",
"tar.TypeXGlobalHeader": "archive/tar",
"tar.TypeXHeader": "archive/tar",
"tar.Writer": "archive/tar",
"template.CSS": "html/template",
"template.ErrAmbigContext": "html/template",
"template.ErrBadHTML": "html/template",
"template.ErrBranchEnd": "html/template",
"template.ErrEndContext": "html/template",
"template.ErrNoSuchTemplate": "html/template",
"template.ErrOutputContext": "html/template",
"template.ErrPartialCharset": "html/template",
"template.ErrPartialEscape": "html/template",
"template.ErrRangeLoopReentry": "html/template",
"template.ErrSlashAmbig": "html/template",
"template.Error": "html/template",
"template.ErrorCode": "html/template",
// "template.FuncMap" is ambiguous
"template.HTML": "html/template",
"template.HTMLAttr": "html/template",
// "template.HTMLEscape" is ambiguous
// "template.HTMLEscapeString" is ambiguous
// "template.HTMLEscaper" is ambiguous
"template.JS": "html/template",
// "template.JSEscape" is ambiguous
// "template.JSEscapeString" is ambiguous
// "template.JSEscaper" is ambiguous
"template.JSStr": "html/template",
// "template.Must" is ambiguous
// "template.New" is ambiguous
"template.OK": "html/template",
// "template.ParseFiles" is ambiguous
// "template.ParseGlob" is ambiguous
// "template.Template" is ambiguous
"template.URL": "html/template",
// "template.URLQueryEscaper" is ambiguous
"testing.AllocsPerRun": "testing",
"testing.B": "testing",
"testing.Benchmark": "testing",
"testing.BenchmarkResult": "testing",
"testing.Cover": "testing",
"testing.CoverBlock": "testing",
"testing.Coverage": "testing",
"testing.InternalBenchmark": "testing",
"testing.InternalExample": "testing",
"testing.InternalTest": "testing",
"testing.M": "testing",
"testing.Main": "testing",
"testing.MainStart": "testing",
"testing.PB": "testing",
"testing.RegisterCover": "testing",
"testing.RunBenchmarks": "testing",
"testing.RunExamples": "testing",
"testing.RunTests": "testing",
"testing.Short": "testing",
"testing.T": "testing",
"testing.Verbose": "testing",
"textproto.CanonicalMIMEHeaderKey": "net/textproto",
"textproto.Conn": "net/textproto",
"textproto.Dial": "net/textproto",
"textproto.Error": "net/textproto",
"textproto.MIMEHeader": "net/textproto",
"textproto.NewConn": "net/textproto",
"textproto.NewReader": "net/textproto",
"textproto.NewWriter": "net/textproto",
"textproto.Pipeline": "net/textproto",
"textproto.ProtocolError": "net/textproto",
"textproto.Reader": "net/textproto",
"textproto.TrimBytes": "net/textproto",
"textproto.TrimString": "net/textproto",
"textproto.Writer": "net/textproto",
"time.ANSIC": "time",
"time.After": "time",
"time.AfterFunc": "time",
"time.April": "time",
"time.August": "time",
"time.Date": "time",
"time.December": "time",
"time.Duration": "time",
"time.February": "time",
"time.FixedZone": "time",
"time.Friday": "time",
"time.Hour": "time",
"time.January": "time",
"time.July": "time",
"time.June": "time",
"time.Kitchen": "time",
"time.LoadLocation": "time",
"time.Local": "time",
"time.Location": "time",
"time.March": "time",
"time.May": "time",
"time.Microsecond": "time",
"time.Millisecond": "time",
"time.Minute": "time",
"time.Monday": "time",
"time.Month": "time",
"time.Nanosecond": "time",
"time.NewTicker": "time",
"time.NewTimer": "time",
"time.November": "time",
"time.Now": "time",
"time.October": "time",
"time.Parse": "time",
"time.ParseDuration": "time",
"time.ParseError": "time",
"time.ParseInLocation": "time",
"time.RFC1123": "time",
"time.RFC1123Z": "time",
"time.RFC3339": "time",
"time.RFC3339Nano": "time",
"time.RFC822": "time",
"time.RFC822Z": "time",
"time.RFC850": "time",
"time.RubyDate": "time",
"time.Saturday": "time",
"time.Second": "time",
"time.September": "time",
"time.Since": "time",
"time.Sleep": "time",
"time.Stamp": "time",
"time.StampMicro": "time",
"time.StampMilli": "time",
"time.StampNano": "time",
"time.Sunday": "time",
"time.Thursday": "time",
"time.Tick": "time",
"time.Ticker": "time",
"time.Time": "time",
"time.Timer": "time",
"time.Tuesday": "time",
"time.UTC": "time",
"time.Unix": "time",
"time.UnixDate": "time",
"time.Wednesday": "time",
"time.Weekday": "time",
"tls.Certificate": "crypto/tls",
"tls.Client": "crypto/tls",
"tls.ClientAuthType": "crypto/tls",
"tls.ClientHelloInfo": "crypto/tls",
"tls.ClientSessionCache": "crypto/tls",
"tls.ClientSessionState": "crypto/tls",
"tls.Config": "crypto/tls",
"tls.Conn": "crypto/tls",
"tls.ConnectionState": "crypto/tls",
"tls.CurveID": "crypto/tls",
"tls.CurveP256": "crypto/tls",
"tls.CurveP384": "crypto/tls",
"tls.CurveP521": "crypto/tls",
"tls.Dial": "crypto/tls",
"tls.DialWithDialer": "crypto/tls",
"tls.Listen": "crypto/tls",
"tls.LoadX509KeyPair": "crypto/tls",
"tls.NewLRUClientSessionCache": "crypto/tls",
"tls.NewListener": "crypto/tls",
"tls.NoClientCert": "crypto/tls",
"tls.RequestClientCert": "crypto/tls",
"tls.RequireAndVerifyClientCert": "crypto/tls",
"tls.RequireAnyClientCert": "crypto/tls",
"tls.Server": "crypto/tls",
"tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": "crypto/tls",
"tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": "crypto/tls",
"tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": "crypto/tls",
"tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": "crypto/tls",
"tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": "crypto/tls",
"tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": "crypto/tls",
"tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": "crypto/tls",
"tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": "crypto/tls",
"tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": "crypto/tls",
"tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": "crypto/tls",
"tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA": "crypto/tls",
"tls.TLS_FALLBACK_SCSV": "crypto/tls",
"tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA": "crypto/tls",
"tls.TLS_RSA_WITH_AES_128_CBC_SHA": "crypto/tls",
"tls.TLS_RSA_WITH_AES_256_CBC_SHA": "crypto/tls",
"tls.TLS_RSA_WITH_RC4_128_SHA": "crypto/tls",
"tls.VerifyClientCertIfGiven": "crypto/tls",
"tls.VersionSSL30": "crypto/tls",
"tls.VersionTLS10": "crypto/tls",
"tls.VersionTLS11": "crypto/tls",
"tls.VersionTLS12": "crypto/tls",
"tls.X509KeyPair": "crypto/tls",
"token.ADD": "go/token",
"token.ADD_ASSIGN": "go/token",
"token.AND": "go/token",
"token.AND_ASSIGN": "go/token",
"token.AND_NOT": "go/token",
"token.AND_NOT_ASSIGN": "go/token",
"token.ARROW": "go/token",
"token.ASSIGN": "go/token",
"token.BREAK": "go/token",
"token.CASE": "go/token",
"token.CHAN": "go/token",
"token.CHAR": "go/token",
"token.COLON": "go/token",
"token.COMMA": "go/token",
"token.COMMENT": "go/token",
"token.CONST": "go/token",
"token.CONTINUE": "go/token",
"token.DEC": "go/token",
"token.DEFAULT": "go/token",
"token.DEFER": "go/token",
"token.DEFINE": "go/token",
"token.ELLIPSIS": "go/token",
"token.ELSE": "go/token",
"token.EOF": "go/token",
"token.EQL": "go/token",
"token.FALLTHROUGH": "go/token",
"token.FLOAT": "go/token",
"token.FOR": "go/token",
"token.FUNC": "go/token",
"token.File": "go/token",
"token.FileSet": "go/token",
"token.GEQ": "go/token",
"token.GO": "go/token",
"token.GOTO": "go/token",
"token.GTR": "go/token",
"token.HighestPrec": "go/token",
"token.IDENT": "go/token",
"token.IF": "go/token",
"token.ILLEGAL": "go/token",
"token.IMAG": "go/token",
"token.IMPORT": "go/token",
"token.INC": "go/token",
"token.INT": "go/token",
"token.INTERFACE": "go/token",
"token.LAND": "go/token",
"token.LBRACE": "go/token",
"token.LBRACK": "go/token",
"token.LEQ": "go/token",
"token.LOR": "go/token",
"token.LPAREN": "go/token",
"token.LSS": "go/token",
"token.Lookup": "go/token",
"token.LowestPrec": "go/token",
"token.MAP": "go/token",
"token.MUL": "go/token",
"token.MUL_ASSIGN": "go/token",
"token.NEQ": "go/token",
"token.NOT": "go/token",
"token.NewFileSet": "go/token",
"token.NoPos": "go/token",
"token.OR": "go/token",
"token.OR_ASSIGN": "go/token",
"token.PACKAGE": "go/token",
"token.PERIOD": "go/token",
"token.Pos": "go/token",
"token.Position": "go/token",
"token.QUO": "go/token",
"token.QUO_ASSIGN": "go/token",
"token.RANGE": "go/token",
"token.RBRACE": "go/token",
"token.RBRACK": "go/token",
"token.REM": "go/token",
"token.REM_ASSIGN": "go/token",
"token.RETURN": "go/token",
"token.RPAREN": "go/token",
"token.SELECT": "go/token",
"token.SEMICOLON": "go/token",
"token.SHL": "go/token",
"token.SHL_ASSIGN": "go/token",
"token.SHR": "go/token",
"token.SHR_ASSIGN": "go/token",
"token.STRING": "go/token",
"token.STRUCT": "go/token",
"token.SUB": "go/token",
"token.SUB_ASSIGN": "go/token",
"token.SWITCH": "go/token",
"token.TYPE": "go/token",
"token.Token": "go/token",
"token.UnaryPrec": "go/token",
"token.VAR": "go/token",
"token.XOR": "go/token",
"token.XOR_ASSIGN": "go/token",
"trace.Start": "runtime/trace",
"trace.Stop": "runtime/trace",
"types.Array": "go/types",
"types.AssertableTo": "go/types",
"types.AssignableTo": "go/types",
"types.Basic": "go/types",
"types.BasicInfo": "go/types",
"types.BasicKind": "go/types",
"types.Bool": "go/types",
"types.Builtin": "go/types",
"types.Byte": "go/types",
"types.Chan": "go/types",
"types.ChanDir": "go/types",
"types.Checker": "go/types",
"types.Comparable": "go/types",
"types.Complex128": "go/types",
"types.Complex64": "go/types",
"types.Config": "go/types",
"types.Const": "go/types",
"types.ConvertibleTo": "go/types",
"types.DefPredeclaredTestFuncs": "go/types",
"types.Error": "go/types",
"types.Eval": "go/types",
"types.ExprString": "go/types",
"types.FieldVal": "go/types",
"types.Float32": "go/types",
"types.Float64": "go/types",
"types.Func": "go/types",
"types.Id": "go/types",
"types.Identical": "go/types",
"types.Implements": "go/types",
"types.Importer": "go/types",
"types.Info": "go/types",
"types.Initializer": "go/types",
"types.Int": "go/types",
"types.Int16": "go/types",
"types.Int32": "go/types",
"types.Int64": "go/types",
"types.Int8": "go/types",
"types.Interface": "go/types",
"types.Invalid": "go/types",
"types.IsBoolean": "go/types",
"types.IsComplex": "go/types",
"types.IsConstType": "go/types",
"types.IsFloat": "go/types",
"types.IsInteger": "go/types",
"types.IsInterface": "go/types",
"types.IsNumeric": "go/types",
"types.IsOrdered": "go/types",
"types.IsString": "go/types",
"types.IsUnsigned": "go/types",
"types.IsUntyped": "go/types",
"types.Label": "go/types",
"types.LookupFieldOrMethod": "go/types",
"types.Map": "go/types",
"types.MethodExpr": "go/types",
"types.MethodSet": "go/types",
"types.MethodVal": "go/types",
"types.MissingMethod": "go/types",
"types.Named": "go/types",
"types.NewArray": "go/types",
"types.NewChan": "go/types",
"types.NewChecker": "go/types",
"types.NewConst": "go/types",
"types.NewField": "go/types",
"types.NewFunc": "go/types",
"types.NewInterface": "go/types",
"types.NewLabel": "go/types",
"types.NewMap": "go/types",
"types.NewMethodSet": "go/types",
"types.NewNamed": "go/types",
"types.NewPackage": "go/types",
"types.NewParam": "go/types",
"types.NewPkgName": "go/types",
"types.NewPointer": "go/types",
"types.NewScope": "go/types",
"types.NewSignature": "go/types",
"types.NewSlice": "go/types",
"types.NewStruct": "go/types",
"types.NewTuple": "go/types",
"types.NewTypeName": "go/types",
"types.NewVar": "go/types",
"types.Nil": "go/types",
"types.ObjectString": "go/types",
"types.Package": "go/types",
"types.PkgName": "go/types",
"types.Pointer": "go/types",
"types.Qualifier": "go/types",
"types.RecvOnly": "go/types",
"types.RelativeTo": "go/types",
"types.Rune": "go/types",
"types.Scope": "go/types",
"types.Selection": "go/types",
"types.SelectionKind": "go/types",
"types.SelectionString": "go/types",
"types.SendOnly": "go/types",
"types.SendRecv": "go/types",
"types.Signature": "go/types",
"types.Sizes": "go/types",
"types.Slice": "go/types",
"types.StdSizes": "go/types",
"types.String": "go/types",
"types.Struct": "go/types",
"types.Tuple": "go/types",
"types.Typ": "go/types",
"types.Type": "go/types",
"types.TypeAndValue": "go/types",
"types.TypeName": "go/types",
"types.TypeString": "go/types",
"types.Uint": "go/types",
"types.Uint16": "go/types",
"types.Uint32": "go/types",
"types.Uint64": "go/types",
"types.Uint8": "go/types",
"types.Uintptr": "go/types",
"types.Universe": "go/types",
"types.Unsafe": "go/types",
"types.UnsafePointer": "go/types",
"types.UntypedBool": "go/types",
"types.UntypedComplex": "go/types",
"types.UntypedFloat": "go/types",
"types.UntypedInt": "go/types",
"types.UntypedNil": "go/types",
"types.UntypedRune": "go/types",
"types.UntypedString": "go/types",
"types.Var": "go/types",
"types.WriteExpr": "go/types",
"types.WriteSignature": "go/types",
"types.WriteType": "go/types",
"unicode.ASCII_Hex_Digit": "unicode",
"unicode.Ahom": "unicode",
"unicode.Anatolian_Hieroglyphs": "unicode",
"unicode.Arabic": "unicode",
"unicode.Armenian": "unicode",
"unicode.Avestan": "unicode",
"unicode.AzeriCase": "unicode",
"unicode.Balinese": "unicode",
"unicode.Bamum": "unicode",
"unicode.Bassa_Vah": "unicode",
"unicode.Batak": "unicode",
"unicode.Bengali": "unicode",
"unicode.Bidi_Control": "unicode",
"unicode.Bopomofo": "unicode",
"unicode.Brahmi": "unicode",
"unicode.Braille": "unicode",
"unicode.Buginese": "unicode",
"unicode.Buhid": "unicode",
"unicode.C": "unicode",
"unicode.Canadian_Aboriginal": "unicode",
"unicode.Carian": "unicode",
"unicode.CaseRange": "unicode",
"unicode.CaseRanges": "unicode",
"unicode.Categories": "unicode",
"unicode.Caucasian_Albanian": "unicode",
"unicode.Cc": "unicode",
"unicode.Cf": "unicode",
"unicode.Chakma": "unicode",
"unicode.Cham": "unicode",
"unicode.Cherokee": "unicode",
"unicode.Co": "unicode",
"unicode.Common": "unicode",
"unicode.Coptic": "unicode",
"unicode.Cs": "unicode",
"unicode.Cuneiform": "unicode",
"unicode.Cypriot": "unicode",
"unicode.Cyrillic": "unicode",
"unicode.Dash": "unicode",
"unicode.Deprecated": "unicode",
"unicode.Deseret": "unicode",
"unicode.Devanagari": "unicode",
"unicode.Diacritic": "unicode",
"unicode.Digit": "unicode",
"unicode.Duployan": "unicode",
"unicode.Egyptian_Hieroglyphs": "unicode",
"unicode.Elbasan": "unicode",
"unicode.Ethiopic": "unicode",
"unicode.Extender": "unicode",
"unicode.FoldCategory": "unicode",
"unicode.FoldScript": "unicode",
"unicode.Georgian": "unicode",
"unicode.Glagolitic": "unicode",
"unicode.Gothic": "unicode",
"unicode.Grantha": "unicode",
"unicode.GraphicRanges": "unicode",
"unicode.Greek": "unicode",
"unicode.Gujarati": "unicode",
"unicode.Gurmukhi": "unicode",
"unicode.Han": "unicode",
"unicode.Hangul": "unicode",
"unicode.Hanunoo": "unicode",
"unicode.Hatran": "unicode",
"unicode.Hebrew": "unicode",
"unicode.Hex_Digit": "unicode",
"unicode.Hiragana": "unicode",
"unicode.Hyphen": "unicode",
"unicode.IDS_Binary_Operator": "unicode",
"unicode.IDS_Trinary_Operator": "unicode",
"unicode.Ideographic": "unicode",
"unicode.Imperial_Aramaic": "unicode",
"unicode.In": "unicode",
"unicode.Inherited": "unicode",
"unicode.Inscriptional_Pahlavi": "unicode",
"unicode.Inscriptional_Parthian": "unicode",
"unicode.Is": "unicode",
"unicode.IsControl": "unicode",
"unicode.IsDigit": "unicode",
"unicode.IsGraphic": "unicode",
"unicode.IsLetter": "unicode",
"unicode.IsLower": "unicode",
"unicode.IsMark": "unicode",
"unicode.IsNumber": "unicode",
"unicode.IsOneOf": "unicode",
"unicode.IsPrint": "unicode",
"unicode.IsPunct": "unicode",
"unicode.IsSpace": "unicode",
"unicode.IsSymbol": "unicode",
"unicode.IsTitle": "unicode",
"unicode.IsUpper": "unicode",
"unicode.Javanese": "unicode",
"unicode.Join_Control": "unicode",
"unicode.Kaithi": "unicode",
"unicode.Kannada": "unicode",
"unicode.Katakana": "unicode",
"unicode.Kayah_Li": "unicode",
"unicode.Kharoshthi": "unicode",
"unicode.Khmer": "unicode",
"unicode.Khojki": "unicode",
"unicode.Khudawadi": "unicode",
"unicode.L": "unicode",
"unicode.Lao": "unicode",
"unicode.Latin": "unicode",
"unicode.Lepcha": "unicode",
"unicode.Letter": "unicode",
"unicode.Limbu": "unicode",
"unicode.Linear_A": "unicode",
"unicode.Linear_B": "unicode",
"unicode.Lisu": "unicode",
"unicode.Ll": "unicode",
"unicode.Lm": "unicode",
"unicode.Lo": "unicode",
"unicode.Logical_Order_Exception": "unicode",
"unicode.Lower": "unicode",
"unicode.LowerCase": "unicode",
"unicode.Lt": "unicode",
"unicode.Lu": "unicode",
"unicode.Lycian": "unicode",
"unicode.Lydian": "unicode",
"unicode.M": "unicode",
"unicode.Mahajani": "unicode",
"unicode.Malayalam": "unicode",
"unicode.Mandaic": "unicode",
"unicode.Manichaean": "unicode",
"unicode.Mark": "unicode",
"unicode.MaxASCII": "unicode",
"unicode.MaxCase": "unicode",
"unicode.MaxLatin1": "unicode",
"unicode.MaxRune": "unicode",
"unicode.Mc": "unicode",
"unicode.Me": "unicode",
"unicode.Meetei_Mayek": "unicode",
"unicode.Mende_Kikakui": "unicode",
"unicode.Meroitic_Cursive": "unicode",
"unicode.Meroitic_Hieroglyphs": "unicode",
"unicode.Miao": "unicode",
"unicode.Mn": "unicode",
"unicode.Modi": "unicode",
"unicode.Mongolian": "unicode",
"unicode.Mro": "unicode",
"unicode.Multani": "unicode",
"unicode.Myanmar": "unicode",
"unicode.N": "unicode",
"unicode.Nabataean": "unicode",
"unicode.Nd": "unicode",
"unicode.New_Tai_Lue": "unicode",
"unicode.Nko": "unicode",
"unicode.Nl": "unicode",
"unicode.No": "unicode",
"unicode.Noncharacter_Code_Point": "unicode",
"unicode.Number": "unicode",
"unicode.Ogham": "unicode",
"unicode.Ol_Chiki": "unicode",
"unicode.Old_Hungarian": "unicode",
"unicode.Old_Italic": "unicode",
"unicode.Old_North_Arabian": "unicode",
"unicode.Old_Permic": "unicode",
"unicode.Old_Persian": "unicode",
"unicode.Old_South_Arabian": "unicode",
"unicode.Old_Turkic": "unicode",
"unicode.Oriya": "unicode",
"unicode.Osmanya": "unicode",
"unicode.Other": "unicode",
"unicode.Other_Alphabetic": "unicode",
"unicode.Other_Default_Ignorable_Code_Point": "unicode",
"unicode.Other_Grapheme_Extend": "unicode",
"unicode.Other_ID_Continue": "unicode",
"unicode.Other_ID_Start": "unicode",
"unicode.Other_Lowercase": "unicode",
"unicode.Other_Math": "unicode",
"unicode.Other_Uppercase": "unicode",
"unicode.P": "unicode",
"unicode.Pahawh_Hmong": "unicode",
"unicode.Palmyrene": "unicode",
"unicode.Pattern_Syntax": "unicode",
"unicode.Pattern_White_Space": "unicode",
"unicode.Pau_Cin_Hau": "unicode",
"unicode.Pc": "unicode",
"unicode.Pd": "unicode",
"unicode.Pe": "unicode",
"unicode.Pf": "unicode",
"unicode.Phags_Pa": "unicode",
"unicode.Phoenician": "unicode",
"unicode.Pi": "unicode",
"unicode.Po": "unicode",
"unicode.PrintRanges": "unicode",
"unicode.Properties": "unicode",
"unicode.Ps": "unicode",
"unicode.Psalter_Pahlavi": "unicode",
"unicode.Punct": "unicode",
"unicode.Quotation_Mark": "unicode",
"unicode.Radical": "unicode",
"unicode.Range16": "unicode",
"unicode.Range32": "unicode",
"unicode.RangeTable": "unicode",
"unicode.Rejang": "unicode",
"unicode.ReplacementChar": "unicode",
"unicode.Runic": "unicode",
"unicode.S": "unicode",
"unicode.STerm": "unicode",
"unicode.Samaritan": "unicode",
"unicode.Saurashtra": "unicode",
"unicode.Sc": "unicode",
"unicode.Scripts": "unicode",
"unicode.Sharada": "unicode",
"unicode.Shavian": "unicode",
"unicode.Siddham": "unicode",
"unicode.SignWriting": "unicode",
"unicode.SimpleFold": "unicode",
"unicode.Sinhala": "unicode",
"unicode.Sk": "unicode",
"unicode.Sm": "unicode",
"unicode.So": "unicode",
"unicode.Soft_Dotted": "unicode",
"unicode.Sora_Sompeng": "unicode",
"unicode.Space": "unicode",
"unicode.SpecialCase": "unicode",
"unicode.Sundanese": "unicode",
"unicode.Syloti_Nagri": "unicode",
"unicode.Symbol": "unicode",
"unicode.Syriac": "unicode",
"unicode.Tagalog": "unicode",
"unicode.Tagbanwa": "unicode",
"unicode.Tai_Le": "unicode",
"unicode.Tai_Tham": "unicode",
"unicode.Tai_Viet": "unicode",
"unicode.Takri": "unicode",
"unicode.Tamil": "unicode",
"unicode.Telugu": "unicode",
"unicode.Terminal_Punctuation": "unicode",
"unicode.Thaana": "unicode",
"unicode.Thai": "unicode",
"unicode.Tibetan": "unicode",
"unicode.Tifinagh": "unicode",
"unicode.Tirhuta": "unicode",
"unicode.Title": "unicode",
"unicode.TitleCase": "unicode",
"unicode.To": "unicode",
"unicode.ToLower": "unicode",
"unicode.ToTitle": "unicode",
"unicode.ToUpper": "unicode",
"unicode.TurkishCase": "unicode",
"unicode.Ugaritic": "unicode",
"unicode.Unified_Ideograph": "unicode",
"unicode.Upper": "unicode",
"unicode.UpperCase": "unicode",
"unicode.UpperLower": "unicode",
"unicode.Vai": "unicode",
"unicode.Variation_Selector": "unicode",
"unicode.Version": "unicode",
"unicode.Warang_Citi": "unicode",
"unicode.White_Space": "unicode",
"unicode.Yi": "unicode",
"unicode.Z": "unicode",
"unicode.Zl": "unicode",
"unicode.Zp": "unicode",
"unicode.Zs": "unicode",
"url.Error": "net/url",
"url.EscapeError": "net/url",
"url.Parse": "net/url",
"url.ParseQuery": "net/url",
"url.ParseRequestURI": "net/url",
"url.QueryEscape": "net/url",
"url.QueryUnescape": "net/url",
"url.URL": "net/url",
"url.User": "net/url",
"url.UserPassword": "net/url",
"url.Userinfo": "net/url",
"url.Values": "net/url",
"user.Current": "os/user",
"user.Lookup": "os/user",
"user.LookupId": "os/user",
"user.UnknownUserError": "os/user",
"user.UnknownUserIdError": "os/user",
"user.User": "os/user",
"utf16.Decode": "unicode/utf16",
"utf16.DecodeRune": "unicode/utf16",
"utf16.Encode": "unicode/utf16",
"utf16.EncodeRune": "unicode/utf16",
"utf16.IsSurrogate": "unicode/utf16",
"utf8.DecodeLastRune": "unicode/utf8",
"utf8.DecodeLastRuneInString": "unicode/utf8",
"utf8.DecodeRune": "unicode/utf8",
"utf8.DecodeRuneInString": "unicode/utf8",
"utf8.EncodeRune": "unicode/utf8",
"utf8.FullRune": "unicode/utf8",
"utf8.FullRuneInString": "unicode/utf8",
"utf8.MaxRune": "unicode/utf8",
"utf8.RuneCount": "unicode/utf8",
"utf8.RuneCountInString": "unicode/utf8",
"utf8.RuneError": "unicode/utf8",
"utf8.RuneLen": "unicode/utf8",
"utf8.RuneSelf": "unicode/utf8",
"utf8.RuneStart": "unicode/utf8",
"utf8.UTFMax": "unicode/utf8",
"utf8.Valid": "unicode/utf8",
"utf8.ValidRune": "unicode/utf8",
"utf8.ValidString": "unicode/utf8",
"x509.CANotAuthorizedForThisName": "crypto/x509",
"x509.CertPool": "crypto/x509",
"x509.Certificate": "crypto/x509",
"x509.CertificateInvalidError": "crypto/x509",
"x509.CertificateRequest": "crypto/x509",
"x509.ConstraintViolationError": "crypto/x509",
"x509.CreateCertificate": "crypto/x509",
"x509.CreateCertificateRequest": "crypto/x509",
"x509.DSA": "crypto/x509",
"x509.DSAWithSHA1": "crypto/x509",
"x509.DSAWithSHA256": "crypto/x509",
"x509.DecryptPEMBlock": "crypto/x509",
"x509.ECDSA": "crypto/x509",
"x509.ECDSAWithSHA1": "crypto/x509",
"x509.ECDSAWithSHA256": "crypto/x509",
"x509.ECDSAWithSHA384": "crypto/x509",
"x509.ECDSAWithSHA512": "crypto/x509",
"x509.EncryptPEMBlock": "crypto/x509",
"x509.ErrUnsupportedAlgorithm": "crypto/x509",
"x509.Expired": "crypto/x509",
"x509.ExtKeyUsage": "crypto/x509",
"x509.ExtKeyUsageAny": "crypto/x509",
"x509.ExtKeyUsageClientAuth": "crypto/x509",
"x509.ExtKeyUsageCodeSigning": "crypto/x509",
"x509.ExtKeyUsageEmailProtection": "crypto/x509",
"x509.ExtKeyUsageIPSECEndSystem": "crypto/x509",
"x509.ExtKeyUsageIPSECTunnel": "crypto/x509",
"x509.ExtKeyUsageIPSECUser": "crypto/x509",
"x509.ExtKeyUsageMicrosoftServerGatedCrypto": "crypto/x509",
"x509.ExtKeyUsageNetscapeServerGatedCrypto": "crypto/x509",
"x509.ExtKeyUsageOCSPSigning": "crypto/x509",
"x509.ExtKeyUsageServerAuth": "crypto/x509",
"x509.ExtKeyUsageTimeStamping": "crypto/x509",
"x509.HostnameError": "crypto/x509",
"x509.IncompatibleUsage": "crypto/x509",
"x509.IncorrectPasswordError": "crypto/x509",
"x509.InvalidReason": "crypto/x509",
"x509.IsEncryptedPEMBlock": "crypto/x509",
"x509.KeyUsage": "crypto/x509",
"x509.KeyUsageCRLSign": "crypto/x509",
"x509.KeyUsageCertSign": "crypto/x509",
"x509.KeyUsageContentCommitment": "crypto/x509",
"x509.KeyUsageDataEncipherment": "crypto/x509",
"x509.KeyUsageDecipherOnly": "crypto/x509",
"x509.KeyUsageDigitalSignature": "crypto/x509",
"x509.KeyUsageEncipherOnly": "crypto/x509",
"x509.KeyUsageKeyAgreement": "crypto/x509",
"x509.KeyUsageKeyEncipherment": "crypto/x509",
"x509.MD2WithRSA": "crypto/x509",
"x509.MD5WithRSA": "crypto/x509",
"x509.MarshalECPrivateKey": "crypto/x509",
"x509.MarshalPKCS1PrivateKey": "crypto/x509",
"x509.MarshalPKIXPublicKey": "crypto/x509",
"x509.NewCertPool": "crypto/x509",
"x509.NotAuthorizedToSign": "crypto/x509",
"x509.PEMCipher": "crypto/x509",
"x509.PEMCipher3DES": "crypto/x509",
"x509.PEMCipherAES128": "crypto/x509",
"x509.PEMCipherAES192": "crypto/x509",
"x509.PEMCipherAES256": "crypto/x509",
"x509.PEMCipherDES": "crypto/x509",
"x509.ParseCRL": "crypto/x509",
"x509.ParseCertificate": "crypto/x509",
"x509.ParseCertificateRequest": "crypto/x509",
"x509.ParseCertificates": "crypto/x509",
"x509.ParseDERCRL": "crypto/x509",
"x509.ParseECPrivateKey": "crypto/x509",
"x509.ParsePKCS1PrivateKey": "crypto/x509",
"x509.ParsePKCS8PrivateKey": "crypto/x509",
"x509.ParsePKIXPublicKey": "crypto/x509",
"x509.PublicKeyAlgorithm": "crypto/x509",
"x509.RSA": "crypto/x509",
"x509.SHA1WithRSA": "crypto/x509",
"x509.SHA256WithRSA": "crypto/x509",
"x509.SHA384WithRSA": "crypto/x509",
"x509.SHA512WithRSA": "crypto/x509",
"x509.SignatureAlgorithm": "crypto/x509",
"x509.SystemRootsError": "crypto/x509",
"x509.TooManyIntermediates": "crypto/x509",
"x509.UnhandledCriticalExtension": "crypto/x509",
"x509.UnknownAuthorityError": "crypto/x509",
"x509.UnknownPublicKeyAlgorithm": "crypto/x509",
"x509.UnknownSignatureAlgorithm": "crypto/x509",
"x509.VerifyOptions": "crypto/x509",
"xml.Attr": "encoding/xml",
"xml.CharData": "encoding/xml",
"xml.Comment": "encoding/xml",
"xml.CopyToken": "encoding/xml",
"xml.Decoder": "encoding/xml",
"xml.Directive": "encoding/xml",
"xml.Encoder": "encoding/xml",
"xml.EndElement": "encoding/xml",
"xml.Escape": "encoding/xml",
"xml.EscapeText": "encoding/xml",
"xml.HTMLAutoClose": "encoding/xml",
"xml.HTMLEntity": "encoding/xml",
"xml.Header": "encoding/xml",
"xml.Marshal": "encoding/xml",
"xml.MarshalIndent": "encoding/xml",
"xml.Marshaler": "encoding/xml",
"xml.MarshalerAttr": "encoding/xml",
"xml.Name": "encoding/xml",
"xml.NewDecoder": "encoding/xml",
"xml.NewEncoder": "encoding/xml",
"xml.ProcInst": "encoding/xml",
"xml.StartElement": "encoding/xml",
"xml.SyntaxError": "encoding/xml",
"xml.TagPathError": "encoding/xml",
"xml.Token": "encoding/xml",
"xml.Unmarshal": "encoding/xml",
"xml.UnmarshalError": "encoding/xml",
"xml.Unmarshaler": "encoding/xml",
"xml.UnmarshalerAttr": "encoding/xml",
"xml.UnsupportedTypeError": "encoding/xml",
"zip.Compressor": "archive/zip",
"zip.Decompressor": "archive/zip",
"zip.Deflate": "archive/zip",
"zip.ErrAlgorithm": "archive/zip",
"zip.ErrChecksum": "archive/zip",
"zip.ErrFormat": "archive/zip",
"zip.File": "archive/zip",
"zip.FileHeader": "archive/zip",
"zip.FileInfoHeader": "archive/zip",
"zip.NewReader": "archive/zip",
"zip.NewWriter": "archive/zip",
"zip.OpenReader": "archive/zip",
"zip.ReadCloser": "archive/zip",
"zip.Reader": "archive/zip",
"zip.RegisterCompressor": "archive/zip",
"zip.RegisterDecompressor": "archive/zip",
"zip.Store": "archive/zip",
"zip.Writer": "archive/zip",
"zlib.BestCompression": "compress/zlib",
"zlib.BestSpeed": "compress/zlib",
"zlib.DefaultCompression": "compress/zlib",
"zlib.ErrChecksum": "compress/zlib",
"zlib.ErrDictionary": "compress/zlib",
"zlib.ErrHeader": "compress/zlib",
"zlib.NewReader": "compress/zlib",
"zlib.NewReaderDict": "compress/zlib",
"zlib.NewWriter": "compress/zlib",
"zlib.NewWriterLevel": "compress/zlib",
"zlib.NewWriterLevelDict": "compress/zlib",
"zlib.NoCompression": "compress/zlib",
"zlib.Resetter": "compress/zlib",
"zlib.Writer": "compress/zlib",
}
| [] | [] | [] | [] | [] | go | 0 | 0 | |
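The identifier-to-import-path map that ends above is the kind of index a tool such as goimports keeps so it can resolve a selector like "zip.NewReader" to the package that provides it. A minimal sketch of consulting such a map follows; the tiny excerpted map and the resolveImport helper are illustrative additions, not part of the row's source.

package main

import "fmt"

// stdlib maps a qualified identifier such as "zip.NewReader" to the import
// path that provides it. The entries are a small excerpt for illustration.
var stdlib = map[string]string{
	"zip.NewReader":    "archive/zip",
	"zlib.NewWriter":   "compress/zlib",
	"tls.Dial":         "crypto/tls",
	"token.NewFileSet": "go/token",
}

// resolveImport looks up the import path for a selector expression like
// "pkg.Ident" and reports whether it is known.
func resolveImport(selector string) (string, bool) {
	path, ok := stdlib[selector]
	return path, ok
}

func main() {
	for _, sel := range []string{"zip.NewReader", "does.NotExist"} {
		if path, ok := resolveImport(sel); ok {
			fmt.Printf("%s -> import %q\n", sel, path)
		} else {
			fmt.Printf("%s -> unknown\n", sel)
		}
	}
}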
main.go | package main
import (
"fmt"
"github.com/fsnotify/fsnotify"
"github.com/optiopay/kafka"
"github.com/optiopay/kafka/proto"
"io/ioutil"
"log"
"os"
"path/filepath"
"strconv"
"strings"
"time"
)
type Broker struct {
topic string
partition int
hostname string
}
func newBroker(hostname string, topic string, partition int) *Broker {
return &Broker{
topic: topic,
partition: partition,
hostname: hostname,
}
}
// isFileValid reports whether f refers to a regular file.
func isFileValid(f *os.File) bool {
fi, err := f.Stat()
if err != nil {
fmt.Println(err)
return false
}
mode := fi.Mode()
if mode.IsRegular() {
return true
}
return false
}
// produceFile publishes data as a single Kafka message with the given key to
// the topic/partition, using a producer created from the supplied broker client.
func produceFile(broker kafka.Client, topic string, partition int, data []byte, key []byte) {
producer := broker.Producer(kafka.NewProducerConf())
msg := &proto.Message{
Value: data,
Key: key}
if _, err := producer.Produce(topic, int32(partition), msg); err != nil {
log.Fatalf("cannot produce message to %s:%d: %s", topic, partition, err)
}
}
// check panics if e is non-nil.
func check(e error) {
if e != nil {
panic(e)
}
}
// getAllSubdirectories returns parentPath plus every directory below it.
func getAllSubdirectories(parentPath string) (paths []string, err error) {
walkFn := func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
paths = append(paths, path)
}
return nil
}
err = filepath.Walk(parentPath, walkFn)
return paths, err
}
// processFile reads the given file and publishes its contents to Kafka,
// using the file path as the message key. Connection settings come from the
// BROKERS, TOPIC, PARTITION and CLIENT_NAME environment variables.
func processFile(file string) {
partition, err1 := strconv.Atoi(os.Getenv("PARTITION"))
if err1 != nil {
partition = 0
}
topic := os.Getenv("TOPIC")
kafkaAddrs := strings.Split(os.Getenv("BROKERS"), ",")
name := os.Getenv("CLIENT_NAME")
broker, err2 := kafka.Dial(kafkaAddrs, kafka.NewBrokerConf(name))
check(err2)
defer broker.Close()
if file != "" {
f, err3 := os.Open(file)
if err3 == nil {
data, err4 := ioutil.ReadAll(f)
check(err4)
produceFile(broker, topic, partition, data, []byte(file))
}
} else {
fmt.Println("File not defined")
}
}
func main() {
sourcedir := os.Getenv("SOURCEDIR")
watcher, err := fsnotify.NewWatcher()
if err != nil {
log.Fatal(err)
}
defer watcher.Close()
go func() {
for {
select {
case event := <-watcher.Events:
if event.Op != fsnotify.Chmod {
log.Println("Pushing Files: " + event.Name)
processFile(event.Name)
}
case err := <-watcher.Errors:
log.Fatal(err)
}
}
}()
if sourcedir != "" {
paths, err := getAllSubdirectories(sourcedir)
if err != nil {
log.Fatal(err)
}
for _, path := range paths {
watcher.Add(path)
}
} else {
fmt.Println("Please, enter a valid SOURCEDIR to inspect")
}
// Daemon mode
for {
time.Sleep(1000 * time.Millisecond)
}
}
| ["\"PARTITION\"", "\"TOPIC\"", "\"BROKERS\"", "\"CLIENT_NAME\"", "\"SOURCEDIR\""] | [] | ["BROKERS", "PARTITION", "SOURCEDIR", "CLIENT_NAME", "TOPIC"] | [] | ["BROKERS", "PARTITION", "SOURCEDIR", "CLIENT_NAME", "TOPIC"] | go | 5 | 0 | |
buildenv/envs.go | // Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package buildenv contains definitions for the
// environments the Go build system can run in.
package buildenv
import (
"context"
"flag"
"fmt"
"log"
"math/rand"
"os"
"path/filepath"
"strings"
"sync"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
compute "google.golang.org/api/compute/v1"
oauth2api "google.golang.org/api/oauth2/v2"
)
const (
prefix = "https://www.googleapis.com/compute/v1/projects/"
)
// KubeConfig describes the configuration of a Kubernetes cluster.
type KubeConfig struct {
// MinNodes is the minimum number of nodes in the Kubernetes cluster.
// The autoscaler will ensure that at least this many nodes is always
// running despite any scale-down decision.
MinNodes int64
// MaxNodes is the maximum number of nodes that the autoscaler can
// provision in the Kubernetes cluster.
// If MaxNodes is 0, Kubernetes is not used.
MaxNodes int64
// MachineType is the GCE machine type to use for the Kubernetes cluster nodes.
MachineType string
// Name is the name of the Kubernetes cluster that will be created.
Name string
// Namespace is the Kubernetes namespace to use within the cluster.
Namespace string
}
// Environment describes the configuration of the infrastructure for a
// coordinator and its buildlet resources running on Google Cloud Platform.
// Staging and Production are the two common build environments.
type Environment struct {
// The GCP project name that the build infrastructure will be provisioned in.
// This field may be overridden as necessary without impacting other fields.
ProjectName string
// ProjectNumber is the GCP build infrastructure project's number, as visible
// in the admin console. This is used for things such as constructing the
// "email" of the default service account.
ProjectNumber int64
// The GCP project name for the Go project, where build status is stored.
// This field may be overridden as necessary without impacting other fields.
GoProjectName string
// The IsProd flag indicates whether production functionality should be
// enabled. When true, GCE and Kubernetes builders are enabled and the
// coordinator serves on 443. Otherwise, GCE and Kubernetes builders are
// disabled and the coordinator serves on 8119.
IsProd bool
// ControlZone is the GCE zone that the coordinator instance and Kubernetes cluster
// will run in. This field may be overridden as necessary without impacting
// other fields.
ControlZone string
// VMZones are the GCE zones that the VMs will be deployed to. These
// GCE zones will be periodically cleaned by deleting old VMs. The zones
// should all exist within a single region.
VMZones []string
// StaticIP is the public, static IP address that will be attached to the
// coordinator instance. The zero value means the address will be looked
// up by name. This field is optional.
StaticIP string
// MachineType is the GCE machine type to use for the coordinator.
MachineType string
// KubeBuild is the Kubernetes config for the buildlet cluster.
KubeBuild KubeConfig
// KubeTools is the Kubernetes config for the tools cluster.
KubeTools KubeConfig
// PreferContainersOnCOS controls whether we do most builds on
// Google's Container-Optimized OS Linux image running on a VM
// rather than using Kubernetes for builds. This does not
// affect cross-compiled builds just running make.bash. Those
// still use Kubernetes for now.
// See https://golang.org/issue/25108.
PreferContainersOnCOS bool
// DashURL is the base URL of the build dashboard, ending in a slash.
DashURL string
// PerfDataURL is the base URL of the benchmark storage server.
PerfDataURL string
// CoordinatorName is the hostname of the coordinator instance.
CoordinatorName string
// BuildletBucket is the GCS bucket that stores buildlet binaries.
// TODO: rename. this is not just for buildlets; also for bootstrap.
BuildletBucket string
// LogBucket is the GCS bucket to which logs are written.
LogBucket string
// SnapBucket is the GCS bucket to which snapshots of
// completed builds (after make.bash, before tests) are
// written.
SnapBucket string
// MaxBuilds is the maximum number of concurrent builds that
// can run. Zero means unlimited. This is typically only used
// in a development or staging environment.
MaxBuilds int
// AutoCertCacheBucket is the GCS bucket to use for the
// golang.org/x/crypto/acme/autocert (LetsEncrypt) cache.
// If empty, LetsEncrypt isn't used.
AutoCertCacheBucket string
// COSServiceAccount (Container Optimized OS) is the service
// account that will be assigned to a VM instance that hosts
// a container when the instance is created.
COSServiceAccount string
// AWSSecurityGroup is the security group name that any VM instance
// created on EC2 should contain. These security groups are
// collections of firewall rules to be applied to the VM.
AWSSecurityGroup string
// AWSRegion is the region where AWS resources are deployed.
AWSRegion string
}
// ComputePrefix returns the URI prefix for Compute Engine resources in a project.
func (e Environment) ComputePrefix() string {
return prefix + e.ProjectName
}
// RandomVMZone returns a randomly selected zone from the zones in VMZones.
// The ControlZone value is returned if VMZones is not set.
func (e Environment) RandomVMZone() string {
if len(e.VMZones) == 0 {
return e.ControlZone
}
return e.VMZones[rand.Intn(len(e.VMZones))]
}
// Region returns the GCE region, derived from its zone.
func (e Environment) Region() string {
return e.ControlZone[:strings.LastIndex(e.ControlZone, "-")]
}
// SnapshotURL returns the absolute URL of the .tar.gz containing a
// built Go tree for the builderType and Go rev (40 character Git
// commit hash). The tarball is suitable for passing to
// (*buildlet.Client).PutTarFromURL.
func (e Environment) SnapshotURL(builderType, rev string) string {
return fmt.Sprintf("https://storage.googleapis.com/%s/go/%s/%s.tar.gz", e.SnapBucket, builderType, rev)
}
// DashBase returns the base URL of the build dashboard, ending in a slash.
func (e Environment) DashBase() string {
// TODO(quentin): Should we really default to production? That's what the old code did.
if e.DashURL != "" {
return e.DashURL
}
return Production.DashURL
}
// Credentials returns the credentials required to access the GCP environment
// with the necessary scopes.
func (e Environment) Credentials(ctx context.Context) (*google.Credentials, error) {
// TODO: this method used to do much more. maybe remove it
// when TODO below is addressed, pushing scopes to caller? Or
// add a Scopes func/method somewhere instead?
scopes := []string{
// Cloud Platform should include all others, but the
// old code duplicated compute and the storage full
// control scopes, so I leave them here for now. They
// predated the all-encompassing "cloud platform"
// scope anyway.
// TODO: remove compute and DevstorageFullControlScope once verified to work
// without.
compute.CloudPlatformScope,
compute.ComputeScope,
compute.DevstorageFullControlScope,
// The coordinator needed the userinfo email scope for
// reporting to the perf dashboard running on App
// Engine at one point. The perf dashboard is down at
// the moment, but when it's back up we'll need this,
// and if we do other authenticated requests to App
// Engine apps, this would be useful.
oauth2api.UserinfoEmailScope,
}
creds, err := google.FindDefaultCredentials(ctx, scopes...)
if err != nil {
CheckUserCredentials()
return nil, err
}
creds.TokenSource = diagnoseFailureTokenSource{creds.TokenSource}
return creds, nil
}
// ByProjectID returns an Environment for the specified
// project ID. It is currently limited to the symbolic-datum-552
// and go-dashboard-dev projects.
// ByProjectID will panic if the project ID is not known.
func ByProjectID(projectID string) *Environment {
var envKeys []string
for k := range possibleEnvs {
envKeys = append(envKeys, k)
}
var env *Environment
env, ok := possibleEnvs[projectID]
if !ok {
panic(fmt.Sprintf("Can't get buildenv for unknown project %q. Possible envs are %s", projectID, envKeys))
}
return env
}
// Staging defines the environment that the coordinator and build
// infrastructure is deployed to before it is released to production.
// For local dev, override the project with the program's flag to set
// a custom project.
var Staging = &Environment{
ProjectName: "go-dashboard-dev",
ProjectNumber: 302018677728,
GoProjectName: "go-dashboard-dev",
IsProd: true,
ControlZone: "us-central1-f",
VMZones: []string{"us-central1-a", "us-central1-b", "us-central1-c", "us-central1-f"},
StaticIP: "104.154.113.235",
MachineType: "n1-standard-1",
PreferContainersOnCOS: true,
KubeBuild: KubeConfig{
MinNodes: 1,
MaxNodes: 1, // auto-scaling disabled
Name: "buildlets",
MachineType: "n1-standard-4", // only used for make.bash due to PreferContainersOnCOS
},
KubeTools: KubeConfig{
MinNodes: 3,
MaxNodes: 3,
Name: "go",
MachineType: "n1-standard-4",
Namespace: "default",
},
DashURL: "https://build-staging.golang.org/",
PerfDataURL: "https://perfdata.golang.org",
CoordinatorName: "farmer",
BuildletBucket: "dev-go-builder-data",
LogBucket: "dev-go-build-log",
SnapBucket: "dev-go-build-snap",
COSServiceAccount: "[email protected]",
AWSSecurityGroup: "staging-go-builders",
AWSRegion: "us-east-1",
}
// Production defines the environment that the coordinator and build
// infrastructure is deployed to for production usage at build.golang.org.
var Production = &Environment{
ProjectName: "symbolic-datum-552",
ProjectNumber: 872405196845,
GoProjectName: "golang-org",
IsProd: true,
ControlZone: "us-central1-f",
VMZones: []string{"us-central1-a", "us-central1-b", "us-central1-c", "us-central1-f"},
StaticIP: "107.178.219.46",
MachineType: "n1-standard-4",
PreferContainersOnCOS: true,
KubeBuild: KubeConfig{
MinNodes: 2,
MaxNodes: 2, // auto-scaling disabled
Name: "buildlets",
MachineType: "n1-standard-4", // only used for make.bash due to PreferContainersOnCOS
},
KubeTools: KubeConfig{
MinNodes: 4,
MaxNodes: 4,
Name: "go",
MachineType: "n1-standard-4",
Namespace: "prod",
},
DashURL: "https://build.golang.org/",
PerfDataURL: "https://perfdata.golang.org",
CoordinatorName: "farmer",
BuildletBucket: "go-builder-data",
LogBucket: "go-build-log",
SnapBucket: "go-build-snap",
AutoCertCacheBucket: "farmer-golang-org-autocert-cache",
COSServiceAccount: "[email protected]",
AWSSecurityGroup: "go-builders",
AWSRegion: "us-east-2",
}
var Development = &Environment{
GoProjectName: "golang-org",
IsProd: false,
StaticIP: "127.0.0.1",
}
// possibleEnvs enumerate the known buildenv.Environment definitions.
var possibleEnvs = map[string]*Environment{
"dev": Development,
"symbolic-datum-552": Production,
"go-dashboard-dev": Staging,
}
var (
stagingFlag bool
localDevFlag bool
registeredFlags bool
)
// RegisterFlags registers the "staging" and "localdev" flags.
func RegisterFlags() {
if registeredFlags {
panic("duplicate call to RegisterFlags or RegisterStagingFlag")
}
flag.BoolVar(&localDevFlag, "localdev", false, "use the localhost in-development coordinator")
RegisterStagingFlag()
registeredFlags = true
}
// RegisterStagingFlag registers the "staging" flag.
func RegisterStagingFlag() {
if registeredFlags {
panic("duplicate call to RegisterFlags or RegisterStagingFlag")
}
flag.BoolVar(&stagingFlag, "staging", false, "use the staging build coordinator and buildlets")
registeredFlags = true
}
// FromFlags returns the build environment specified from flags,
// as registered by RegisterFlags or RegisterStagingFlag.
// By default it returns the production environment.
func FromFlags() *Environment {
if !registeredFlags {
panic("FromFlags called without RegisterFlags")
}
if localDevFlag {
return Development
}
if stagingFlag {
return Staging
}
return Production
}
// warnCredsOnce guards against CheckUserCredentials spamming stderr. Once is enough.
var warnCredsOnce sync.Once
// CheckUserCredentials warns if the gcloud Application Default Credentials file doesn't exist
// and says how to log in properly.
func CheckUserCredentials() {
adcJSON := filepath.Join(os.Getenv("HOME"), ".config/gcloud/application_default_credentials.json")
if _, err := os.Stat(adcJSON); os.IsNotExist(err) {
warnCredsOnce.Do(func() {
log.Printf("warning: file %s does not exist; did you run 'gcloud auth application-default login' ? (The 'application-default' part matters, confusingly.)", adcJSON)
})
}
}
// diagnoseFailureTokenSource is an oauth2.TokenSource wrapper that,
// upon failure, diagnoses why the token acquisition might have failed.
type diagnoseFailureTokenSource struct {
ts oauth2.TokenSource
}
func (ts diagnoseFailureTokenSource) Token() (*oauth2.Token, error) {
t, err := ts.ts.Token()
if err != nil {
CheckUserCredentials()
return nil, err
}
return t, nil
}
| ["\"HOME\""] | [] | ["HOME"] | [] | ["HOME"] | go | 1 | 0 | |
watch.go | // Package watcher watches all file changes via fsnotify package and sends
// update events to builder
package watcher
import (
"errors"
"fmt"
"log"
"os"
"path/filepath"
"strings"
"time"
fsnotify "gopkg.in/fsnotify.v1"
)
// ErrPathNotSet is returned when GOPATH is not set.
var ErrPathNotSet = errors.New("GOPATH not set")
var watchedFileExt = []string{".go", ".tmpl", ".tpl", ".html"}
var watchDelta = 1000 * time.Millisecond
// Watcher watches the file change events from fsnotify and
// sends update messages. It is also used as a fsnotify.Watcher wrapper
type Watcher struct {
rootdir string
watcher *fsnotify.Watcher
watchVendor bool
// when a file gets changed a message is sent to the update channel
update chan struct{}
}
// MustRegisterWatcher creates a new Watcher and starts listening to
// given folders
func MustRegisterWatcher(params *Params) (*Watcher, error) {
watcher, err := fsnotify.NewWatcher()
if err != nil {
return nil, err
}
w := Watcher{
rootdir: params.RootDir,
watcher: watcher,
watchVendor: params.WatchVendor,
update: make(chan struct{}),
}
// add folders that will be watched
w.watchFolders()
return &w, nil
}
// Watch listens for file updates and sends a signal on the update channel
// when a watched file type (.go, .tmpl, .tpl, .html) changes.
func (w *Watcher) Watch() {
eventSent := false
for {
select {
case event := <-w.watcher.Events:
// discard chmod events
if event.Op&fsnotify.Chmod != fsnotify.Chmod {
// test files do not need a rebuild
if isTestFile(event.Name) {
continue
}
if !isWatchedFileType(event.Name) {
continue
}
if eventSent {
continue
}
eventSent = true
// prevent consequent builds
go func() {
w.update <- struct{}{}
time.Sleep(watchDelta)
eventSent = false
}()
}
case err := <-w.watcher.Errors:
if err != nil {
log.Fatalf("Watcher error: %s", err)
}
return
}
}
}
func isTestFile(fileName string) bool {
return strings.HasSuffix(filepath.Base(fileName), "_test.go")
}
func isWatchedFileType(fileName string) bool {
ext := filepath.Ext(fileName)
return existIn(ext, watchedFileExt)
}
// Wait waits for the latest messages
func (w *Watcher) Wait() <-chan struct{} {
return w.update
}
// Close closes the fsnotify watcher channel
func (w *Watcher) Close() {
w.watcher.Close()
close(w.update)
}
// watchFolders recursively adds the folders to be watched for changes,
// starting from the root working directory.
func (w *Watcher) watchFolders() {
wd, err := w.prepareRootDir()
if err != nil {
log.Fatalf("Could not get root working directory: %s", err)
}
filepath.Walk(wd, func(path string, info os.FileInfo, err error) error {
// skip files
if info == nil {
log.Fatalf("wrong watcher package: %s", path)
}
if !info.IsDir() {
return nil
}
if !w.watchVendor {
// skip vendor directory
vendor := fmt.Sprintf("%s/vendor", wd)
if strings.HasPrefix(path, vendor) {
return filepath.SkipDir
}
}
// skip hidden folders
if len(path) > 1 && strings.HasPrefix(filepath.Base(path), ".") {
return filepath.SkipDir
}
w.addFolder(path)
return err
})
}
// addFolder adds given folder name to the watched folders, and starts
// watching it for further changes
func (w *Watcher) addFolder(name string) {
if err := w.watcher.Add(name); err != nil {
log.Fatalf("Could not watch folder: %s", err)
}
}
// prepareRootDir returns the directory to watch: the current working
// directory when rootdir is empty, otherwise $GOPATH/src/<rootdir>.
func (w *Watcher) prepareRootDir() (string, error) {
if w.rootdir == "" {
return os.Getwd()
}
path := os.Getenv("GOPATH")
if path == "" {
return "", ErrPathNotSet
}
root := fmt.Sprintf("%s/src/%s", path, w.rootdir)
return root, nil
}
| ["\"GOPATH\""] | [] | ["GOPATH"] | [] | ["GOPATH"] | go | 1 | 0 | |