filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence |
---|---|---|---|---|---|---|---|---|---|---|
block_timestamp.py
|
#!/usr/bin/env python3
import time
import datetime
import argparse
from web3 import Web3
from dotenv import load_dotenv
import requests
import os
load_dotenv()
# etherscan.io API:
etherscan_api = "https://api.etherscan.io/api"
# Get API keys from .env file:
etherscan_key = os.environ.get("ETHERSCAN_KEY")
# ETH node API:
eth_node_api = os.environ.get("ETH_NODE_API")
def get_block(timestamp):
    API_ENDPOINT = etherscan_api+"?module=block&action=getblocknobytime&closest=before&timestamp="+str(timestamp)+"&apikey="+etherscan_key
    r = requests.get(url = API_ENDPOINT)
    response = r.json()
    return int(response["result"])
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--timestamp", type=int, help="get the latest block for this timestamp")
parser.add_argument("-b", "--block", type=int, help="get the block timestamp")
args = parser.parse_args()
# HTTPProvider:
w3 = Web3(Web3.HTTPProvider(eth_node_api))
if args.block:
    block = args.block
    timestamp = w3.eth.getBlock(block).timestamp
    r_timestamp = timestamp
elif args.timestamp:
    r_timestamp = args.timestamp
    block = get_block(r_timestamp)
    timestamp = w3.eth.getBlock(block).timestamp
else:
    block = w3.eth.blockNumber
    timestamp = w3.eth.getBlock(block).timestamp
    r_timestamp = int(time.time())
print('Requested timestamp: %d is %s UTC' % (r_timestamp, datetime.datetime.utcfromtimestamp(r_timestamp)))
print('Block timestamp: %d is %s UTC' % (timestamp, datetime.datetime.utcfromtimestamp(timestamp)))
print('Block number: %d' % (block))
| [] | [] | ["ETHERSCAN_KEY", "ETH_NODE_API"] | [] | ["ETHERSCAN_KEY", "ETH_NODE_API"] | python | 2 | 0 | |
src/the_tale/the_tale/portal/conf.py
|
import smart_imports
smart_imports.all()
SITE_SECTIONS = ((re.compile(r'^/$'), 'index'),
(re.compile(r'^/search.*$'), 'search'),
(re.compile(r'^/chat.*$'), 'chat'),
(re.compile(r'^/news.*$'), 'news'),
(re.compile(r'^/forum.*$'), 'forum'),
(re.compile(r'^/chat.*$'), 'chat'),
(re.compile(r'^/shop.*$'), 'shop'),
(re.compile(r'^/linguistics.*$'), 'world'),
(re.compile(r'^/accounts/auth.*$'), 'auth'),
(re.compile(r'^/accounts/profile.*$'), 'profile'),
(re.compile(r'^/accounts/messages.*$'), 'personal_messages'),
(re.compile(r'^/accounts/.*$'), 'community'),
(re.compile(r'^/game/heroes.*$'), 'hero'),
(re.compile(r'^/game/bills.*$'), 'world'),
(re.compile(r'^/game/chronicle.*$'), 'world'),
(re.compile(r'^/game/ratings.*$'), 'community'),
(re.compile(r'^/game/pvp/calls.*$'), 'world'),
(re.compile(r'^/game/map/'), 'map'),
(re.compile(r'^/game/map.*$'), None),
(re.compile(r'^/game.*$'), 'game'),
(re.compile(r'^/guide.*$'), 'guide'))
settings = utils_app_settings.app_settings('PORTAL',
FAQ_URL=django_reverse_lazy('forum:threads:show', args=[126]),
PLAYERS_PROJECTS_URL=django_reverse_lazy('forum:subcategories:show', args=[43]),
ERRORS_URL=django_reverse_lazy('forum:subcategory', args=['erros']),
BILLS_ON_INDEX=7,
CHRONICLE_RECORDS_ON_INDEX=10,
FORUM_COMMON_THREADS_ON_INDEX=9,
FORUM_CLAN_THREADS_ON_INDEX=4,
FORUM_RPG_THREADS_ON_INDEX=4,
FORUM_GAMES_THREADS_ON_INDEX=4,
BLOG_POSTS_ON_INDEX=7,
SETTINGS_ACCOUNT_OF_THE_DAY_KEY='account of the day',
FIRST_EDITION_DATE=datetime.datetime(2012, 10, 29),
NEWS_ON_INDEX=3,
FORUM_RPG_SUBCATEGORY='forum_rpg',
FORUM_GAMES_SUBCATEGORY='forum_games',
PREMIUM_DAYS_FOR_HERO_OF_THE_DAY=30,
ENABLE_WORKER_LONG_COMMANDS=True,
SETTINGS_CDN_INFO_KEY='cdn info',
TT_DISCORD_ENTRY_POINT='http://localhost:10022/',
DISCORD_BIND_CODE_EXPIRE_TIMEOUT=10*60,
LANDING_MOB_DESCRIPTION_MAX_LENGTH=1000)
| [] | [] | [] | [] | [] | python | null | null | null |
fenxlib.core.impl/src/main/java/com/legyver/fenxlib/core/impl/config/load/ApplicationHome.java
|
package com.legyver.fenxlib.core.impl.config.load;
import com.legyver.fenxlib.core.impl.files.LazyCreateDirectoryWrapper;
import java.io.File;
/**
* Where to save internal application files.
* Windows: %APPDATA%\<appName>
* Others: user.home/.<appName>
*
* Default root directories include
* cache
* config
* logs
*/
public class ApplicationHome implements ApplicationConfigProvider {
/**
* suffix used in setting up logging
* ie: (src/main/resources/log4j2.xml)
* <properties>
* <property name="patternlayout">%d{dd/MM/yyyy HH:mm:ss} %5p %c{1}:%L - %m%n</property>
* <property name="filename">${sys: <appName>.home.logging}/application.log</property>
* <property name="filenamePattern">${sys: <appName>.home.logging}/application-%d{yyyy-MM-dd}.log.gz</property>
* </properties>
*/
public static final String APP_HOME_SUFFIX = ".home";
private final LazyCreateDirectoryWrapper appHome;
private final LazyCreateDirectoryWrapper cacheDirectory;
private final LazyCreateDirectoryWrapper configDirectory;
private final LazyCreateDirectoryWrapper logDirectory;
/**
* Construct an Application Home based on the provided app name.
* - Creates a directory in the appropriate location for all config files, cached files, and log files for the application to be saved.
* -- In Windows this is under %AppData%
* -- For other OS's it will be under user.home
* @param appName the name of the application.
*/
public ApplicationHome(String appName) {
String osName = System.getProperty("os.name");
String appDir;
if (osName != null && osName.startsWith("Windows")) {
String appData = System.getenv("APPDATA");
appDir = appData + File.separator + appName;
} else {
appDir = System.getProperty("user.home") + File.separator + appName;
}
appHome = new LazyCreateDirectoryWrapper(new File(appDir));
cacheDirectory = new LazyCreateDirectoryWrapper(new File(appHome.getAbsolutePath() + File.separator + "cache"));
configDirectory = new LazyCreateDirectoryWrapper(new File(appHome.getAbsolutePath() + File.separator + "config"));
logDirectory = new LazyCreateDirectoryWrapper(new File(appHome.getAbsolutePath() + File.separator + "logs"));
}
/**
* Get the Application Home directory where all managed application files are kept.
* @return the application home directory
*/
public File getAppHome() {
return appHome.getDirectory();
}
/**
* Get the cache directory where all the files cached by the application are kept
* @return the cache directory
*/
public File getCacheDirectory() {
return cacheDirectory.getDirectory();
}
/**
* Get the config directory where all the application config files are kept
* @return the config directory
*/
public File getConfigDirectory() {
return configDirectory.getDirectory();
}
/**
* Get the log directory where all the application log files are kept
* @return the logging directory
*/
public File getLogDirectory() {
return logDirectory.getDirectory();
}
/**
* Load a file from the application cache
* @param name the name of the file
* @return the cached file
*/
public File getCacheFile(String name) {
return cacheDirectory.loadFileFromDir(name);
}
@Override
public String getApplicationConfigFilename() {
return configDirectory.getFullFilename("config.json");
}
}
| ["\"APPDATA\""] | [] | ["APPDATA"] | [] | ["APPDATA"] | java | 1 | 0 | |
yocto/poky/scripts/lib/devtool/__init__.py
|
#!/usr/bin/env python
# Development tool - utility functions for plugins
#
# Copyright (C) 2014 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool plugins module"""
import os
import sys
import subprocess
import logging
import re
logger = logging.getLogger('devtool')
class DevtoolError(Exception):
"""Exception for handling devtool errors"""
pass
def exec_build_env_command(init_path, builddir, cmd, watch=False, **options):
"""Run a program in bitbake build context"""
import bb
if not 'cwd' in options:
options["cwd"] = builddir
if init_path:
# As the OE init script makes use of BASH_SOURCE to determine OEROOT,
# and can't determine it when running under dash, we need to set
# the executable to bash to correctly set things up
if not 'executable' in options:
options['executable'] = 'bash'
logger.debug('Executing command: "%s" using init path %s' % (cmd, init_path))
init_prefix = '. %s %s > /dev/null && ' % (init_path, builddir)
else:
logger.debug('Executing command "%s"' % cmd)
init_prefix = ''
if watch:
if sys.stdout.isatty():
# Fool bitbake into thinking it's outputting to a terminal (because it is, indirectly)
cmd = 'script -e -q -c "%s" /dev/null' % cmd
return exec_watch('%s%s' % (init_prefix, cmd), **options)
else:
return bb.process.run('%s%s' % (init_prefix, cmd), **options)
def exec_watch(cmd, **options):
"""Run program with stdout shown on sys.stdout"""
import bb
if isinstance(cmd, basestring) and not "shell" in options:
options["shell"] = True
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **options
)
buf = ''
while True:
out = process.stdout.read(1)
if out:
sys.stdout.write(out)
sys.stdout.flush()
buf += out
elif out == '' and process.poll() != None:
break
if process.returncode != 0:
raise bb.process.ExecutionError(cmd, process.returncode, buf, None)
return buf, None
def exec_fakeroot(d, cmd, **kwargs):
"""Run a command under fakeroot (pseudo, in fact) so that it picks up the appropriate file permissions"""
# Grab the command and check it actually exists
fakerootcmd = d.getVar('FAKEROOTCMD', True)
if not os.path.exists(fakerootcmd):
logger.error('pseudo executable %s could not be found - have you run a build yet? pseudo-native should install this and if you have run any build then that should have been built' % fakerootcmd)
return 2
# Set up the appropriate environment
newenv = dict(os.environ)
fakerootenv = d.getVar('FAKEROOTENV', True)
for varvalue in fakerootenv.split():
if '=' in varvalue:
splitval = varvalue.split('=', 1)
newenv[splitval[0]] = splitval[1]
return subprocess.call("%s %s" % (fakerootcmd, cmd), env=newenv, **kwargs)
def setup_tinfoil(config_only=False, basepath=None, tracking=False):
"""Initialize tinfoil api from bitbake"""
import scriptpath
orig_cwd = os.path.abspath(os.curdir)
try:
if basepath:
os.chdir(basepath)
bitbakepath = scriptpath.add_bitbake_lib_path()
if not bitbakepath:
logger.error("Unable to find bitbake by searching parent directory of this script or PATH")
sys.exit(1)
import bb.tinfoil
tinfoil = bb.tinfoil.Tinfoil(tracking=tracking)
tinfoil.prepare(config_only)
tinfoil.logger.setLevel(logger.getEffectiveLevel())
finally:
os.chdir(orig_cwd)
return tinfoil
def get_recipe_file(cooker, pn):
"""Find recipe file corresponding a package name"""
import oe.recipeutils
recipefile = oe.recipeutils.pn_to_recipe(cooker, pn)
if not recipefile:
skipreasons = oe.recipeutils.get_unavailable_reasons(cooker, pn)
if skipreasons:
logger.error('\n'.join(skipreasons))
else:
logger.error("Unable to find any recipe file matching %s" % pn)
return recipefile
def parse_recipe(config, tinfoil, pn, appends, filter_workspace=True):
"""Parse recipe of a package"""
import oe.recipeutils
recipefile = get_recipe_file(tinfoil.cooker, pn)
if not recipefile:
# Error already logged
return None
if appends:
append_files = tinfoil.cooker.collection.get_file_appends(recipefile)
if filter_workspace:
# Filter out appends from the workspace
append_files = [path for path in append_files if
not path.startswith(config.workspace_path)]
else:
append_files = None
return oe.recipeutils.parse_recipe(recipefile, append_files,
tinfoil.config_data)
def check_workspace_recipe(workspace, pn, checksrc=True, bbclassextend=False):
"""
Check that a recipe is in the workspace and (optionally) that source
is present.
"""
workspacepn = pn
for recipe, value in workspace.iteritems():
if recipe == pn:
break
if bbclassextend:
recipefile = value['recipefile']
if recipefile:
targets = get_bbclassextend_targets(recipefile, recipe)
if pn in targets:
workspacepn = recipe
break
else:
raise DevtoolError("No recipe named '%s' in your workspace" % pn)
if checksrc:
srctree = workspace[workspacepn]['srctree']
if not os.path.exists(srctree):
raise DevtoolError("Source tree %s for recipe %s does not exist" % (srctree, workspacepn))
if not os.listdir(srctree):
raise DevtoolError("Source tree %s for recipe %s is empty" % (srctree, workspacepn))
return workspacepn
def use_external_build(same_dir, no_same_dir, d):
"""
Determine if we should use B!=S (separate build and source directories) or not
"""
b_is_s = True
if no_same_dir:
logger.info('Using separate build directory since --no-same-dir specified')
b_is_s = False
elif same_dir:
logger.info('Using source tree as build directory since --same-dir specified')
elif bb.data.inherits_class('autotools-brokensep', d):
logger.info('Using source tree as build directory since recipe inherits autotools-brokensep')
elif d.getVar('B', True) == os.path.abspath(d.getVar('S', True)):
logger.info('Using source tree as build directory since that would be the default for this recipe')
else:
b_is_s = False
return b_is_s
def setup_git_repo(repodir, version, devbranch, basetag='devtool-base'):
"""
Set up the git repository for the source tree
"""
import bb.process
if not os.path.exists(os.path.join(repodir, '.git')):
bb.process.run('git init', cwd=repodir)
bb.process.run('git add .', cwd=repodir)
commit_cmd = ['git', 'commit', '-q']
stdout, _ = bb.process.run('git status --porcelain', cwd=repodir)
if not stdout:
commit_cmd.append('--allow-empty')
commitmsg = "Initial empty commit with no upstream sources"
elif version:
commitmsg = "Initial commit from upstream at version %s" % version
else:
commitmsg = "Initial commit from upstream"
commit_cmd += ['-m', commitmsg]
bb.process.run(commit_cmd, cwd=repodir)
bb.process.run('git checkout -b %s' % devbranch, cwd=repodir)
bb.process.run('git tag -f %s' % basetag, cwd=repodir)
def recipe_to_append(recipefile, config, wildcard=False):
"""
Convert a recipe file to a bbappend file path within the workspace.
NOTE: if the bbappend already exists, you should be using
workspace[args.recipename]['bbappend'] instead of calling this
function.
"""
appendname = os.path.splitext(os.path.basename(recipefile))[0]
if wildcard:
appendname = re.sub(r'_.*', '_%', appendname)
appendpath = os.path.join(config.workspace_path, 'appends')
appendfile = os.path.join(appendpath, appendname + '.bbappend')
return appendfile
def get_bbclassextend_targets(recipefile, pn):
"""
Cheap function to get BBCLASSEXTEND and then convert that to the
list of targets that would result.
"""
import bb.utils
values = {}
def get_bbclassextend_varfunc(varname, origvalue, op, newlines):
values[varname] = origvalue
return origvalue, None, 0, True
with open(recipefile, 'r') as f:
bb.utils.edit_metadata(f, ['BBCLASSEXTEND'], get_bbclassextend_varfunc)
targets = []
bbclassextend = values.get('BBCLASSEXTEND', '').split()
if bbclassextend:
for variant in bbclassextend:
if variant == 'nativesdk':
targets.append('%s-%s' % (variant, pn))
elif variant in ['native', 'cross', 'crosssdk']:
targets.append('%s-%s' % (pn, variant))
return targets
| [] | [] | [] | [] | [] | python | 0 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mlbslicedb.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
updateHostsFile.py
|
#!/usr/bin/env python3
# Script by Ben Limmer
# https://github.com/l1m5
#
# This Python script will combine all the host files you provide
# as sources into one, unique host file to keep your internet browsing happy.
import argparse
import fnmatch
import json
import locale
import os
import platform
import re
import shutil
import socket
import subprocess
import sys
import tempfile
import time
from glob import glob
import lxml # noqa: F401
from bs4 import BeautifulSoup
# Detecting Python 3 for version-dependent implementations
PY3 = sys.version_info >= (3, 0)
if PY3:
from urllib.request import urlopen
else:
raise Exception("We do not support Python 2 anymore.")
# Syntactic sugar for "sudo" command in UNIX / Linux
if platform.system() == "OpenBSD":
SUDO = ["/usr/bin/doas"]
else:
SUDO = ["/usr/bin/env", "sudo"]
# Project Settings
BASEDIR_PATH = os.path.dirname(os.path.realpath(__file__))
def get_defaults():
"""
Helper method for getting the default settings.
Returns
-------
default_settings : dict
A dictionary of the default settings when updating host information.
"""
return {
"numberofrules": 0,
"datapath": path_join_robust(BASEDIR_PATH, "data"),
"freshen": False,
"replace": False,
"backup": False,
"skipstatichosts": False,
"keepdomaincomments": True,
"extensionspath": path_join_robust(BASEDIR_PATH, "extensions"),
"extensions": [],
"compress": False,
"minimise": False,
"outputsubfolder": "",
"hostfilename": "hosts",
"targetip": "0.0.0.0",
"sourcedatafilename": "update.json",
"sourcesdata": [],
"readmefilename": "readme.md",
"readmetemplate": path_join_robust(BASEDIR_PATH, "readme_template.md"),
"readmedata": {},
"readmedatafilename": path_join_robust(BASEDIR_PATH, "readmeData.json"),
"exclusionpattern": r"([a-zA-Z\d-]+\.){0,}",
"exclusionregexs": [],
"exclusions": [],
"commonexclusions": [],
"blacklistfile": path_join_robust(BASEDIR_PATH, "blacklist"),
"whitelistfile": path_join_robust(BASEDIR_PATH, "whitelist"),
}
# End Project Settings
def main():
parser = argparse.ArgumentParser(
description="Creates a unified hosts "
"file from hosts stored in the data subfolders."
)
parser.add_argument(
"--auto",
"-a",
dest="auto",
default=True,
action="store_true",
help="Run without prompting.",
)
parser.add_argument(
"--backup",
"-b",
dest="backup",
default=False,
action="store_true",
help="Backup the hosts files before they are overridden.",
)
parser.add_argument(
"--extensions",
"-e",
dest="extensions",
default=["fakenews", "gambling", "unconv"],
nargs="*",
help="Host extensions to include in the final hosts file.",
)
parser.add_argument(
"--ip",
"-i",
dest="targetip",
default="0.0.0.0",
help="Target IP address. Default is 0.0.0.0.",
)
parser.add_argument(
"--keepdomaincomments",
"-k",
dest="keepdomaincomments",
action="store_false",
default=True,
help="Do not keep domain line comments.",
)
parser.add_argument(
"--noupdate",
"-n",
dest="noupdate",
default=True,
action="store_true",
help="Don't update from host data sources.",
)
parser.add_argument(
"--skipstatichosts",
"-s",
dest="skipstatichosts",
default=False,
action="store_true",
help="Skip static localhost entries in the final hosts file.",
)
parser.add_argument(
"--nogendata",
"-g",
dest="nogendata",
default=True,
action="store_true",
help="Skip generation of readmeData.json",
)
parser.add_argument(
"--output",
"-o",
dest="outputsubfolder",
default="",
help="Output subfolder for generated hosts file.",
)
parser.add_argument(
"--replace",
"-r",
dest="replace",
default=False,
action="store_true",
help="Replace your active hosts file with this new hosts file.",
)
parser.add_argument(
"--flush-dns-cache",
"-f",
dest="flushdnscache",
default=False,
action="store_true",
help="Attempt to flush DNS cache after replacing the hosts file.",
)
parser.add_argument(
"--compress",
"-c",
dest="compress",
default=False,
action="store_true",
help="Compress the hosts file ignoring non-necessary lines "
"(empty lines and comments) and putting multiple domains in "
"each line. Improve the performance under Windows.",
)
parser.add_argument(
"--minimise",
"-m",
dest="minimise",
default=True,
action="store_true",
help="Minimise the hosts file ignoring non-necessary lines "
"(empty lines and comments).",
)
parser.add_argument(
"--whitelist",
"-w",
dest="whitelistfile",
default=path_join_robust(BASEDIR_PATH, "whitelist"),
help="Whitelist file to use while generating hosts files.",
)
parser.add_argument(
"--blacklist",
"-x",
dest="blacklistfile",
default=path_join_robust(BASEDIR_PATH, "blacklist"),
help="Blacklist file to use while generating hosts files.",
)
global settings
options = vars(parser.parse_args())
options["outputpath"] = path_join_robust(BASEDIR_PATH, options["outputsubfolder"])
options["freshen"] = not options["noupdate"]
settings = get_defaults()
settings.update(options)
data_path = settings["datapath"]
extensions_path = settings["extensionspath"]
settings["sources"] = list_dir_no_hidden(data_path)
settings["extensionsources"] = list_dir_no_hidden(extensions_path)
# All our extensions folders...
settings["extensions"] = [
os.path.basename(item) for item in list_dir_no_hidden(extensions_path)
]
# ... intersected with the extensions passed-in as arguments, then sorted.
settings["extensions"] = sorted(
list(set(options["extensions"]).intersection(settings["extensions"]))
)
auto = settings["auto"]
exclusion_regexes = settings["exclusionregexs"]
source_data_filename = settings["sourcedatafilename"]
update_sources = prompt_for_update(freshen=settings["freshen"], update_auto=auto)
if update_sources:
update_all_sources(source_data_filename, settings["hostfilename"])
gather_exclusions = False
if gather_exclusions:
common_exclusions = settings["commonexclusions"]
exclusion_pattern = settings["exclusionpattern"]
exclusion_regexes = display_exclusion_options(
common_exclusions=common_exclusions,
exclusion_pattern=exclusion_pattern,
exclusion_regexes=exclusion_regexes,
)
extensions = settings["extensions"]
sources_data = update_sources_data(
settings["sourcesdata"],
datapath=data_path,
extensions=extensions,
extensionspath=extensions_path,
sourcedatafilename=source_data_filename,
)
merge_file = create_initial_file()
remove_old_hosts_file(
path_join_robust(settings["outputpath"], "hosts"), settings["backup"]
)
if settings["compress"]:
final_file = open(path_join_robust(settings["outputpath"], "hosts_unconv"), "w+b")
compressed_file = tempfile.NamedTemporaryFile()
remove_dups_and_excl(merge_file, exclusion_regexes, compressed_file)
compress_file(compressed_file, settings["targetip"], final_file)
elif settings["minimise"]:
final_file = open(path_join_robust(settings["outputpath"], "hosts_unconv"), "w+b")
minimised_file = tempfile.NamedTemporaryFile()
remove_dups_and_excl(merge_file, exclusion_regexes, minimised_file)
minimise_file(minimised_file, settings["targetip"], final_file)
else:
final_file = remove_dups_and_excl(merge_file, exclusion_regexes)
number_of_rules = settings["numberofrules"]
output_subfolder = settings["outputsubfolder"]
skip_static_hosts = settings["skipstatichosts"]
write_opening_header(
final_file,
extensions=extensions,
numberofrules=number_of_rules,
outputsubfolder=output_subfolder,
skipstatichosts=skip_static_hosts,
)
final_file.close()
if not settings["nogendata"]:
update_readme_data(
settings["readmedatafilename"],
extensions=extensions,
numberofrules=number_of_rules,
outputsubfolder=output_subfolder,
sourcesdata=sources_data,
)
print_success(
"Success! The hosts file has been saved in folder "
+ output_subfolder
+ "\nIt contains "
+ "{:,}".format(number_of_rules)
+ " unique entries."
)
move_file = False
# We only flush the DNS cache if we have
# moved a new hosts file into place.
if move_file:
prompt_for_flush_dns_cache(
flush_cache=settings["flushdnscache"], prompt_flush=not auto
)
# Prompt the User
def prompt_for_update(freshen, update_auto):
"""
Prompt the user to update all hosts files.
If requested, the function will update all data sources after it
checks that a hosts file does indeed exist.
Parameters
----------
freshen : bool
Whether data sources should be updated. This function will return
if it is requested that data sources not be updated.
update_auto : bool
Whether or not to automatically update all data sources.
Returns
-------
update_sources : bool
Whether or not we should update data sources for exclusion files.
"""
# Create a hosts file if it doesn't exist.
hosts_file = path_join_robust(BASEDIR_PATH, "hosts_unconv")
if not os.path.isfile(hosts_file):
try:
open(hosts_file, "w+").close()
except (IOError, OSError):
# Starting in Python 3.3, IOError is aliased
# OSError. However, we have to catch both for
# Python 2.x failures.
print_failure(
"ERROR: No 'hosts' file in the folder. Try creating one manually."
)
if not freshen:
return
prompt = "Do you want to update all data sources?"
if update_auto or query_yes_no(prompt):
return True
elif not update_auto:
print("OK, we'll stick with what we've got locally.")
return False
def prompt_for_exclusions(skip_prompt):
"""
Prompt the user to exclude any custom domains from being blocked.
Parameters
----------
skip_prompt : bool
Whether or not to skip prompting for custom domains to be excluded.
If true, the function returns immediately.
Returns
-------
gather_exclusions : bool
Whether or not we should proceed to prompt the user to exclude any
custom domains beyond those in the whitelist.
"""
prompt = (
"Do you want to exclude any domains?\n"
"For example, hulu.com video streaming must be able to access "
"its tracking and ad servers in order to play video."
)
if not skip_prompt:
if query_yes_no(prompt):
return True
else:
print("OK, we'll only exclude domains in the whitelist.")
return False
def prompt_for_flush_dns_cache(flush_cache, prompt_flush):
"""
Prompt the user to flush the DNS cache.
Parameters
----------
flush_cache : bool
Whether to flush the DNS cache without prompting.
prompt_flush : bool
If `flush_cache` is False, whether we should prompt for flushing the
cache. Otherwise, the function returns immediately.
"""
if flush_cache:
flush_dns_cache()
elif prompt_flush:
if query_yes_no("Attempt to flush the DNS cache?"):
flush_dns_cache()
def prompt_for_move(final_file, **move_params):
"""
Prompt the user to move the newly created hosts file to its designated
location in the OS.
Parameters
----------
final_file : file
The file object that contains the newly created hosts data.
move_params : kwargs
Dictionary providing additional parameters for moving the hosts file
into place. Currently, those fields are:
1) auto
2) replace
3) skipstatichosts
Returns
-------
move_file : bool
Whether or not the final hosts file was moved.
"""
skip_static_hosts = move_params["skipstatichosts"]
if move_params["replace"] and not skip_static_hosts:
move_file = True
elif move_params["auto"] or skip_static_hosts:
move_file = False
else:
prompt = "Do you want to replace your existing hosts file with the newly generated file?"
move_file = query_yes_no(prompt)
if move_file:
move_hosts_file_into_place(final_file)
return move_file
# End Prompt the User
def sort_sources(sources):
"""
Sorts the sources.
The idea is that all Steven Black's list, file or entries
get on top and the rest sorted alphabetically.
Parameters
----------
sources: list
The sources to sort.
"""
result = sorted(
sources.copy(),
key=lambda x: x.lower().replace("-", "").replace("_", "").replace(" ", ""),
)
# Steven Black's repositories/files/lists should be on top!
steven_black_positions = [
x for x, y in enumerate(result) if "stevenblack" in y.lower()
]
for index in steven_black_positions:
result.insert(0, result.pop(index))
return result
# Exclusion logic
def display_exclusion_options(common_exclusions, exclusion_pattern, exclusion_regexes):
"""
Display the exclusion options to the user.
This function checks whether a user wants to exclude particular domains,
and if so, excludes them.
Parameters
----------
common_exclusions : list
A list of common domains that are excluded from being blocked. One
example is Hulu. This setting is set directly in the script and cannot
be overwritten by the user.
exclusion_pattern : str
The exclusion pattern with which to create the domain regex.
exclusion_regexes : list
The list of regex patterns used to exclude domains.
Returns
-------
aug_exclusion_regexes : list
The original list of regex patterns potentially with additional
patterns from domains that the user chooses to exclude.
"""
for exclusion_option in common_exclusions:
prompt = "Do you want to exclude the domain " + exclusion_option + " ?"
if query_yes_no(prompt):
exclusion_regexes = exclude_domain(
exclusion_option, exclusion_pattern, exclusion_regexes
)
else:
continue
if query_yes_no("Do you want to exclude any other domains?"):
exclusion_regexes = gather_custom_exclusions(
exclusion_pattern, exclusion_regexes
)
return exclusion_regexes
def gather_custom_exclusions(exclusion_pattern, exclusion_regexes):
"""
Gather custom exclusions from the user.
Parameters
----------
exclusion_pattern : str
The exclusion pattern with which to create the domain regex.
exclusion_regexes : list
The list of regex patterns used to exclude domains.
Returns
-------
aug_exclusion_regexes : list
The original list of regex patterns potentially with additional
patterns from domains that the user chooses to exclude.
"""
# We continue running this while-loop until the user
# says that they have no more domains to exclude.
while True:
domain_prompt = "Enter the domain you want to exclude (e.g. facebook.com): "
user_domain = input(domain_prompt)
if is_valid_domain_format(user_domain):
exclusion_regexes = exclude_domain(
user_domain, exclusion_pattern, exclusion_regexes
)
continue_prompt = "Do you have more domains you want to enter?"
if not query_yes_no(continue_prompt):
break
return exclusion_regexes
def exclude_domain(domain, exclusion_pattern, exclusion_regexes):
"""
Exclude a domain from being blocked.
This creates the domain regex by which to exclude this domain and appends
it a list of already-existing exclusion regexes.
Parameters
----------
domain : str
The filename or regex pattern to exclude.
exclusion_pattern : str
The exclusion pattern with which to create the domain regex.
exclusion_regexes : list
The list of regex patterns used to exclude domains.
Returns
-------
aug_exclusion_regexes : list
The original list of regex patterns with one additional pattern from
the `domain` input.
"""
exclusion_regex = re.compile(exclusion_pattern + domain)
exclusion_regexes.append(exclusion_regex)
return exclusion_regexes
def matches_exclusions(stripped_rule, exclusion_regexes):
"""
Check whether a rule matches an exclusion rule we already provided.
If this function returns True, that means this rule should be excluded
from the final hosts file.
Parameters
----------
stripped_rule : str
The rule that we are checking.
exclusion_regexes : list
The list of regex patterns used to exclude domains.
Returns
-------
matches_exclusion : bool
Whether or not the rule string matches a provided exclusion.
"""
stripped_domain = stripped_rule.split()[1]
for exclusionRegex in exclusion_regexes:
if exclusionRegex.search(stripped_domain):
return True
return False
# End Exclusion Logic
# Update Logic
def update_sources_data(sources_data, **sources_params):
"""
Update the sources data and information for each source.
Parameters
----------
sources_data : list
The list of sources data that we are to update.
sources_params : kwargs
Dictionary providing additional parameters for updating the
sources data. Currently, those fields are:
1) datapath
2) extensions
3) extensionspath
4) sourcedatafilename
Returns
-------
update_sources_data : list
The original source data list with new source data appended.
"""
source_data_filename = sources_params["sourcedatafilename"]
for source in sort_sources(
recursive_glob(sources_params["datapath"], source_data_filename)
):
update_file = open(source, "r", encoding="UTF-8")
update_data = json.load(update_file)
sources_data.append(update_data)
update_file.close()
for source in sources_params["extensions"]:
source_dir = path_join_robust(sources_params["extensionspath"], source)
for update_file_path in sort_sources(
recursive_glob(source_dir, source_data_filename)
):
update_file = open(update_file_path, "r")
update_data = json.load(update_file)
sources_data.append(update_data)
update_file.close()
return sources_data
def jsonarray(json_array_string):
"""
Transformer, converts a json array string hosts into one host per
line, prefixing each line with "127.0.0.1 ".
Parameters
----------
json_array_string : str
The json array string in the form
'["example1.com", "example1.com", ...]'
"""
temp_list = json.loads(json_array_string)
hostlines = "127.0.0.1 " + "\n127.0.0.1 ".join(temp_list)
return hostlines
def update_all_sources(source_data_filename, host_filename):
"""
Update all host files, regardless of folder depth.
Parameters
----------
source_data_filename : str
The name of the filename where information regarding updating
sources for a particular URL is stored. This filename is assumed
to be the same for all sources.
host_filename : str
The name of the file in which the updated source information
is stored for a particular URL. This filename is assumed to be
the same for all sources.
"""
# The transforms we support
transform_methods = {"jsonarray": jsonarray}
all_sources = sort_sources(recursive_glob("*", source_data_filename))
for source in all_sources:
update_file = open(source, "r", encoding="UTF-8")
update_data = json.load(update_file)
update_file.close()
update_url = update_data["url"]
update_transforms = []
if update_data.get("transforms"):
update_transforms = update_data["transforms"]
print("Updating source " + os.path.dirname(source) + " from " + update_url)
try:
updated_file = get_file_by_url(update_url)
# spin the transforms as required
for transform in update_transforms:
updated_file = transform_methods[transform](updated_file)
# get rid of carriage-return symbols
updated_file = updated_file.replace("\r", "")
hosts_file = open(
path_join_robust(BASEDIR_PATH, os.path.dirname(source), host_filename),
"wb",
)
write_data(hosts_file, updated_file)
hosts_file.close()
except Exception as e:
print(e)
print("Error in updating source: ", update_url)
# End Update Logic
# File Logic
def create_initial_file():
"""
Initialize the file in which we merge all host files for later pruning.
"""
merge_file = tempfile.NamedTemporaryFile()
# spin the sources for the base file
for source in sort_sources(
recursive_glob(settings["datapath"], settings["hostfilename"])
):
start = "# Start {}\n\n".format(os.path.basename(os.path.dirname(source)))
end = "# End {}\n\n".format(os.path.basename(os.path.dirname(source)))
with open(source, "r", encoding="UTF-8") as curFile:
write_data(merge_file, start + curFile.read() + end)
# spin the sources for extensions to the base file
for source in settings["extensions"]:
for filename in sort_sources(
recursive_glob(
path_join_robust(settings["extensionspath"], source),
settings["hostfilename"],
)
):
with open(filename, "r") as curFile:
write_data(merge_file, curFile.read())
maybe_copy_example_file(settings["blacklistfile"])
if os.path.isfile(settings["blacklistfile"]):
with open(settings["blacklistfile"], "r") as curFile:
write_data(merge_file, curFile.read())
return merge_file
def compress_file(input_file, target_ip, output_file):
"""
Reduce the file dimension removing non-necessary lines (empty lines and
comments) and putting multiple domains in each line.
Reducing the number of lines of the file, the parsing under Microsoft
Windows is much faster.
Parameters
----------
input_file : file
The file object that contains the hostnames that we are reducing.
target_ip : str
The target IP address.
output_file : file
The file object that will contain the reduced hostnames.
"""
input_file.seek(0) # reset file pointer
write_data(output_file, "\n")
target_ip_len = len(target_ip)
lines = [target_ip]
lines_index = 0
for line in input_file.readlines():
line = line.decode("UTF-8")
if line.startswith(target_ip):
if lines[lines_index].count(" ") < 9:
lines[lines_index] += (
" " + line[target_ip_len : line.find("#")].strip() # noqa: E203
)
else:
lines[lines_index] += "\n"
lines.append(line[: line.find("#")].strip())
lines_index += 1
for line in lines:
write_data(output_file, line)
input_file.close()
def minimise_file(input_file, target_ip, output_file):
"""
Reduce the file dimension removing non-necessary lines (empty lines and
comments).
Parameters
----------
input_file : file
The file object that contains the hostnames that we are reducing.
target_ip : str
The target IP address.
output_file : file
The file object that will contain the reduced hostnames.
"""
input_file.seek(0) # reset file pointer
write_data(output_file, "\n")
lines = []
for line in input_file.readlines():
line = line.decode("UTF-8")
if line.startswith(target_ip):
lines.append(line[: line.find("#")].strip() + "\n")
for line in lines:
write_data(output_file, line)
input_file.close()
def remove_dups_and_excl(merge_file, exclusion_regexes, output_file=None):
"""
Remove duplicates and remove hosts that we are excluding.
We check for duplicate hostnames as well as remove any hostnames that
have been explicitly excluded by the user.
Parameters
----------
merge_file : file
The file object that contains the hostnames that we are pruning.
exclusion_regexes : list
The list of regex patterns used to exclude domains.
output_file : file
The file object in which the result is written. If None, the file
'settings["outputpath"]' will be created.
"""
number_of_rules = settings["numberofrules"]
maybe_copy_example_file(settings["whitelistfile"])
if os.path.isfile(settings["whitelistfile"]):
with open(settings["whitelistfile"], "r") as ins:
for line in ins:
line = line.strip(" \t\n\r")
if line and not line.startswith("#"):
settings["exclusions"].append(line)
if not os.path.exists(settings["outputpath"]):
os.makedirs(settings["outputpath"])
if output_file is None:
final_file = open(path_join_robust(settings["outputpath"], "hosts_unconv"), "w+b")
else:
final_file = output_file
merge_file.seek(0) # reset file pointer
hostnames = {"localhost", "localhost.localdomain", "local", "broadcasthost"}
exclusions = settings["exclusions"]
for line in merge_file.readlines():
write_line = True
# Explicit encoding
line = line.decode("UTF-8")
# replace tabs with space
line = line.replace("\t+", " ")
# see gh-271: trim trailing whitespace, periods
line = line.rstrip(" .")
# Testing the first character doesn't require startswith
if line[0] == "#" or re.match(r"^\s*$", line[0]):
write_data(final_file, line)
continue
if "::1" in line:
continue
stripped_rule = strip_rule(line) # strip comments
if not stripped_rule or matches_exclusions(stripped_rule, exclusion_regexes):
continue
# Normalize rule
hostname, normalized_rule = normalize_rule(
stripped_rule,
target_ip=settings["targetip"],
keep_domain_comments=settings["keepdomaincomments"],
)
for exclude in exclusions:
if re.search(r"[\s\.]" + re.escape(exclude) + r"\s", line):
write_line = False
break
if normalized_rule and (hostname not in hostnames) and write_line:
write_data(final_file, normalized_rule)
hostnames.add(hostname)
number_of_rules += 1
settings["numberofrules"] = number_of_rules
merge_file.close()
if output_file is None:
return final_file
def normalize_rule(rule, target_ip, keep_domain_comments):
"""
Standardize and format the rule string provided.
Parameters
----------
rule : str
The rule whose spelling and spacing we are standardizing.
target_ip : str
The target IP address for the rule.
keep_domain_comments : bool
Whether or not to keep comments regarding these domains in
the normalized rule.
Returns
-------
normalized_rule : tuple
A tuple of the hostname and the rule string with spelling
and spacing reformatted.
"""
"""
first try: IP followed by domain
"""
regex = r"^\s*(\d{1,3}\.){3}\d{1,3}\s+([\w\.-]+[a-zA-Z])(.*)"
result = re.search(regex, rule)
if result:
hostname, suffix = result.group(2, 3)
# Explicitly lowercase and trim the hostname.
hostname = hostname.lower().strip()
rule = "%s %s" % (target_ip, hostname)
if suffix and keep_domain_comments:
if not suffix.strip().startswith("#"):
rule += " #%s" % suffix
else:
rule += " %s" % suffix
return hostname, rule + "\n"
"""
next try: IP address followed by host IP address
"""
regex = r"^\s*(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s*(.*)"
result = re.search(regex, rule)
if result:
ip_host, suffix = result.group(2, 3)
# Explicitly trim the ip host.
ip_host = ip_host.strip()
rule = "%s %s" % (target_ip, ip_host)
if suffix and keep_domain_comments:
if not suffix.strip().startswith("#"):
rule += " #%s" % suffix
else:
rule += " %s" % suffix
return ip_host, rule + "\n"
"""
finally, if we get here, just belch to screen
"""
print("==>%s<==" % rule)
return None, None
def strip_rule(line):
"""
Sanitize a rule string provided before writing it to the output hosts file.
Parameters
----------
line : str
The rule provided for sanitation.
Returns
-------
sanitized_line : str
The sanitized rule.
"""
split_line = line.split()
if len(split_line) < 2:
# just return blank
return ""
else:
return " ".join(split_line)
def write_opening_header(final_file, **header_params):
"""
Write the header information into the newly-created hosts file.
Parameters
----------
final_file : file
The file object that points to the newly-created hosts file.
header_params : kwargs
Dictionary providing additional parameters for populating the header
information. Currently, those fields are:
1) extensions
2) numberofrules
3) outputsubfolder
4) skipstatichosts
"""
final_file.seek(0) # Reset file pointer.
file_contents = final_file.read() # Save content.
final_file.seek(0) # Write at the top.
if header_params["extensions"]:
if len(header_params["extensions"]) > 1:
write_data(
final_file,
"# Title: StevenBlack/hosts with the {0} and {1} extensions\n#\n".format(
", ".join(header_params["extensions"][:-1]),
header_params["extensions"][-1],
),
)
else:
write_data(
final_file,
"# Title: StevenBlack/hosts with the {0} extension\n#\n".format(
", ".join(header_params["extensions"])
),
)
else:
write_data(final_file, "# Title: StevenBlack/hosts\n#\n")
write_data(
final_file,
"# This hosts file is a merged collection "
"of hosts from reputable sources,\n",
)
write_data(final_file, "# with a dash of crowd sourcing via GitHub\n#\n")
write_data(
final_file,
"# Date: " + time.strftime("%d %B %Y %H:%M:%S (%Z)", time.gmtime()) + "\n",
)
if header_params["extensions"]:
write_data(
final_file,
"# Extensions added to this file: "
+ ", ".join(header_params["extensions"])
+ "\n",
)
write_data(
final_file,
(
"# Number of unique domains: {:,}\n#\n".format(
header_params["numberofrules"]
)
),
)
write_data(
final_file,
"# Fetch the latest version of this file: "
"https://raw.githubusercontent.com/StevenBlack/hosts/master/"
+ path_join_robust(header_params["outputsubfolder"], "").replace("\\", "/")
+ "hosts\n",
)
write_data(
final_file, "# Project home page: https://github.com/StevenBlack/hosts\n"
)
write_data(
final_file,
"# Project releases: https://github.com/StevenBlack/hosts/releases\n#\n",
)
write_data(
final_file,
"# ===============================================================\n",
)
write_data(final_file, "\n")
if not header_params["skipstatichosts"]:
write_data(final_file, "127.0.0.1 localhost\n")
write_data(final_file, "127.0.0.1 localhost.localdomain\n")
write_data(final_file, "127.0.0.1 local\n")
write_data(final_file, "255.255.255.255 broadcasthost\n")
write_data(final_file, "::1 localhost\n")
write_data(final_file, "::1 ip6-localhost\n")
write_data(final_file, "::1 ip6-loopback\n")
write_data(final_file, "fe80::1%lo0 localhost\n")
write_data(final_file, "ff00::0 ip6-localnet\n")
write_data(final_file, "ff00::0 ip6-mcastprefix\n")
write_data(final_file, "ff02::1 ip6-allnodes\n")
write_data(final_file, "ff02::2 ip6-allrouters\n")
write_data(final_file, "ff02::3 ip6-allhosts\n")
write_data(final_file, "0.0.0.0 0.0.0.0\n")
if platform.system() == "Linux":
write_data(final_file, "127.0.1.1 " + socket.gethostname() + "\n")
write_data(final_file, "127.0.0.53 " + socket.gethostname() + "\n")
write_data(final_file, "\n")
preamble = path_join_robust(BASEDIR_PATH, "myhosts")
maybe_copy_example_file(preamble)
if os.path.isfile(preamble):
with open(preamble, "r") as f:
write_data(final_file, f.read())
final_file.write(file_contents)
def update_readme_data(readme_file, **readme_updates):
"""
Update the host and website information provided in the README JSON data.
Parameters
----------
readme_file : str
The name of the README file to update.
readme_updates : kwargs
Dictionary providing additional JSON fields to update before
saving the data. Currently, those fields are:
1) extensions
2) sourcesdata
3) numberofrules
4) outputsubfolder
"""
return
extensions_key = "base"
extensions = readme_updates["extensions"]
if extensions:
extensions_key = "-".join(extensions)
output_folder = readme_updates["outputsubfolder"]
generation_data = {
"location": path_join_robust(output_folder, ""),
"entries": readme_updates["numberofrules"],
"sourcesdata": readme_updates["sourcesdata"],
}
with open(readme_file, "r") as f:
readme_data = json.load(f)
readme_data[extensions_key] = generation_data
for denomination, data in readme_data.copy().items():
if "location" in data and data["location"] and "\\" in data["location"]:
# Windows compatibility: #1166
readme_data[denomination]["location"] = data["location"].replace("\\", "/")
with open(readme_file, "w") as f:
json.dump(readme_data, f)
def move_hosts_file_into_place(final_file):
"""
Move the newly-created hosts file into its correct location on the OS.
For UNIX systems, the hosts file is "/etc/hosts." On Windows, it's
"C:\Windows\System32\drivers\etc\hosts."
For this move to work, you must have administrator privileges to do this.
On UNIX systems, this means having "sudo" access, and on Windows, it
means being able to run command prompt in administrator mode.
Parameters
----------
final_file : file object
The newly-created hosts file to move.
""" # noqa: W605
filename = os.path.abspath(final_file.name)
if os.name == "posix":
print(
"Moving the file requires administrative privileges. You might need to enter your password."
)
if subprocess.call(SUDO + ["cp", filename, "/etc/hosts"]):
print_failure("Moving the file failed.")
elif os.name == "nt":
print("Automatically moving the hosts file in place is not yet supported.")
print(
"Please move the generated file to %SystemRoot%\\system32\\drivers\\etc\\hosts"
)
def flush_dns_cache():
"""
Flush the DNS cache.
"""
print("Flushing the DNS cache to utilize new hosts file...")
print(
"Flushing the DNS cache requires administrative privileges. You might need to enter your password."
)
dns_cache_found = False
if platform.system() == "Darwin":
if subprocess.call(SUDO + ["killall", "-HUP", "mDNSResponder"]):
print_failure("Flushing the DNS cache failed.")
elif os.name == "nt":
print("Automatically flushing the DNS cache is not yet supported.")
print(
"Please copy and paste the command 'ipconfig /flushdns' in "
"administrator command prompt after running this script."
)
else:
nscd_prefixes = ["/etc", "/etc/rc.d"]
nscd_msg = "Flushing the DNS cache by restarting nscd {result}"
for nscd_prefix in nscd_prefixes:
nscd_cache = nscd_prefix + "/init.d/nscd"
if os.path.isfile(nscd_cache):
dns_cache_found = True
if subprocess.call(SUDO + [nscd_cache, "restart"]):
print_failure(nscd_msg.format(result="failed"))
else:
print_success(nscd_msg.format(result="succeeded"))
centos_file = "/etc/init.d/network"
centos_msg = "Flushing the DNS cache by restarting network {result}"
if os.path.isfile(centos_file):
if subprocess.call(SUDO + [centos_file, "restart"]):
print_failure(centos_msg.format(result="failed"))
else:
print_success(centos_msg.format(result="succeeded"))
system_prefixes = ["/usr", ""]
service_types = ["NetworkManager", "wicd", "dnsmasq", "networking"]
restarted_services = []
for system_prefix in system_prefixes:
systemctl = system_prefix + "/bin/systemctl"
system_dir = system_prefix + "/lib/systemd/system"
for service_type in service_types:
service = service_type + ".service"
if service in restarted_services:
continue
service_file = path_join_robust(system_dir, service)
service_msg = (
"Flushing the DNS cache by restarting " + service + " {result}"
)
if os.path.isfile(service_file):
if 0 != subprocess.call([systemctl, "status", service],
stdout=subprocess.DEVNULL):
continue
dns_cache_found = True
if subprocess.call(SUDO + [systemctl, "restart", service]):
print_failure(service_msg.format(result="failed"))
else:
print_success(service_msg.format(result="succeeded"))
restarted_services.append(service)
dns_clean_file = "/etc/init.d/dns-clean"
dns_clean_msg = "Flushing the DNS cache via dns-clean executable {result}"
if os.path.isfile(dns_clean_file):
dns_cache_found = True
if subprocess.call(SUDO + [dns_clean_file, "start"]):
print_failure(dns_clean_msg.format(result="failed"))
else:
print_success(dns_clean_msg.format(result="succeeded"))
if not dns_cache_found:
print_failure("Unable to determine DNS management tool.")
def remove_old_hosts_file(old_file_path, backup):
"""
Remove the old hosts file.
This is a hotfix because merging with an already existing hosts file leads
to artifacts and duplicates.
Parameters
----------
backup : boolean, default False
Whether or not to backup the existing hosts file.
"""
old_file_path = path_join_robust(BASEDIR_PATH, "hosts_unconv")
# Create if already removed, so remove won't raise an error.
open(old_file_path, "a").close()
if backup:
backup_file_path = old_file_path + "-{}".format(
time.strftime("%Y-%m-%d-%H-%M-%S")
)
# Make a backup copy, marking the date in which the list was updated
shutil.copy(old_file_path, backup_file_path)
os.remove(old_file_path)
# Create new empty hosts file
open(old_file_path, "a").close()
# End File Logic
def domain_to_idna(line):
"""
Encode a domain that is present into a line into `idna`. This way we
avoid most encoding issues.
Parameters
----------
line : str
The line we have to encode/decode.
Returns
-------
line : str
The line in a converted format.
Notes
-----
- This function encodes only the domain to `idna` format because in
most cases, the encoding issue is due to a domain which looks like
`b'\xc9\xa2oogle.com'.decode('idna')`.
- About the splitting:
We split because we only want to encode the domain and not the full
line, which may cause some issues. Keep in mind that we split, but we
still concatenate once we encoded the domain.
- The following split the prefix `0.0.0.0` or `127.0.0.1` of a line.
- The following also split the trailing comment of a given line.
"""
if not line.startswith("#"):
tabs = "\t"
space = " "
tabs_position, space_position = (line.find(tabs), line.find(space))
if tabs_position > -1 and space_position > -1:
if space_position < tabs_position:
separator = space
else:
separator = tabs
elif not tabs_position == -1:
separator = tabs
elif not space_position == -1:
separator = space
else:
separator = ""
if separator:
splited_line = line.split(separator)
try:
index = 1
while index < len(splited_line):
if splited_line[index]:
break
index += 1
if "#" in splited_line[index]:
index_comment = splited_line[index].find("#")
if index_comment > -1:
comment = splited_line[index][index_comment:]
splited_line[index] = (
splited_line[index]
.split(comment)[0]
.encode("IDNA")
.decode("UTF-8")
+ comment
)
splited_line[index] = splited_line[index].encode("IDNA").decode("UTF-8")
except IndexError:
pass
return separator.join(splited_line)
return line.encode("IDNA").decode("UTF-8")
return line.encode("UTF-8").decode("UTF-8")
# Helper Functions
def maybe_copy_example_file(file_path):
"""
Given a file path, copy over its ".example" if the path doesn't exist.
If the path does exist, nothing happens in this function.
If the path doesn't exist, and the ".example" file doesn't exist, nothing happens in this function.
Parameters
----------
file_path : str
The full file path to check.
"""
if not os.path.isfile(file_path):
example_file_path = file_path + ".example"
if os.path.isfile(example_file_path):
shutil.copyfile(example_file_path, file_path)
def get_file_by_url(url, retries=3, delay=10):
"""
Get a file data located at a particular URL.
Parameters
----------
url : str
The URL at which to access the data.
Returns
-------
url_data : str or None
The data retrieved at that URL from the file. Returns None if the
attempted retrieval is unsuccessful.
Note
----
- BeautifulSoup is used in this case to avoid having to search in which
format we have to encode or decode data before parsing it to UTF-8.
"""
while retries:
try:
with urlopen(url) as f:
soup = BeautifulSoup(f.read(), "lxml").get_text()
return "\n".join(list(map(domain_to_idna, soup.split("\n"))))
except Exception as e:
if 'failure in name resolution' in str(e):
print('No internet connection! Retrying in {} seconds'.format(delay))
time.sleep(delay)
retries -= 1
continue
break
print("Problem getting file: ", url)
def write_data(f, data):
"""
Write data to a file object.
Parameters
----------
f : file
The file object at which to write the data.
data : str
The data to write to the file.
"""
f.write(bytes(data, "UTF-8"))
def list_dir_no_hidden(path):
"""
List all files in a directory, except for hidden files.
Parameters
----------
path : str
The path of the directory whose files we wish to list.
"""
return glob(os.path.join(path, "*"))
def query_yes_no(question, default="yes"):
"""
Ask a yes/no question via input() and get answer from the user.
Inspired by the following implementation:
http://code.activestate.com/recipes/577058
Parameters
----------
question : str
The question presented to the user.
default : str, default "yes"
The presumed answer if the user just hits <Enter>. It must be "yes",
"no", or None (means an answer is required of the user).
Returns
-------
yes : Whether or not the user replied yes to the question.
"""
valid = {"yes": "yes", "y": "yes", "ye": "yes", "no": "no", "n": "no"}
prompt = {None: " [y/n] ", "yes": " [Y/n] ", "no": " [y/N] "}.get(default, None)
if not prompt:
raise ValueError("invalid default answer: '%s'" % default)
reply = None
while not reply:
sys.stdout.write(colorize(question, Colors.PROMPT) + prompt)
choice = input().lower()
reply = None
if default and not choice:
reply = default
elif choice in valid:
reply = valid[choice]
else:
print_failure("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
return reply == "yes"
def is_valid_domain_format(domain):
"""
Check whether a provided domain is valid.
Parameters
----------
domain : str
The domain against which to check.
Returns
-------
valid_domain : bool
Whether or not the domain provided is valid.
"""
if domain == "":
print("You didn't enter a domain. Try again.")
return False
domain_regex = re.compile(r"www\d{0,3}[.]|https?")
if domain_regex.match(domain):
print(
"The domain " + domain + " is not valid. Do not include "
"www.domain.com or http(s)://domain.com. Try again."
)
return False
else:
return True
def recursive_glob(stem, file_pattern):
"""
Recursively match files in a directory according to a pattern.
Parameters
----------
stem : str
The directory in which to recurse
file_pattern : str
The filename regex pattern to which to match.
Returns
-------
matches_list : list
A list of filenames in the directory that match the file pattern.
"""
if sys.version_info >= (3, 5):
return glob(stem + "/**/" + file_pattern, recursive=True)
else:
# gh-316: this will avoid invalid unicode comparisons in Python 2.x
if stem == str("*"):
stem = "."
matches = []
for root, dirnames, filenames in os.walk(stem):
for filename in fnmatch.filter(filenames, file_pattern):
matches.append(path_join_robust(root, filename))
return matches
def path_join_robust(path, *paths):
"""
Wrapper around `os.path.join` with handling for locale issues.
Parameters
----------
path : str
The first path to join.
paths : varargs
Subsequent path strings to join.
Returns
-------
joined_path : str
The joined path string of the two path inputs.
Raises
------
locale.Error : A locale issue was detected that prevents path joining.
"""
try:
# gh-316: joining unicode and str can be saddening in Python 2.x
path = str(path)
paths = [str(another_path) for another_path in paths]
return os.path.join(path, *paths)
except UnicodeDecodeError as e:
raise locale.Error(
"Unable to construct path. This is likely a LOCALE issue:\n\n" + str(e)
)
# Colors
class Colors(object):
PROMPT = "\033[94m"
SUCCESS = "\033[92m"
FAIL = "\033[91m"
ENDC = "\033[0m"
def supports_color():
"""
Check whether the running terminal or command prompt supports color.
Inspired by StackOverflow link (and Django implementation) here:
https://stackoverflow.com/questions/7445658
Returns
-------
colors_supported : bool
Whether the running terminal or command prompt supports color.
"""
sys_platform = sys.platform
supported = sys_platform != "Pocket PC" and (
sys_platform != "win32" or "ANSICON" in os.environ
)
atty_connected = hasattr(sys.stdout, "isatty") and sys.stdout.isatty()
return supported and atty_connected
def colorize(text, color):
"""
Wrap a string so that it displays in a particular color.
This function adds a prefix and suffix to a text string so that it is
displayed as a particular color, either in command prompt or the terminal.
If the running terminal or command prompt does not support color, the
original text is returned without being wrapped.
Parameters
----------
text : str
The message to display.
color : str
The color string prefix to put before the text.
Returns
-------
wrapped_str : str
The wrapped string to display in color, if possible.
"""
if not supports_color():
return text
return color + text + Colors.ENDC
def print_success(text):
"""
Print a success message.
Parameters
----------
text : str
The message to display.
"""
print(colorize(text, Colors.SUCCESS))
def print_failure(text):
"""
Print a failure message.
Parameters
----------
text : str
The message to display.
"""
print(colorize(text, Colors.FAIL))
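# Usage sketch for the colour helpers: colorize() falls back to plain text when
# supports_color() reports no terminal colour support, so these calls are safe
# on redirected output as well.
#
#     print_success("Update completed.")
#     print_failure("Unable to reach the mirror.")
#     sys.stdout.write(colorize("Continue?", Colors.PROMPT))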
# End Helper Functions
if __name__ == "__main__":
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
spyderlib/plugins/editor.py
|
# -*- coding: utf-8 -*-
#
# Copyright © 2009-2010 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
"""Editor Plugin"""
# pylint: disable=C0103
# pylint: disable=R0903
# pylint: disable=R0911
# pylint: disable=R0201
from spyderlib.qt import PYQT5
from spyderlib.qt.QtGui import (QVBoxLayout, QPrintDialog, QSplitter, QToolBar,
QAction, QApplication, QDialog, QWidget,
QPrinter, QActionGroup, QInputDialog, QMenu,
QAbstractPrintDialog, QGroupBox, QTabWidget,
QLabel, QFontComboBox, QHBoxLayout,
QKeySequence)
from spyderlib.qt.QtCore import Signal, QByteArray, Qt, Slot
from spyderlib.qt.compat import to_qvariant, from_qvariant, getopenfilenames
import os
import time
import re
import os.path as osp
# Local imports
from spyderlib.utils import encoding, sourcecode, codeanalysis
from spyderlib.baseconfig import get_conf_path, _
from spyderlib.config import CONF, EDIT_FILTERS, get_filter, EDIT_FILETYPES
from spyderlib.guiconfig import get_color_scheme
from spyderlib.utils import programs
from spyderlib.utils.qthelpers import (get_icon, create_action, add_actions,
get_std_icon, get_filetype_icon,
add_shortcut_to_tooltip)
from spyderlib.widgets.findreplace import FindReplace
from spyderlib.widgets.status import (ReadWriteStatus, EOLStatus,
EncodingStatus, CursorPositionStatus)
from spyderlib.widgets.editor import (EditorSplitter, EditorStack, Printer,
EditorMainWindow)
from spyderlib.widgets.sourcecode.codeeditor import CodeEditor
from spyderlib.plugins import SpyderPluginWidget, PluginConfigPage
from spyderlib.plugins.runconfig import (RunConfigDialog, RunConfigOneDialog,
get_run_configuration,
ALWAYS_OPEN_FIRST_RUN_OPTION)
from spyderlib.py3compat import PY2, to_text_string, getcwd, qbytearray_to_str
def _load_all_breakpoints():
bp_dict = CONF.get('run', 'breakpoints', {})
for filename in list(bp_dict.keys()):
if not osp.isfile(filename):
bp_dict.pop(filename)
return bp_dict
def load_breakpoints(filename):
breakpoints = _load_all_breakpoints().get(filename, [])
if breakpoints and isinstance(breakpoints[0], int):
# Old breakpoints format
breakpoints = [(lineno, None) for lineno in breakpoints]
return breakpoints
def save_breakpoints(filename, breakpoints):
if not osp.isfile(filename):
return
bp_dict = _load_all_breakpoints()
bp_dict[filename] = breakpoints
CONF.set('run', 'breakpoints', bp_dict)
def clear_all_breakpoints():
CONF.set('run', 'breakpoints', {})
def clear_breakpoint(filename, lineno):
breakpoints = load_breakpoints(filename)
if breakpoints:
for breakpoint in breakpoints[:]:
if breakpoint[0] == lineno:
breakpoints.remove(breakpoint)
save_breakpoints(filename, breakpoints)
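# The breakpoint helpers above persist breakpoints in Spyder's CONF store under
# ('run', 'breakpoints') as {filename: [(lineno, condition_or_None), ...]};
# _load_all_breakpoints() also prunes entries whose files no longer exist.
# Hypothetical round trip (the path is illustrative and must exist on disk,
# because save_breakpoints silently skips missing files):
#
#     save_breakpoints("/tmp/script.py", [(10, None), (42, "x > 3")])
#     load_breakpoints("/tmp/script.py")  # returns the list saved above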
WINPDB_PATH = programs.find_program('winpdb')
class EditorConfigPage(PluginConfigPage):
def get_name(self):
return _("Editor")
def get_icon(self):
return get_icon("edit24.png")
def setup_page(self):
template_btn = self.create_button(_("Edit template for new modules"),
self.plugin.edit_template)
interface_group = QGroupBox(_("Interface"))
font_group = self.create_fontgroup(option=None,
text=_("Text and margin font style"),
fontfilters=QFontComboBox.MonospacedFonts)
newcb = self.create_checkbox
fpsorting_box = newcb(_("Sort files according to full path"),
'fullpath_sorting')
showtabbar_box = newcb(_("Show tab bar"), 'show_tab_bar')
interface_layout = QVBoxLayout()
interface_layout.addWidget(fpsorting_box)
interface_layout.addWidget(showtabbar_box)
interface_group.setLayout(interface_layout)
display_group = QGroupBox(_("Source code"))
linenumbers_box = newcb(_("Show line numbers"), 'line_numbers')
blanks_box = newcb(_("Show blank spaces"), 'blank_spaces')
edgeline_box = newcb(_("Show vertical line after"), 'edge_line')
edgeline_spin = self.create_spinbox("", _("characters"),
'edge_line_column', 79, 1, 500)
edgeline_box.toggled.connect(edgeline_spin.setEnabled)
edgeline_spin.setEnabled(self.get_option('edge_line'))
edgeline_layout = QHBoxLayout()
edgeline_layout.addWidget(edgeline_box)
edgeline_layout.addWidget(edgeline_spin)
currentline_box = newcb(_("Highlight current line"),
'highlight_current_line')
currentcell_box = newcb(_("Highlight current cell"),
'highlight_current_cell')
occurence_box = newcb(_("Highlight occurences after"),
'occurence_highlighting')
occurence_spin = self.create_spinbox("", " ms",
'occurence_highlighting/timeout',
min_=100, max_=1000000, step=100)
occurence_box.toggled.connect(occurence_spin.setEnabled)
occurence_spin.setEnabled(self.get_option('occurence_highlighting'))
occurence_layout = QHBoxLayout()
occurence_layout.addWidget(occurence_box)
occurence_layout.addWidget(occurence_spin)
wrap_mode_box = newcb(_("Wrap lines"), 'wrap')
names = CONF.get('color_schemes', 'names')
choices = list(zip(names, names))
cs_combo = self.create_combobox(_("Syntax color scheme: "),
choices, 'color_scheme_name')
display_layout = QVBoxLayout()
display_layout.addWidget(linenumbers_box)
display_layout.addWidget(blanks_box)
display_layout.addLayout(edgeline_layout)
display_layout.addWidget(currentline_box)
display_layout.addWidget(currentcell_box)
display_layout.addLayout(occurence_layout)
display_layout.addWidget(wrap_mode_box)
display_layout.addWidget(cs_combo)
display_group.setLayout(display_layout)
run_group = QGroupBox(_("Run"))
saveall_box = newcb(_("Save all files before running script"),
'save_all_before_run')
run_selection_group = QGroupBox(_("Run selection"))
focus_box = newcb(_("Maintain focus in the Editor after running cells or selections"),
'focus_to_editor')
introspection_group = QGroupBox(_("Introspection"))
rope_is_installed = programs.is_module_installed('rope')
if rope_is_installed:
completion_box = newcb(_("Automatic code completion"),
'codecompletion/auto')
case_comp_box = newcb(_("Case sensitive code completion"),
'codecompletion/case_sensitive')
comp_enter_box = newcb(_("Enter key selects completion"),
'codecompletion/enter_key')
calltips_box = newcb(_("Display balloon tips"), 'calltips')
gotodef_box = newcb(_("Link to object definition"),
'go_to_definition',
tip=_("If this option is enabled, clicking on an object\n"
"name (left-click + Ctrl key) will go this object\n"
"definition (if resolved)."))
else:
rope_label = QLabel(_("<b>Warning:</b><br>"
"The Python module <i>rope</i> is not "
"installed on this computer: calltips, "
"code completion and go-to-definition "
"features won't be available."))
rope_label.setWordWrap(True)
sourcecode_group = QGroupBox(_("Source code"))
closepar_box = newcb(_("Automatic insertion of parentheses, braces "
"and brackets"),
'close_parentheses')
close_quotes_box = newcb(_("Automatic insertion of closing quotes"),
'close_quotes')
add_colons_box = newcb(_("Automatic insertion of colons after 'for', "
"'if', 'def', etc"),
'add_colons')
autounindent_box = newcb(_("Automatic indentation after 'else', "
"'elif', etc."), 'auto_unindent')
indent_chars_box = self.create_combobox(_("Indentation characters: "),
((_("4 spaces"), '* *'),
(_("2 spaces"), '* *'),
(_("tab"), '*\t*')), 'indent_chars')
tabwidth_spin = self.create_spinbox(_("Tab stop width:"), _("pixels"),
'tab_stop_width', 40, 10, 1000, 10)
tab_mode_box = newcb(_("Tab always indent"),
'tab_always_indent', default=False,
tip=_("If enabled, pressing Tab will always indent,\n"
"even when the cursor is not at the beginning\n"
"of a line (when this option is enabled, code\n"
"completion may be triggered using the alternate\n"
"shortcut: Ctrl+Space)"))
ibackspace_box = newcb(_("Intelligent backspace"),
'intelligent_backspace', default=True)
removetrail_box = newcb(_("Automatically remove trailing spaces "
"when saving files"),
'always_remove_trailing_spaces', default=False)
analysis_group = QGroupBox(_("Analysis"))
pep8_url = '<a href="http://www.python.org/dev/peps/pep-0008/">PEP8</a>'
analysis_label = QLabel(_("<u>Note</u>: add <b>analysis:ignore</b> in "
"a comment to ignore code/style analysis "
"warnings. For more informations on style "
"guide for Python code, please refer to the "
"%s page.") % pep8_url)
analysis_label.setWordWrap(True)
is_pyflakes = codeanalysis.is_pyflakes_installed()
is_pep8 = codeanalysis.get_checker_executable('pep8') is not None
analysis_label.setEnabled(is_pyflakes or is_pep8)
pyflakes_box = newcb(_("Code analysis")+" (pyflakes)",
'code_analysis/pyflakes', default=True,
tip=_("If enabled, Python source code will be analyzed\n"
"using pyflakes, lines containing errors or \n"
"warnings will be highlighted"))
pyflakes_box.setEnabled(is_pyflakes)
if not is_pyflakes:
pyflakes_box.setToolTip(_("Code analysis requires pyflakes %s+") %
codeanalysis.PYFLAKES_REQVER)
pep8_box = newcb(_("Style analysis")+' (pep8)',
'code_analysis/pep8', default=False,
tip=_('If enabled, Python source code will be analyzed\n'
'using pep8, lines that are not following PEP8\n'
'style guide will be highlighted'))
pep8_box.setEnabled(is_pep8)
ancb_layout = QHBoxLayout()
ancb_layout.addWidget(pyflakes_box)
ancb_layout.addWidget(pep8_box)
todolist_box = newcb(_("Tasks (TODO, FIXME, XXX, HINT, TIP, @todo)"),
'todo_list', default=True)
realtime_radio = self.create_radiobutton(
_("Perform analysis when "
"saving file and every"),
'realtime_analysis', True)
saveonly_radio = self.create_radiobutton(
_("Perform analysis only "
"when saving file"),
'onsave_analysis')
af_spin = self.create_spinbox("", " ms", 'realtime_analysis/timeout',
min_=100, max_=1000000, step=100)
af_layout = QHBoxLayout()
af_layout.addWidget(realtime_radio)
af_layout.addWidget(af_spin)
run_layout = QVBoxLayout()
run_layout.addWidget(saveall_box)
run_group.setLayout(run_layout)
run_selection_layout = QVBoxLayout()
run_selection_layout.addWidget(focus_box)
run_selection_group.setLayout(run_selection_layout)
introspection_layout = QVBoxLayout()
if rope_is_installed:
introspection_layout.addWidget(calltips_box)
introspection_layout.addWidget(completion_box)
introspection_layout.addWidget(case_comp_box)
introspection_layout.addWidget(comp_enter_box)
introspection_layout.addWidget(gotodef_box)
else:
introspection_layout.addWidget(rope_label)
introspection_group.setLayout(introspection_layout)
analysis_layout = QVBoxLayout()
analysis_layout.addWidget(analysis_label)
analysis_layout.addLayout(ancb_layout)
analysis_layout.addWidget(todolist_box)
analysis_layout.addLayout(af_layout)
analysis_layout.addWidget(saveonly_radio)
analysis_group.setLayout(analysis_layout)
sourcecode_layout = QVBoxLayout()
sourcecode_layout.addWidget(closepar_box)
sourcecode_layout.addWidget(autounindent_box)
sourcecode_layout.addWidget(add_colons_box)
sourcecode_layout.addWidget(close_quotes_box)
sourcecode_layout.addWidget(indent_chars_box)
sourcecode_layout.addWidget(tabwidth_spin)
sourcecode_layout.addWidget(tab_mode_box)
sourcecode_layout.addWidget(ibackspace_box)
sourcecode_layout.addWidget(removetrail_box)
sourcecode_group.setLayout(sourcecode_layout)
eol_group = QGroupBox(_("End-of-line characters"))
eol_label = QLabel(_("When opening a text file containing "
"mixed end-of-line characters (this may "
"raise syntax errors in the consoles "
"on Windows platforms), Spyder may fix the "
"file automatically."))
eol_label.setWordWrap(True)
check_eol_box = newcb(_("Fix automatically and show warning "
"message box"),
'check_eol_chars', default=True)
eol_layout = QVBoxLayout()
eol_layout.addWidget(eol_label)
eol_layout.addWidget(check_eol_box)
eol_group.setLayout(eol_layout)
tabs = QTabWidget()
tabs.addTab(self.create_tab(font_group, interface_group, display_group),
_("Display"))
tabs.addTab(self.create_tab(introspection_group, analysis_group),
_("Code Introspection/Analysis"))
tabs.addTab(self.create_tab(template_btn, run_group, run_selection_group,
sourcecode_group, eol_group),
_("Advanced settings"))
vlayout = QVBoxLayout()
vlayout.addWidget(tabs)
self.setLayout(vlayout)
class Editor(SpyderPluginWidget):
"""
Multi-file Editor widget
"""
CONF_SECTION = 'editor'
CONFIGWIDGET_CLASS = EditorConfigPage
TEMPFILE_PATH = get_conf_path('temp.py')
TEMPLATE_PATH = get_conf_path('template.py')
DISABLE_ACTIONS_WHEN_HIDDEN = False # SpyderPluginWidget class attribute
# Signals
run_in_current_ipyclient = Signal(str, str, str, bool, bool)
exec_in_extconsole = Signal(str, bool)
redirect_stdio = Signal(bool)
open_dir = Signal(str)
breakpoints_saved = Signal()
run_in_current_extconsole = Signal(str, str, str, bool, bool)
def __init__(self, parent, ignore_last_opened_files=False):
if PYQT5:
SpyderPluginWidget.__init__(self, parent, main=parent)
else:
SpyderPluginWidget.__init__(self, parent)
self.__set_eol_chars = True
self.set_default_color_scheme()
# Creating template if it doesn't already exist
if not osp.isfile(self.TEMPLATE_PATH):
header = ['# -*- coding: utf-8 -*-', '"""', 'Created on %(date)s',
'', '@author: %(username)s', '"""', '']
encoding.write(os.linesep.join(header), self.TEMPLATE_PATH, 'utf-8')
self.projectexplorer = None
self.outlineexplorer = None
self.inspector = None
self.editorstacks = None
self.editorwindows = None
self.editorwindows_to_be_created = None
self.file_dependent_actions = []
self.pythonfile_dependent_actions = []
self.dock_toolbar_actions = None
self.edit_menu_actions = None #XXX: find another way to notify Spyder
# (see spyder.py: 'update_edit_menu' method)
self.search_menu_actions = None #XXX: same thing ('update_search_menu')
self.stack_menu_actions = None
# Initialize plugin
self.initialize_plugin()
# Configuration dialog size
self.dialog_size = None
statusbar = self.main.statusBar()
self.readwrite_status = ReadWriteStatus(self, statusbar)
self.eol_status = EOLStatus(self, statusbar)
self.encoding_status = EncodingStatus(self, statusbar)
self.cursorpos_status = CursorPositionStatus(self, statusbar)
layout = QVBoxLayout()
self.dock_toolbar = QToolBar(self)
add_actions(self.dock_toolbar, self.dock_toolbar_actions)
layout.addWidget(self.dock_toolbar)
self.last_edit_cursor_pos = None
self.cursor_pos_history = []
self.cursor_pos_index = None
self.__ignore_cursor_position = True
self.editorstacks = []
self.last_focus_editorstack = {}
self.editorwindows = []
self.editorwindows_to_be_created = []
self.toolbar_list = None
self.menu_list = None
# Setup new windows:
self.main.all_actions_defined.connect(self.setup_other_windows)
# Change module completions when PYTHONPATH changes
self.main.sig_pythonpath_changed.connect(self.set_path)
# Find widget
self.find_widget = FindReplace(self, enable_replace=True)
self.find_widget.hide()
self.register_widget_shortcuts("Editor", self.find_widget)
# Tabbed editor widget + Find/Replace widget
editor_widgets = QWidget(self)
editor_layout = QVBoxLayout()
editor_layout.setContentsMargins(0, 0, 0, 0)
editor_widgets.setLayout(editor_layout)
self.editorsplitter = EditorSplitter(self, self,
self.stack_menu_actions, first=True)
editor_layout.addWidget(self.editorsplitter)
editor_layout.addWidget(self.find_widget)
# Splitter: editor widgets (see above) + outline explorer
self.splitter = QSplitter(self)
self.splitter.setContentsMargins(0, 0, 0, 0)
self.splitter.addWidget(editor_widgets)
self.splitter.setStretchFactor(0, 5)
self.splitter.setStretchFactor(1, 1)
layout.addWidget(self.splitter)
self.setLayout(layout)
# Editor's splitter state
state = self.get_option('splitter_state', None)
if state is not None:
self.splitter.restoreState( QByteArray().fromHex(str(state)) )
self.recent_files = self.get_option('recent_files', [])
self.untitled_num = 0
filenames = self.get_option('filenames', [])
if filenames and not ignore_last_opened_files:
self.load(filenames)
layout = self.get_option('layout_settings', None)
if layout is not None:
self.editorsplitter.set_layout_settings(layout)
win_layout = self.get_option('windows_layout_settings', None)
if win_layout:
for layout_settings in win_layout:
self.editorwindows_to_be_created.append(layout_settings)
self.set_last_focus_editorstack(self, self.editorstacks[0])
else:
self.__load_temp_file()
# Parameters of last file execution:
self.__last_ic_exec = None # internal console
self.__last_ec_exec = None # external console
self.__ignore_cursor_position = False
current_editor = self.get_current_editor()
if current_editor is not None:
filename = self.get_current_filename()
position = current_editor.get_position('cursor')
self.add_cursor_position_to_history(filename, position)
self.update_cursorpos_actions()
self.set_path()
def set_projectexplorer(self, projectexplorer):
self.projectexplorer = projectexplorer
@Slot()
def show_hide_project_explorer(self):
if self.projectexplorer is not None:
dw = self.projectexplorer.dockwidget
if dw.isVisible():
dw.hide()
else:
dw.show()
dw.raise_()
self.switch_to_plugin()
def set_outlineexplorer(self, outlineexplorer):
self.outlineexplorer = outlineexplorer
for editorstack in self.editorstacks:
editorstack.set_outlineexplorer(self.outlineexplorer)
self.editorstacks[0].initialize_outlineexplorer()
self.outlineexplorer.edit_goto.connect(
lambda filenames, goto, word:
self.load(filenames=filenames, goto=goto, word=word,
editorwindow=self))
self.outlineexplorer.edit.connect(
lambda filenames:
self.load(filenames=filenames, editorwindow=self))
@Slot()
def show_hide_outline_explorer(self):
if self.outlineexplorer is not None:
dw = self.outlineexplorer.dockwidget
if dw.isVisible():
dw.hide()
else:
dw.show()
dw.raise_()
self.switch_to_plugin()
def set_inspector(self, inspector):
self.inspector = inspector
for editorstack in self.editorstacks:
editorstack.set_inspector(self.inspector)
#------ Private API --------------------------------------------------------
def restore_scrollbar_position(self):
"""Restoring scrollbar position after main window is visible"""
# Widget is now visible, we may center cursor on top level editor:
try:
self.get_current_editor().centerCursor()
except AttributeError:
pass
#------ SpyderPluginWidget API ---------------------------------------------
def get_plugin_title(self):
"""Return widget title"""
title = _('Editor')
filename = self.get_current_filename()
if filename:
title += ' - '+to_text_string(filename)
return title
def get_plugin_icon(self):
"""Return widget icon"""
return get_icon('edit.png')
def get_focus_widget(self):
"""
Return the widget to give focus to when
this plugin's dockwidget is raised on top-level
"""
return self.get_current_editor()
def visibility_changed(self, enable):
"""DockWidget visibility has changed"""
SpyderPluginWidget.visibility_changed(self, enable)
if self.dockwidget.isWindow():
self.dock_toolbar.show()
else:
self.dock_toolbar.hide()
if enable:
self.refresh_plugin()
def refresh_plugin(self):
"""Refresh editor plugin"""
editorstack = self.get_current_editorstack()
editorstack.refresh()
self.refresh_save_all_action()
def closing_plugin(self, cancelable=False):
"""Perform actions before parent main window is closed"""
state = self.splitter.saveState()
self.set_option('splitter_state', qbytearray_to_str(state))
filenames = []
editorstack = self.editorstacks[0]
filenames += [finfo.filename for finfo in editorstack.data]
self.set_option('layout_settings',
self.editorsplitter.get_layout_settings())
self.set_option('windows_layout_settings',
[win.get_layout_settings() for win in self.editorwindows])
self.set_option('filenames', filenames)
self.set_option('recent_files', self.recent_files)
if not editorstack.save_if_changed(cancelable) and cancelable:
return False
else:
for win in self.editorwindows[:]:
win.close()
return True
def get_plugin_actions(self):
"""Return a list of actions related to plugin"""
self.toggle_outline_action = create_action(self,
_("Show/hide outline explorer"),
triggered=self.show_hide_outline_explorer,
context=Qt.WidgetWithChildrenShortcut)
self.register_shortcut(self.toggle_outline_action, context="Editor",
name="Show/hide outline")
self.toggle_project_action = create_action(self,
_("Show/hide project explorer"),
triggered=self.show_hide_project_explorer,
context=Qt.WidgetWithChildrenShortcut)
self.register_shortcut(self.toggle_project_action, context="Editor",
name="Show/hide project explorer")
self.addActions([self.toggle_outline_action, self.toggle_project_action])
# ---- File menu and toolbar ----
self.new_action = create_action(self, _("&New file..."),
icon='filenew.png', tip=_("New file"),
triggered=self.new)
self.register_shortcut(self.new_action, context="Editor",
name="New file")
add_shortcut_to_tooltip(self.new_action, context="Editor",
name="New file")
self.open_action = create_action(self, _("&Open..."),
icon='fileopen.png', tip=_("Open file"),
triggered=self.load)
self.register_shortcut(self.open_action, context="Editor",
name="Open file")
add_shortcut_to_tooltip(self.open_action, context="Editor",
name="Open file")
self.revert_action = create_action(self, _("&Revert"),
icon='revert.png', tip=_("Revert file from disk"),
triggered=self.revert)
self.save_action = create_action(self, _("&Save"),
icon='filesave.png', tip=_("Save file"),
triggered=self.save)
self.register_shortcut(self.save_action, context="Editor",
name="Save file")
add_shortcut_to_tooltip(self.save_action, context="Editor",
name="Save file")
self.save_all_action = create_action(self, _("Sav&e all"),
icon='save_all.png', tip=_("Save all files"),
triggered=self.save_all)
self.register_shortcut(self.save_all_action, context="Editor",
name="Save all")
add_shortcut_to_tooltip(self.save_all_action, context="Editor",
name="Save all")
save_as_action = create_action(self, _("Save &as..."), None,
'filesaveas.png', _("Save current file as..."),
triggered=self.save_as)
print_preview_action = create_action(self, _("Print preview..."),
tip=_("Print preview..."), triggered=self.print_preview)
self.print_action = create_action(self, _("&Print..."),
icon='print.png', tip=_("Print current file..."),
triggered=self.print_file)
self.register_shortcut(self.print_action, context="Editor",
name="Print")
# Shortcut for close_action is defined in widgets/editor.py
self.close_action = create_action(self, _("&Close"),
icon='fileclose.png', tip=_("Close current file"),
triggered=self.close_file)
self.close_all_action = create_action(self, _("C&lose all"),
icon='filecloseall.png', tip=_("Close all opened files"),
triggered=self.close_all_files)
self.register_shortcut(self.close_all_action, context="Editor",
name="Close all")
# ---- Debug menu ----
set_clear_breakpoint_action = create_action(self,
_("Set/Clear breakpoint"),
icon=get_icon("breakpoint_big.png"),
triggered=self.set_or_clear_breakpoint,
context=Qt.WidgetShortcut)
self.register_shortcut(set_clear_breakpoint_action, context="Editor",
name="Breakpoint")
set_cond_breakpoint_action = create_action(self,
_("Set/Edit conditional breakpoint"),
icon=get_icon("breakpoint_cond_big.png"),
triggered=self.set_or_edit_conditional_breakpoint,
context=Qt.WidgetShortcut)
self.register_shortcut(set_cond_breakpoint_action, context="Editor",
name="Conditional breakpoint")
clear_all_breakpoints_action = create_action(self,
_("Clear breakpoints in all files"),
triggered=self.clear_all_breakpoints)
breakpoints_menu = QMenu(_("Breakpoints"), self)
add_actions(breakpoints_menu, (set_clear_breakpoint_action,
set_cond_breakpoint_action, None,
clear_all_breakpoints_action))
self.winpdb_action = create_action(self, _("Debug with winpdb"),
triggered=self.run_winpdb)
self.winpdb_action.setEnabled(WINPDB_PATH is not None and PY2)
self.register_shortcut(self.winpdb_action, context="Editor",
name="Debug with winpdb")
# --- Debug toolbar ---
debug_action = create_action(self, _("&Debug"), icon='debug.png',
tip=_("Debug file"),
triggered=self.debug_file)
self.register_shortcut(debug_action, context="Editor", name="Debug")
add_shortcut_to_tooltip(debug_action, context="Editor", name="Debug")
debug_next_action = create_action(self, _("Step"),
icon='arrow-step-over.png', tip=_("Run current line"),
triggered=lambda: self.debug_command("next"))
self.register_shortcut(debug_next_action, "_", "Debug Step Over")
add_shortcut_to_tooltip(debug_next_action, context="_",
name="Debug Step Over")
debug_continue_action = create_action(self, _("Continue"),
icon='arrow-continue.png', tip=_("Continue execution until "
"next breakpoint"),
triggered=lambda: self.debug_command("continue"))
self.register_shortcut(debug_continue_action, "_", "Debug Continue")
add_shortcut_to_tooltip(debug_continue_action, context="_",
name="Debug Continue")
debug_step_action = create_action(self, _("Step Into"),
icon='arrow-step-in.png', tip=_("Step into function or method "
"of current line"),
triggered=lambda: self.debug_command("step"))
self.register_shortcut(debug_step_action, "_", "Debug Step Into")
add_shortcut_to_tooltip(debug_step_action, context="_",
name="Debug Step Into")
debug_return_action = create_action(self, _("Step Return"),
icon='arrow-step-out.png', tip=_("Run until current function "
"or method returns"),
triggered=lambda: self.debug_command("return"))
self.register_shortcut(debug_return_action, "_", "Debug Step Return")
add_shortcut_to_tooltip(debug_return_action, context="_",
name="Debug Step Return")
debug_exit_action = create_action(self, _("Exit"),
icon='stop_debug.png', tip=_("Exit Debug"),
triggered=lambda: self.debug_command("exit"))
self.register_shortcut(debug_exit_action, "_", "Debug Exit")
add_shortcut_to_tooltip(debug_exit_action, context="_",
name="Debug Exit")
debug_control_menu_actions = [debug_next_action,
debug_step_action,
debug_return_action,
debug_continue_action,
debug_exit_action]
debug_control_menu = QMenu(_("Debugging control"))
add_actions(debug_control_menu, debug_control_menu_actions)
# --- Run toolbar ---
run_action = create_action(self, _("&Run"), icon='run.png',
tip=_("Run file"),
triggered=self.run_file)
self.register_shortcut(run_action, context="Editor", name="Run")
add_shortcut_to_tooltip(run_action, context="Editor", name="Run")
configure_action = create_action(self,
_("&Configure..."), icon='run_settings.png',
tip=_("Run settings"),
menurole=QAction.NoRole,
triggered=self.edit_run_configurations)
self.register_shortcut(configure_action, context="Editor",
name="Configure")
add_shortcut_to_tooltip(configure_action, context="Editor",
name="Configure")
re_run_action = create_action(self,
_("Re-run &last script"), icon='run_again.png',
tip=_("Run again last file"),
triggered=self.re_run_file)
self.register_shortcut(re_run_action, context="Editor",
name="Re-run last script")
add_shortcut_to_tooltip(re_run_action, context="Editor",
name="Re-run last script")
run_selected_action = create_action(self, _("Run &selection or "
"current line"),
icon='run_selection.png',
tip=_("Run selection or "
"current line"),
triggered=self.run_selection)
self.register_shortcut(run_selected_action, context="Editor",
name="Run selection")
run_cell_action = create_action(self,
_("Run cell"), icon='run_cell.png',
shortcut=QKeySequence("Ctrl+Enter"),
tip=_("Run current cell (Ctrl+Enter)\n"
"[Use #%% to create cells]"),
triggered=self.run_cell)
run_cell_advance_action = create_action(self,
_("Run cell and advance"),
icon='run_cell_advance.png',
shortcut=QKeySequence("Shift+Enter"),
tip=_("Run current cell and go to "
"the next one (Shift+Enter)"),
triggered=self.run_cell_and_advance)
# --- Source code Toolbar ---
self.todo_list_action = create_action(self,
_("Show todo list"), icon='todo_list.png',
tip=_("Show TODO/FIXME/XXX/HINT/TIP/@todo comments list"),
triggered=self.go_to_next_todo)
self.todo_menu = QMenu(self)
self.todo_list_action.setMenu(self.todo_menu)
self.todo_menu.aboutToShow.connect(self.update_todo_menu)
self.warning_list_action = create_action(self,
_("Show warning/error list"), icon='wng_list.png',
tip=_("Show code analysis warnings/errors"),
triggered=self.go_to_next_warning)
self.warning_menu = QMenu(self)
self.warning_list_action.setMenu(self.warning_menu)
self.warning_menu.aboutToShow.connect(self.update_warning_menu)
self.previous_warning_action = create_action(self,
_("Previous warning/error"), icon='prev_wng.png',
tip=_("Go to previous code analysis warning/error"),
triggered=self.go_to_previous_warning)
self.next_warning_action = create_action(self,
_("Next warning/error"), icon='next_wng.png',
tip=_("Go to next code analysis warning/error"),
triggered=self.go_to_next_warning)
self.previous_edit_cursor_action = create_action(self,
_("Last edit location"), icon='last_edit_location.png',
tip=_("Go to last edit location"),
triggered=self.go_to_last_edit_location)
self.register_shortcut(self.previous_edit_cursor_action,
context="Editor",
name="Last edit location")
self.previous_cursor_action = create_action(self,
_("Previous cursor position"), icon='prev_cursor.png',
tip=_("Go to previous cursor position"),
triggered=self.go_to_previous_cursor_position)
self.register_shortcut(self.previous_cursor_action,
context="Editor",
name="Previous cursor position")
self.next_cursor_action = create_action(self,
_("Next cursor position"), icon='next_cursor.png',
tip=_("Go to next cursor position"),
triggered=self.go_to_next_cursor_position)
self.register_shortcut(self.next_cursor_action,
context="Editor", name="Next cursor position")
# --- Edit Toolbar ---
self.toggle_comment_action = create_action(self,
_("Comment")+"/"+_("Uncomment"), icon='comment.png',
tip=_("Comment current line or selection"),
triggered=self.toggle_comment, context=Qt.WidgetShortcut)
self.register_shortcut(self.toggle_comment_action, context="Editor",
name="Toggle comment")
blockcomment_action = create_action(self, _("Add &block comment"),
tip=_("Add block comment around "
"current line or selection"),
triggered=self.blockcomment, context=Qt.WidgetShortcut)
self.register_shortcut(blockcomment_action, context="Editor",
name="Blockcomment")
unblockcomment_action = create_action(self,
_("R&emove block comment"),
tip = _("Remove comment block around "
"current line or selection"),
triggered=self.unblockcomment, context=Qt.WidgetShortcut)
self.register_shortcut(unblockcomment_action, context="Editor",
name="Unblockcomment")
# ----------------------------------------------------------------------
# The following action shortcuts are hard-coded in CodeEditor
# keyPressEvent handler (the shortcut is here only to inform user):
# (context=Qt.WidgetShortcut -> disable shortcut for other widgets)
self.indent_action = create_action(self,
_("Indent"), "Tab", icon='indent.png',
tip=_("Indent current line or selection"),
triggered=self.indent, context=Qt.WidgetShortcut)
self.unindent_action = create_action(self,
_("Unindent"), "Shift+Tab", icon='unindent.png',
tip=_("Unindent current line or selection"),
triggered=self.unindent, context=Qt.WidgetShortcut)
# ----------------------------------------------------------------------
self.win_eol_action = create_action(self,
_("Carriage return and line feed (Windows)"),
toggled=lambda: self.toggle_eol_chars('nt'))
self.linux_eol_action = create_action(self,
_("Line feed (UNIX)"),
toggled=lambda: self.toggle_eol_chars('posix'))
self.mac_eol_action = create_action(self,
_("Carriage return (Mac)"),
toggled=lambda: self.toggle_eol_chars('mac'))
eol_action_group = QActionGroup(self)
eol_actions = (self.win_eol_action, self.linux_eol_action,
self.mac_eol_action)
add_actions(eol_action_group, eol_actions)
eol_menu = QMenu(_("Convert end-of-line characters"), self)
add_actions(eol_menu, eol_actions)
trailingspaces_action = create_action(self,
_("Remove trailing spaces"),
triggered=self.remove_trailing_spaces)
self.showblanks_action = create_action(self, _("Show blank spaces"),
toggled=self.toggle_show_blanks)
fixindentation_action = create_action(self, _("Fix indentation"),
tip=_("Replace tab characters by space characters"),
triggered=self.fix_indentation)
gotoline_action = create_action(self, _("Go to line..."),
icon=get_icon("gotoline.png"),
triggered=self.go_to_line,
context=Qt.WidgetShortcut)
self.register_shortcut(gotoline_action, context="Editor",
name="Go to line")
workdir_action = create_action(self,
_("Set console working directory"),
icon=get_std_icon('DirOpenIcon'),
tip=_("Set current console (and file explorer) working "
"directory to current script directory"),
triggered=self.__set_workdir)
self.max_recent_action = create_action(self,
_("Maximum number of recent files..."),
triggered=self.change_max_recent_files)
self.clear_recent_action = create_action(self,
_("Clear this list"), tip=_("Clear recent files list"),
triggered=self.clear_recent_files)
self.recent_file_menu = QMenu(_("Open &recent"), self)
self.recent_file_menu.aboutToShow.connect(self.update_recent_file_menu)
file_menu_actions = [self.new_action, self.open_action,
self.recent_file_menu, self.save_action,
self.save_all_action, save_as_action,
self.revert_action,
None, print_preview_action, self.print_action,
None, self.close_action,
self.close_all_action, None]
self.main.file_menu_actions += file_menu_actions
file_toolbar_actions = [self.new_action, self.open_action,
self.save_action, self.save_all_action]
self.main.file_toolbar_actions += file_toolbar_actions
self.edit_menu_actions = [self.toggle_comment_action,
blockcomment_action, unblockcomment_action,
self.indent_action, self.unindent_action]
self.main.edit_menu_actions += [None]+self.edit_menu_actions
edit_toolbar_actions = [self.toggle_comment_action,
self.unindent_action, self.indent_action]
self.main.edit_toolbar_actions += edit_toolbar_actions
self.search_menu_actions = [gotoline_action]
self.main.search_menu_actions += self.search_menu_actions
self.main.search_toolbar_actions += [gotoline_action]
# ---- Run menu/toolbar construction ----
run_menu_actions = [run_action, run_cell_action,
run_cell_advance_action, None, run_selected_action,
re_run_action, configure_action, None]
self.main.run_menu_actions += run_menu_actions
run_toolbar_actions = [run_action, run_cell_action,
run_cell_advance_action, re_run_action,
configure_action]
self.main.run_toolbar_actions += run_toolbar_actions
# ---- Debug menu/toolbar construction ----
# The breakpoints plugin is expecting that
# breakpoints_menu will be the first QMenu in debug_menu_actions
# If breakpoints_menu must be moved below another QMenu in the list
# please update the breakpoints plugin accordingly.
debug_menu_actions = [debug_action, breakpoints_menu,
debug_control_menu, None, self.winpdb_action]
self.main.debug_menu_actions += debug_menu_actions
debug_toolbar_actions = [debug_action, debug_next_action,
debug_step_action, debug_return_action,
debug_continue_action, debug_exit_action]
self.main.debug_toolbar_actions += debug_toolbar_actions
source_menu_actions = [eol_menu, self.showblanks_action,
trailingspaces_action, fixindentation_action]
self.main.source_menu_actions += source_menu_actions
source_toolbar_actions = [self.todo_list_action,
self.warning_list_action, self.previous_warning_action,
self.next_warning_action, None,
self.previous_edit_cursor_action,
self.previous_cursor_action, self.next_cursor_action]
self.main.source_toolbar_actions += source_toolbar_actions
self.dock_toolbar_actions = file_toolbar_actions + [None] + \
source_toolbar_actions + [None] + \
run_toolbar_actions + [None] + \
debug_toolbar_actions + [None] + \
edit_toolbar_actions
self.pythonfile_dependent_actions = [run_action, configure_action,
set_clear_breakpoint_action, set_cond_breakpoint_action,
debug_action, run_selected_action, run_cell_action,
run_cell_advance_action, blockcomment_action,
unblockcomment_action, self.winpdb_action]
self.file_dependent_actions = self.pythonfile_dependent_actions + \
[self.save_action, save_as_action, print_preview_action,
self.print_action, self.save_all_action, gotoline_action,
workdir_action, self.close_action, self.close_all_action,
self.toggle_comment_action, self.revert_action,
self.indent_action, self.unindent_action]
self.stack_menu_actions = [gotoline_action, workdir_action]
return self.file_dependent_actions
def register_plugin(self):
"""Register plugin in Spyder's main window"""
self.main.restore_scrollbar_position.connect(
self.restore_scrollbar_position)
self.main.console.edit_goto.connect(self.load)
self.exec_in_extconsole.connect(self.main.execute_in_external_console)
self.redirect_stdio.connect(self.main.redirect_internalshell_stdio)
self.open_dir.connect(self.main.workingdirectory.chdir)
self.set_inspector(self.main.inspector)
if self.main.outlineexplorer is not None:
self.set_outlineexplorer(self.main.outlineexplorer)
editorstack = self.get_current_editorstack()
if not editorstack.data:
self.__load_temp_file()
self.main.add_dockwidget(self)
#------ Focus tabwidget
def __get_focus_editorstack(self):
fwidget = QApplication.focusWidget()
if isinstance(fwidget, EditorStack):
return fwidget
else:
for editorstack in self.editorstacks:
if editorstack.isAncestorOf(fwidget):
return editorstack
def set_last_focus_editorstack(self, editorwindow, editorstack):
self.last_focus_editorstack[editorwindow] = editorstack
self.last_focus_editorstack[None] = editorstack # very last editorstack
def get_last_focus_editorstack(self, editorwindow=None):
return self.last_focus_editorstack[editorwindow]
def remove_last_focus_editorstack(self, editorstack):
for editorwindow, widget in list(self.last_focus_editorstack.items()):
if widget is editorstack:
self.last_focus_editorstack[editorwindow] = None
def save_focus_editorstack(self):
editorstack = self.__get_focus_editorstack()
if editorstack is not None:
for win in [self]+self.editorwindows:
if win.isAncestorOf(editorstack):
self.set_last_focus_editorstack(win, editorstack)
#------ Handling editorstacks
def register_editorstack(self, editorstack):
self.editorstacks.append(editorstack)
self.register_widget_shortcuts("Editor", editorstack)
if self.isAncestorOf(editorstack):
# editorstack is a child of the Editor plugin
self.set_last_focus_editorstack(self, editorstack)
editorstack.set_closable( len(self.editorstacks) > 1 )
if self.outlineexplorer is not None:
editorstack.set_outlineexplorer(self.outlineexplorer)
editorstack.set_find_widget(self.find_widget)
editorstack.reset_statusbar.connect(self.readwrite_status.hide)
editorstack.reset_statusbar.connect(self.encoding_status.hide)
editorstack.reset_statusbar.connect(self.cursorpos_status.hide)
editorstack.readonly_changed.connect(
self.readwrite_status.readonly_changed)
editorstack.encoding_changed.connect(
self.encoding_status.encoding_changed)
editorstack.sig_editor_cursor_position_changed.connect(
self.cursorpos_status.cursor_position_changed)
editorstack.refresh_eol_chars.connect(self.eol_status.eol_changed)
editorstack.set_inspector(self.inspector)
editorstack.set_io_actions(self.new_action, self.open_action,
self.save_action, self.revert_action)
editorstack.set_tempfile_path(self.TEMPFILE_PATH)
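        # Each (method, option) pair below pushes a saved configuration value
        # into the freshly registered editorstack; the loop that follows the
        # tuple simply calls getattr(editorstack, method)(self.get_option(option)).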
settings = (
('set_pyflakes_enabled', 'code_analysis/pyflakes'),
('set_pep8_enabled', 'code_analysis/pep8'),
('set_todolist_enabled', 'todo_list'),
('set_realtime_analysis_enabled', 'realtime_analysis'),
('set_realtime_analysis_timeout', 'realtime_analysis/timeout'),
('set_blanks_enabled', 'blank_spaces'),
('set_linenumbers_enabled', 'line_numbers'),
('set_edgeline_enabled', 'edge_line'),
('set_edgeline_column', 'edge_line_column'),
('set_codecompletion_auto_enabled', 'codecompletion/auto'),
('set_codecompletion_case_enabled', 'codecompletion/case_sensitive'),
('set_codecompletion_enter_enabled', 'codecompletion/enter_key'),
('set_calltips_enabled', 'calltips'),
('set_go_to_definition_enabled', 'go_to_definition'),
('set_focus_to_editor', 'focus_to_editor'),
('set_close_parentheses_enabled', 'close_parentheses'),
('set_close_quotes_enabled', 'close_quotes'),
('set_add_colons_enabled', 'add_colons'),
('set_auto_unindent_enabled', 'auto_unindent'),
('set_indent_chars', 'indent_chars'),
('set_tab_stop_width', 'tab_stop_width'),
('set_wrap_enabled', 'wrap'),
('set_tabmode_enabled', 'tab_always_indent'),
('set_intelligent_backspace_enabled', 'intelligent_backspace'),
('set_highlight_current_line_enabled', 'highlight_current_line'),
('set_highlight_current_cell_enabled', 'highlight_current_cell'),
('set_occurence_highlighting_enabled', 'occurence_highlighting'),
('set_occurence_highlighting_timeout', 'occurence_highlighting/timeout'),
('set_checkeolchars_enabled', 'check_eol_chars'),
('set_fullpath_sorting_enabled', 'fullpath_sorting'),
('set_tabbar_visible', 'show_tab_bar'),
('set_always_remove_trailing_spaces', 'always_remove_trailing_spaces'),
)
for method, setting in settings:
getattr(editorstack, method)(self.get_option(setting))
editorstack.set_inspector_enabled(CONF.get('inspector',
'connect/editor'))
color_scheme = get_color_scheme(self.get_option('color_scheme_name'))
editorstack.set_default_font(self.get_plugin_font(), color_scheme)
editorstack.starting_long_process.connect(self.starting_long_process)
editorstack.ending_long_process.connect(self.ending_long_process)
# Redirect signals
editorstack.redirect_stdio.connect(
lambda state: self.redirect_stdio.emit(state))
editorstack.exec_in_extconsole.connect(
lambda text, option:
self.exec_in_extconsole.emit(text, option))
editorstack.update_plugin_title.connect(
lambda: self.update_plugin_title.emit())
editorstack.editor_focus_changed.connect(self.save_focus_editorstack)
editorstack.editor_focus_changed.connect(self.main.plugin_focus_changed)
editorstack.zoom_in.connect(lambda: self.zoom(1))
editorstack.zoom_out.connect(lambda: self.zoom(-1))
editorstack.zoom_reset.connect(lambda: self.zoom(0))
editorstack.sig_new_file.connect(lambda s: self.new(text=s))
editorstack.sig_close_file.connect(self.close_file_in_all_editorstacks)
editorstack.sig_close_file[()].connect(self.close_file)
editorstack.file_saved.connect(self.file_saved_in_editorstack)
editorstack.file_renamed_in_data.connect(
self.file_renamed_in_data_in_editorstack)
editorstack.create_new_window.connect(self.create_new_window)
editorstack.opened_files_list_changed.connect(
self.opened_files_list_changed)
editorstack.analysis_results_changed.connect(
self.analysis_results_changed)
editorstack.todo_results_changed.connect(self.todo_results_changed)
editorstack.update_code_analysis_actions.connect(
self.update_code_analysis_actions)
editorstack.update_code_analysis_actions.connect(
self.update_todo_actions)
editorstack.refresh_file_dependent_actions.connect(
self.refresh_file_dependent_actions)
editorstack.refresh_save_all_action.connect(self.refresh_save_all_action)
editorstack.refresh_eol_chars.connect(self.refresh_eol_chars)
editorstack.save_breakpoints.connect(self.save_breakpoints)
editorstack.text_changed_at.connect(self.text_changed_at)
editorstack.current_file_changed.connect(self.current_file_changed)
editorstack.plugin_load.connect(self.load)
editorstack.edit_goto.connect(self.load)
def unregister_editorstack(self, editorstack):
"""Removing editorstack only if it's not the last remaining"""
self.remove_last_focus_editorstack(editorstack)
if len(self.editorstacks) > 1:
index = self.editorstacks.index(editorstack)
self.editorstacks.pop(index)
return True
else:
# editorstack was not removed!
return False
def clone_editorstack(self, editorstack):
editorstack.clone_from(self.editorstacks[0])
for finfo in editorstack.data:
self.register_widget_shortcuts("Editor", finfo.editor)
@Slot(int, int)
def close_file_in_all_editorstacks(self, editorstack_id_str, index):
for editorstack in self.editorstacks:
if str(id(editorstack)) != editorstack_id_str:
editorstack.blockSignals(True)
editorstack.close_file(index, force=True)
editorstack.blockSignals(False)
@Slot(int, int)
def file_saved_in_editorstack(self, editorstack_id_str, index, filename):
"""A file was saved in editorstack, this notifies others"""
for editorstack in self.editorstacks:
if str(id(editorstack)) != editorstack_id_str:
editorstack.file_saved_in_other_editorstack(index, filename)
@Slot(int, int)
def file_renamed_in_data_in_editorstack(self, editorstack_id_str,
index, filename):
"""A file was renamed in data in editorstack, this notifies others"""
for editorstack in self.editorstacks:
if str(id(editorstack)) != editorstack_id_str:
editorstack.rename_in_data(index, filename)
#------ Handling editor windows
def setup_other_windows(self):
"""Setup toolbars and menus for 'New window' instances"""
self.toolbar_list = (
(_("File toolbar"), self.main.file_toolbar_actions),
(_("Search toolbar"), self.main.search_menu_actions),
(_("Source toolbar"), self.main.source_toolbar_actions),
(_("Run toolbar"), self.main.run_toolbar_actions),
(_("Debug toolbar"), self.main.debug_toolbar_actions),
(_("Edit toolbar"), self.main.edit_toolbar_actions),
)
self.menu_list = (
(_("&File"), self.main.file_menu_actions),
(_("&Edit"), self.main.edit_menu_actions),
(_("&Search"), self.main.search_menu_actions),
(_("Sour&ce"), self.main.source_menu_actions),
(_("&Run"), self.main.run_menu_actions),
(_("&Tools"), self.main.tools_menu_actions),
(_("?"), self.main.help_menu_actions),
)
# Create pending new windows:
for layout_settings in self.editorwindows_to_be_created:
win = self.create_new_window()
win.set_layout_settings(layout_settings)
def create_new_window(self):
oe_options = self.outlineexplorer.get_options()
        fullpath_sorting = self.get_option('fullpath_sorting', True)
window = EditorMainWindow(self, self.stack_menu_actions,
self.toolbar_list, self.menu_list,
show_fullpath=oe_options['show_fullpath'],
fullpath_sorting=fullpath_sorting,
show_all_files=oe_options['show_all_files'],
show_comments=oe_options['show_comments'])
window.resize(self.size())
window.show()
self.register_editorwindow(window)
window.destroyed.connect(lambda: self.unregister_editorwindow(window))
return window
def register_editorwindow(self, window):
self.editorwindows.append(window)
def unregister_editorwindow(self, window):
self.editorwindows.pop(self.editorwindows.index(window))
#------ Accessors
def get_filenames(self):
return [finfo.filename for finfo in self.editorstacks[0].data]
def get_filename_index(self, filename):
return self.editorstacks[0].has_filename(filename)
def get_current_editorstack(self, editorwindow=None):
if self.editorstacks is not None:
if len(self.editorstacks) == 1:
return self.editorstacks[0]
else:
editorstack = self.__get_focus_editorstack()
if editorstack is None or editorwindow is not None:
return self.get_last_focus_editorstack(editorwindow)
return editorstack
def get_current_editor(self):
editorstack = self.get_current_editorstack()
if editorstack is not None:
return editorstack.get_current_editor()
def get_current_finfo(self):
editorstack = self.get_current_editorstack()
if editorstack is not None:
return editorstack.get_current_finfo()
def get_current_filename(self):
editorstack = self.get_current_editorstack()
if editorstack is not None:
return editorstack.get_current_filename()
def is_file_opened(self, filename=None):
return self.editorstacks[0].is_file_opened(filename)
def set_current_filename(self, filename, editorwindow=None):
"""Set focus to *filename* if this file has been opened
Return the editor instance associated to *filename*"""
editorstack = self.get_current_editorstack(editorwindow)
return editorstack.set_current_filename(filename)
def set_path(self):
for finfo in self.editorstacks[0].data:
finfo.path = self.main.get_spyder_pythonpath()
#------ Refresh methods
def refresh_file_dependent_actions(self):
"""Enable/disable file dependent actions
(only if dockwidget is visible)"""
if self.dockwidget and self.dockwidget.isVisible():
enable = self.get_current_editor() is not None
for action in self.file_dependent_actions:
action.setEnabled(enable)
def refresh_save_all_action(self):
state = False
editorstack = self.editorstacks[0]
if editorstack.get_stack_count() > 1:
state = state or any([finfo.editor.document().isModified()
for finfo in editorstack.data])
self.save_all_action.setEnabled(state)
def update_warning_menu(self):
"""Update warning list menu"""
editorstack = self.get_current_editorstack()
check_results = editorstack.get_analysis_results()
self.warning_menu.clear()
filename = self.get_current_filename()
for message, line_number in check_results:
error = 'syntax' in message
text = message[:1].upper()+message[1:]
icon = get_icon('error.png' if error else 'warning.png')
slot = lambda _l=line_number: self.load(filename, goto=_l)
action = create_action(self, text=text, icon=icon, triggered=slot)
self.warning_menu.addAction(action)
def analysis_results_changed(self):
"""
Synchronize analysis results between editorstacks
Refresh analysis navigation buttons
"""
editorstack = self.get_current_editorstack()
results = editorstack.get_analysis_results()
index = editorstack.get_stack_index()
if index != -1:
for other_editorstack in self.editorstacks:
if other_editorstack is not editorstack:
other_editorstack.set_analysis_results(index, results)
self.update_code_analysis_actions()
def update_todo_menu(self):
"""Update todo list menu"""
editorstack = self.get_current_editorstack()
results = editorstack.get_todo_results()
self.todo_menu.clear()
filename = self.get_current_filename()
for text, line0 in results:
icon = get_icon('todo.png')
slot = lambda _l=line0: self.load(filename, goto=_l)
action = create_action(self, text=text, icon=icon, triggered=slot)
self.todo_menu.addAction(action)
self.update_todo_actions()
def todo_results_changed(self):
"""
Synchronize todo results between editorstacks
Refresh todo list navigation buttons
"""
editorstack = self.get_current_editorstack()
results = editorstack.get_todo_results()
index = editorstack.get_stack_index()
if index != -1:
for other_editorstack in self.editorstacks:
if other_editorstack is not editorstack:
other_editorstack.set_todo_results(index, results)
self.update_todo_actions()
def refresh_eol_chars(self, os_name):
os_name = to_text_string(os_name)
self.__set_eol_chars = False
if os_name == 'nt':
self.win_eol_action.setChecked(True)
elif os_name == 'posix':
self.linux_eol_action.setChecked(True)
else:
self.mac_eol_action.setChecked(True)
self.__set_eol_chars = True
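    # Descriptive note (hedged): the __set_eol_chars flag toggled above appears
    # to guard against re-entrancy while the EOL radio actions are synced
    # programmatically here; toggle_eol_chars, wired to their toggled signals in
    # get_plugin_actions, is expected to check this flag and skip writing the
    # EOL mode back to the editor during this refresh.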
#------ Slots
def opened_files_list_changed(self):
"""
Opened files list has changed:
--> open/close file action
--> modification ('*' added to title)
--> current edited file has changed
"""
# Refresh Python file dependent actions:
editor = self.get_current_editor()
if editor:
enable = editor.is_python()
for action in self.pythonfile_dependent_actions:
if action is self.winpdb_action:
action.setEnabled(enable and WINPDB_PATH is not None)
else:
action.setEnabled(enable)
def update_code_analysis_actions(self):
editorstack = self.get_current_editorstack()
results = editorstack.get_analysis_results()
# Update code analysis buttons
state = (self.get_option('code_analysis/pyflakes') \
or self.get_option('code_analysis/pep8')) \
and results is not None and len(results)
for action in (self.warning_list_action, self.previous_warning_action,
self.next_warning_action):
action.setEnabled(state)
def update_todo_actions(self):
editorstack = self.get_current_editorstack()
results = editorstack.get_todo_results()
state = self.get_option('todo_list') \
and results is not None and len(results)
self.todo_list_action.setEnabled(state)
#------ Breakpoints
def save_breakpoints(self, filename, breakpoints):
filename = to_text_string(filename)
breakpoints = to_text_string(breakpoints)
filename = osp.normpath(osp.abspath(filename))
if breakpoints:
breakpoints = eval(breakpoints)
else:
breakpoints = []
save_breakpoints(filename, breakpoints)
self.breakpoints_saved.emit()
#------ File I/O
def __load_temp_file(self):
"""Load temporary file from a text file in user home directory"""
if not osp.isfile(self.TEMPFILE_PATH):
# Creating temporary file
default = ['# -*- coding: utf-8 -*-',
'"""', _("Spyder Editor"), '',
_("This is a temporary script file."),
'"""', '', '']
text = os.linesep.join([encoding.to_unicode(qstr)
for qstr in default])
encoding.write(to_text_string(text), self.TEMPFILE_PATH, 'utf-8')
self.load(self.TEMPFILE_PATH)
@Slot()
def __set_workdir(self):
"""Set current script directory as working directory"""
fname = self.get_current_filename()
if fname is not None:
directory = osp.dirname(osp.abspath(fname))
self.open_dir.emit(directory)
def __add_recent_file(self, fname):
"""Add to recent file list"""
if fname is None:
return
if fname in self.recent_files:
self.recent_files.remove(fname)
self.recent_files.insert(0, fname)
if len(self.recent_files) > self.get_option('max_recent_files'):
self.recent_files.pop(-1)
def _clone_file_everywhere(self, finfo):
"""Clone file (*src_editor* widget) in all editorstacks
Cloning from the first editorstack in which every single new editor
is created (when loading or creating a new file)"""
for editorstack in self.editorstacks[1:]:
editor = editorstack.clone_editor_from(finfo, set_current=False)
self.register_widget_shortcuts("Editor", editor)
@Slot()
def new(self, fname=None, editorstack=None, text=None):
"""
Create a new file - Untitled
fname=None --> fname will be 'untitledXX.py' but do not create file
fname=<basestring> --> create file
"""
# If no text is provided, create default content
if text is None:
text, enc = encoding.read(self.TEMPLATE_PATH)
            enc_match = re.search(r'-*- coding: ?([a-z0-9A-Z\-]*) -*-', text)
if enc_match:
enc = enc_match.group(1)
# Initialize template variables
# Windows
username = encoding.to_unicode_from_fs(os.environ.get('USERNAME',
''))
# Linux, Mac OS X
if not username:
username = encoding.to_unicode_from_fs(os.environ.get('USER',
'-'))
VARS = {
'date': time.ctime(),
'username': username,
}
try:
text = text % VARS
except:
pass
else:
enc = encoding.read(self.TEMPLATE_PATH)[1]
create_fname = lambda n: to_text_string(_("untitled")) + ("%d.py" % n)
# Creating editor widget
if editorstack is None:
current_es = self.get_current_editorstack()
else:
current_es = editorstack
created_from_here = fname is None
if created_from_here:
while True:
fname = create_fname(self.untitled_num)
self.untitled_num += 1
if not osp.isfile(fname):
break
basedir = getcwd()
if CONF.get('workingdir', 'editor/new/browse_scriptdir'):
c_fname = self.get_current_filename()
if c_fname is not None and c_fname != self.TEMPFILE_PATH:
basedir = osp.dirname(c_fname)
fname = osp.abspath(osp.join(basedir, fname))
else:
# QString when triggered by a Qt signal
fname = osp.abspath(to_text_string(fname))
index = current_es.has_filename(fname)
if index and not current_es.close_file(index):
return
# Creating the editor widget in the first editorstack (the one that
# can't be destroyed), then cloning this editor widget in all other
# editorstacks:
finfo = self.editorstacks[0].new(fname, enc, text)
finfo.path = self.main.get_spyder_pythonpath()
self._clone_file_everywhere(finfo)
current_editor = current_es.set_current_filename(finfo.filename)
self.register_widget_shortcuts("Editor", current_editor)
if not created_from_here:
self.save(force=True)
def edit_template(self):
"""Edit new file template"""
self.load(self.TEMPLATE_PATH)
def update_recent_file_menu(self):
"""Update recent file menu"""
recent_files = []
for fname in self.recent_files:
if not self.is_file_opened(fname) and osp.isfile(fname):
recent_files.append(fname)
self.recent_file_menu.clear()
if recent_files:
for i, fname in enumerate(recent_files):
if i < 10:
accel = "%d" % ((i+1) % 10)
else:
accel = chr(i-10+ord('a'))
action = create_action(self, "&%s %s" % (accel, fname),
icon=get_filetype_icon(fname),
triggered=self.load)
action.setData(to_qvariant(fname))
self.recent_file_menu.addAction(action)
self.clear_recent_action.setEnabled(len(recent_files) > 0)
add_actions(self.recent_file_menu, (None, self.max_recent_action,
self.clear_recent_action))
@Slot()
def clear_recent_files(self):
"""Clear recent files list"""
self.recent_files = []
@Slot()
def change_max_recent_files(self):
"Change max recent files entries"""
editorstack = self.get_current_editorstack()
mrf, valid = QInputDialog.getInteger(editorstack, _('Editor'),
_('Maximum number of recent files'),
self.get_option('max_recent_files'), 1, 35)
if valid:
self.set_option('max_recent_files', mrf)
@Slot(str, int, str, object)
def load(self, filenames=None, goto=None, word='', editorwindow=None,
processevents=True):
"""
Load a text file
editorwindow: load in this editorwindow (useful when clicking on
outline explorer with multiple editor windows)
processevents: determines if processEvents() should be called at the
end of this method (set to False to prevent keyboard events from
creeping through to the editor during debugging)
"""
editor0 = self.get_current_editor()
if editor0 is not None:
position0 = editor0.get_position('cursor')
filename0 = self.get_current_filename()
else:
position0, filename0 = None, None
if not filenames:
# Recent files action
action = self.sender()
if isinstance(action, QAction):
filenames = from_qvariant(action.data(), to_text_string)
if not filenames:
basedir = getcwd()
if CONF.get('workingdir', 'editor/open/browse_scriptdir'):
c_fname = self.get_current_filename()
if c_fname is not None and c_fname != self.TEMPFILE_PATH:
basedir = osp.dirname(c_fname)
self.redirect_stdio.emit(False)
parent_widget = self.get_current_editorstack()
if filename0 is not None:
selectedfilter = get_filter(EDIT_FILETYPES,
osp.splitext(filename0)[1])
else:
selectedfilter = ''
filenames, _selfilter = getopenfilenames(parent_widget,
_("Open file"), basedir, EDIT_FILTERS,
selectedfilter=selectedfilter)
self.redirect_stdio.emit(True)
if filenames:
filenames = [osp.normpath(fname) for fname in filenames]
if CONF.get('workingdir', 'editor/open/auto_set_to_basedir'):
directory = osp.dirname(filenames[0])
self.open_dir.emit(directory)
else:
return
focus_widget = QApplication.focusWidget()
if self.dockwidget and not self.ismaximized and\
(not self.dockwidget.isAncestorOf(focus_widget)\
and not isinstance(focus_widget, CodeEditor)):
self.dockwidget.setVisible(True)
self.dockwidget.setFocus()
self.dockwidget.raise_()
def _convert(fname):
fname = osp.abspath(encoding.to_unicode_from_fs(fname))
if os.name == 'nt' and len(fname) >= 2 and fname[1] == ':':
fname = fname[0].upper()+fname[1:]
return fname
if hasattr(filenames, 'replaceInStrings'):
# This is a QStringList instance (PyQt API #1), converting to list:
filenames = list(filenames)
if not isinstance(filenames, list):
filenames = [_convert(filenames)]
else:
filenames = [_convert(fname) for fname in list(filenames)]
if isinstance(goto, int):
goto = [goto]
elif goto is not None and len(goto) != len(filenames):
goto = None
for index, filename in enumerate(filenames):
# -- Do not open an already opened file
current_editor = self.set_current_filename(filename, editorwindow)
if current_editor is None:
# -- Not a valid filename:
if not osp.isfile(filename):
continue
# --
current_es = self.get_current_editorstack(editorwindow)
# Creating the editor widget in the first editorstack (the one
# that can't be destroyed), then cloning this editor widget in
# all other editorstacks:
finfo = self.editorstacks[0].load(filename, set_current=False)
finfo.path = self.main.get_spyder_pythonpath()
self._clone_file_everywhere(finfo)
current_editor = current_es.set_current_filename(filename)
current_editor.set_breakpoints(load_breakpoints(filename))
self.register_widget_shortcuts("Editor", current_editor)
current_es.analyze_script()
self.__add_recent_file(filename)
if goto is not None: # 'word' is assumed to be None as well
current_editor.go_to_line(goto[index], word=word)
position = current_editor.get_position('cursor')
self.cursor_moved(filename0, position0, filename, position)
current_editor.clearFocus()
current_editor.setFocus()
current_editor.window().raise_()
if processevents:
QApplication.processEvents()
@Slot()
def print_file(self):
"""Print current file"""
editor = self.get_current_editor()
filename = self.get_current_filename()
printer = Printer(mode=QPrinter.HighResolution,
header_font=self.get_plugin_font('printer_header'))
printDialog = QPrintDialog(printer, editor)
if editor.has_selected_text():
printDialog.addEnabledOption(QAbstractPrintDialog.PrintSelection)
self.redirect_stdio.emit(False)
answer = printDialog.exec_()
self.redirect_stdio.emit(True)
if answer == QDialog.Accepted:
self.starting_long_process(_("Printing..."))
printer.setDocName(filename)
editor.print_(printer)
self.ending_long_process()
@Slot()
def print_preview(self):
"""Print preview for current file"""
from spyderlib.qt.QtGui import QPrintPreviewDialog
editor = self.get_current_editor()
printer = Printer(mode=QPrinter.HighResolution,
header_font=self.get_plugin_font('printer_header'))
preview = QPrintPreviewDialog(printer, self)
preview.setWindowFlags(Qt.Window)
preview.paintRequested.connect(lambda printer: editor.print_(printer))
self.redirect_stdio.emit(False)
preview.exec_()
self.redirect_stdio.emit(True)
@Slot()
def close_file(self):
"""Close current file"""
editorstack = self.get_current_editorstack()
editorstack.close_file()
@Slot()
def close_all_files(self):
"""Close all opened scripts"""
self.editorstacks[0].close_all_files()
@Slot()
def save(self, index=None, force=False):
"""Save file"""
editorstack = self.get_current_editorstack()
return editorstack.save(index=index, force=force)
@Slot()
def save_as(self):
"""Save *as* the currently edited file"""
editorstack = self.get_current_editorstack()
if editorstack.save_as():
fname = editorstack.get_current_filename()
if CONF.get('workingdir', 'editor/save/auto_set_to_basedir'):
self.open_dir.emit(osp.dirname(fname))
self.__add_recent_file(fname)
@Slot()
def save_all(self):
"""Save all opened files"""
self.get_current_editorstack().save_all()
@Slot()
def revert(self):
"""Revert the currently edited file from disk"""
editorstack = self.get_current_editorstack()
editorstack.revert()
#------ Explorer widget
def close_file_from_name(self, filename):
"""Close file from its name"""
filename = osp.abspath(to_text_string(filename))
index = self.editorstacks[0].has_filename(filename)
if index is not None:
self.editorstacks[0].close_file(index)
def removed(self, filename):
"""File was removed in file explorer widget or in project explorer"""
self.close_file_from_name(filename)
def removed_tree(self, dirname):
"""Directory was removed in project explorer widget"""
dirname = osp.abspath(to_text_string(dirname))
for fname in self.get_filenames():
if osp.abspath(fname).startswith(dirname):
self.__close(fname)
def renamed(self, source, dest):
"""File was renamed in file explorer widget or in project explorer"""
filename = osp.abspath(to_text_string(source))
index = self.editorstacks[0].has_filename(filename)
if index is not None:
for editorstack in self.editorstacks:
editorstack.rename_in_data(index,
new_filename=to_text_string(dest))
#------ Source code
@Slot()
def indent(self):
"""Indent current line or selection"""
editor = self.get_current_editor()
if editor is not None:
editor.indent()
@Slot()
def unindent(self):
"""Unindent current line or selection"""
editor = self.get_current_editor()
if editor is not None:
editor.unindent()
@Slot()
def toggle_comment(self):
"""Comment current line or selection"""
editor = self.get_current_editor()
if editor is not None:
editor.toggle_comment()
@Slot()
def blockcomment(self):
"""Block comment current line or selection"""
editor = self.get_current_editor()
if editor is not None:
editor.blockcomment()
@Slot()
def unblockcomment(self):
"""Un-block comment current line or selection"""
editor = self.get_current_editor()
if editor is not None:
editor.unblockcomment()
@Slot()
def go_to_next_todo(self):
editor = self.get_current_editor()
position = editor.go_to_next_todo()
filename = self.get_current_filename()
self.add_cursor_position_to_history(filename, position)
@Slot()
def go_to_next_warning(self):
editor = self.get_current_editor()
position = editor.go_to_next_warning()
filename = self.get_current_filename()
self.add_cursor_position_to_history(filename, position)
@Slot()
def go_to_previous_warning(self):
editor = self.get_current_editor()
position = editor.go_to_previous_warning()
filename = self.get_current_filename()
self.add_cursor_position_to_history(filename, position)
@Slot()
def run_winpdb(self):
"""Run winpdb to debug current file"""
if self.save():
fname = self.get_current_filename()
runconf = get_run_configuration(fname)
if runconf is None:
args = []
wdir = None
else:
args = runconf.get_arguments().split()
wdir = runconf.get_working_directory()
# Handle the case where wdir comes back as an empty string
# when the working directory dialog checkbox is unchecked.
if not wdir:
wdir = None
programs.run_program(WINPDB_PATH, [fname]+args, wdir)
def toggle_eol_chars(self, os_name):
editor = self.get_current_editor()
if self.__set_eol_chars:
editor.set_eol_chars(sourcecode.get_eol_chars_from_os_name(os_name))
@Slot(bool)
def toggle_show_blanks(self, checked):
editor = self.get_current_editor()
editor.set_blanks_enabled(checked)
@Slot()
def remove_trailing_spaces(self):
editorstack = self.get_current_editorstack()
editorstack.remove_trailing_spaces()
@Slot()
def fix_indentation(self):
editorstack = self.get_current_editorstack()
editorstack.fix_indentation()
#------ Cursor position history management
def update_cursorpos_actions(self):
self.previous_edit_cursor_action.setEnabled(
self.last_edit_cursor_pos is not None)
self.previous_cursor_action.setEnabled(
self.cursor_pos_index is not None and self.cursor_pos_index > 0)
self.next_cursor_action.setEnabled(self.cursor_pos_index is not None \
and self.cursor_pos_index < len(self.cursor_pos_history)-1)
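# Record a (filename, position) pair in the cursor navigation history,
# collapsing duplicate or superseded entries for the same file so the
# previous/next cursor-position actions stay consistent.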
def add_cursor_position_to_history(self, filename, position, fc=False):
if self.__ignore_cursor_position:
return
for index, (fname, pos) in enumerate(self.cursor_pos_history[:]):
if fname == filename:
if pos == position or pos == 0:
if fc:
self.cursor_pos_history[index] = (filename, position)
self.cursor_pos_index = index
self.update_cursorpos_actions()
return
else:
if self.cursor_pos_index >= index:
self.cursor_pos_index -= 1
self.cursor_pos_history.pop(index)
break
if self.cursor_pos_index is not None:
self.cursor_pos_history = \
self.cursor_pos_history[:self.cursor_pos_index+1]
self.cursor_pos_history.append((filename, position))
self.cursor_pos_index = len(self.cursor_pos_history)-1
self.update_cursorpos_actions()
def cursor_moved(self, filename0, position0, filename1, position1):
"""Cursor was just moved: 'go to'"""
if position0 is not None:
self.add_cursor_position_to_history(filename0, position0)
self.add_cursor_position_to_history(filename1, position1)
def text_changed_at(self, filename, position):
self.last_edit_cursor_pos = (to_text_string(filename), position)
def current_file_changed(self, filename, position):
self.add_cursor_position_to_history(to_text_string(filename), position,
fc=True)
@Slot()
def go_to_last_edit_location(self):
if self.last_edit_cursor_pos is not None:
filename, position = self.last_edit_cursor_pos
if not osp.isfile(filename):
self.last_edit_cursor_pos = None
return
else:
self.load(filename)
editor = self.get_current_editor()
if position < editor.document().characterCount():
editor.set_cursor_position(position)
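# Step through the cursor-position history by index_move (-1 = previous,
# +1 = next), dropping entries whose file no longer exists on disk.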
def __move_cursor_position(self, index_move):
if self.cursor_pos_index is None:
return
filename, _position = self.cursor_pos_history[self.cursor_pos_index]
self.cursor_pos_history[self.cursor_pos_index] = ( filename,
self.get_current_editor().get_position('cursor') )
self.__ignore_cursor_position = True
old_index = self.cursor_pos_index
self.cursor_pos_index = min([
len(self.cursor_pos_history)-1,
max([0, self.cursor_pos_index+index_move])
])
filename, position = self.cursor_pos_history[self.cursor_pos_index]
if not osp.isfile(filename):
self.cursor_pos_history.pop(self.cursor_pos_index)
if self.cursor_pos_index < old_index:
old_index -= 1
self.cursor_pos_index = old_index
else:
self.load(filename)
editor = self.get_current_editor()
if position < editor.document().characterCount():
editor.set_cursor_position(position)
self.__ignore_cursor_position = False
self.update_cursorpos_actions()
@Slot()
def go_to_previous_cursor_position(self):
self.__move_cursor_position(-1)
@Slot()
def go_to_next_cursor_position(self):
self.__move_cursor_position(1)
@Slot()
def go_to_line(self):
"""Open 'go to line' dialog"""
editorstack = self.get_current_editorstack()
if editorstack is not None:
editorstack.go_to_line()
@Slot()
def set_or_clear_breakpoint(self):
"""Set/Clear breakpoint"""
editorstack = self.get_current_editorstack()
if editorstack is not None:
editorstack.set_or_clear_breakpoint()
@Slot()
def set_or_edit_conditional_breakpoint(self):
"""Set/Edit conditional breakpoint"""
editorstack = self.get_current_editorstack()
if editorstack is not None:
editorstack.set_or_edit_conditional_breakpoint()
@Slot()
def clear_all_breakpoints(self):
"""Clear breakpoints in all files"""
clear_all_breakpoints()
self.breakpoints_saved.emit()
editorstack = self.get_current_editorstack()
if editorstack is not None:
for data in editorstack.data:
data.editor.clear_breakpoints()
self.refresh_plugin()
def clear_breakpoint(self, filename, lineno):
"""Remove a single breakpoint"""
clear_breakpoint(filename, lineno)
self.breakpoints_saved.emit()
editorstack = self.get_current_editorstack()
if editorstack is not None:
index = self.is_file_opened(filename)
if index is not None:
editorstack.data[index].editor.add_remove_breakpoint(lineno)
def debug_command(self, command):
"""Debug actions"""
if self.main.ipyconsole is not None:
if self.main.last_console_plugin_focus_was_python:
self.main.extconsole.execute_python_code(command)
else:
self.main.ipyconsole.write_to_stdin(command)
focus_widget = self.main.ipyconsole.get_focus_widget()
if focus_widget:
focus_widget.setFocus()
else:
self.main.extconsole.execute_python_code(command)
#------ Run Python script
@Slot()
def edit_run_configurations(self):
dialog = RunConfigDialog(self)
dialog.size_change.connect(lambda s: self.set_dialog_size(s))
if self.dialog_size is not None:
dialog.resize(self.dialog_size)
fname = osp.abspath(self.get_current_filename())
dialog.setup(fname)
if dialog.exec_():
fname = dialog.file_to_run
if fname is not None:
self.load(fname)
self.run_file()
@Slot()
def run_file(self, debug=False):
"""Run script inside current interpreter or in a new one"""
editorstack = self.get_current_editorstack()
if editorstack.save():
editor = self.get_current_editor()
fname = osp.abspath(self.get_current_filename())
# Escape single and double quotes in fname (Fixes Issue 2158)
fname = fname.replace("'", r"\'")
fname = fname.replace('"', r'\"')
runconf = get_run_configuration(fname)
if runconf is None:
dialog = RunConfigOneDialog(self)
dialog.size_change.connect(lambda s: self.set_dialog_size(s))
if self.dialog_size is not None:
dialog.resize(self.dialog_size)
dialog.setup(fname)
if CONF.get('run', 'open_at_least_once', True):
# Open Run Config dialog at least once: the first time
# a script is ever run in Spyder, so that the user may
# see it at least once and be conscious that it exists
show_dlg = True
CONF.set('run', 'open_at_least_once', False)
else:
# Open Run Config dialog only
# if ALWAYS_OPEN_FIRST_RUN_OPTION option is enabled
show_dlg = CONF.get('run', ALWAYS_OPEN_FIRST_RUN_OPTION)
if show_dlg and not dialog.exec_():
return
runconf = dialog.get_configuration()
wdir = runconf.get_working_directory()
args = runconf.get_arguments()
python_args = runconf.get_python_arguments()
interact = runconf.interact
post_mortem = runconf.post_mortem
current = runconf.current
systerm = runconf.systerm
python = True # Note: in the future, it may be useful to run
# something in a terminal instead of a Python interp.
self.__last_ec_exec = (fname, wdir, args, interact, debug,
python, python_args, current, systerm, post_mortem)
self.re_run_file()
if not interact and not debug:
# If external console dockwidget is hidden, it will be
# raised in top-level and so focus will be given to the
# current external shell automatically
# (see SpyderPluginWidget.visibility_changed method)
editor.setFocus()
def set_dialog_size(self, size):
self.dialog_size = size
@Slot()
def debug_file(self):
"""Debug current script"""
self.run_file(debug=True)
editor = self.get_current_editor()
if editor.get_breakpoints():
time.sleep(0.5)
self.debug_command('continue')
@Slot()
def re_run_file(self):
"""Re-run last script"""
if self.get_option('save_all_before_run'):
self.save_all()
if self.__last_ec_exec is None:
return
(fname, wdir, args, interact, debug,
python, python_args, current, systerm, post_mortem) = self.__last_ec_exec
if current:
if self.main.ipyconsole is not None:
if self.main.last_console_plugin_focus_was_python:
self.run_in_current_extconsole.emit(fname, wdir, args,
debug, post_mortem)
else:
self.run_in_current_ipyclient.emit(fname, wdir, args,
debug, post_mortem)
else:
self.run_in_current_extconsole.emit(fname, wdir, args, debug,
post_mortem)
else:
self.main.open_external_console(fname, wdir, args, interact,
debug, python, python_args,
systerm, post_mortem)
@Slot()
def run_selection(self):
"""Run selection or current line in external console"""
editorstack = self.get_current_editorstack()
editorstack.run_selection()
@Slot()
def run_cell(self):
"""Run current cell"""
editorstack = self.get_current_editorstack()
editorstack.run_cell()
@Slot()
def run_cell_and_advance(self):
"""Run current cell and advance to the next one"""
editorstack = self.get_current_editorstack()
editorstack.run_cell_and_advance()
#------ Zoom in/out/reset
def zoom(self, constant):
"""Zoom in/out/reset"""
editor = self.get_current_editorstack().get_current_editor()
if constant == 0:
font = self.get_plugin_font()
editor.set_font(font)
else:
font = editor.font()
size = font.pointSize() + constant
if size > 0:
font.setPointSize(size)
editor.set_font(font)
#------ Options
def apply_plugin_settings(self, options):
"""Apply configuration file's plugin settings"""
# toggle_fullpath_sorting
if self.editorstacks is not None:
# --- syntax highlight and text rendering settings
color_scheme_n = 'color_scheme_name'
color_scheme_o = get_color_scheme(self.get_option(color_scheme_n))
font_n = 'plugin_font'
font_o = self.get_plugin_font()
currentline_n = 'highlight_current_line'
currentline_o = self.get_option(currentline_n)
currentcell_n = 'highlight_current_cell'
currentcell_o = self.get_option(currentcell_n)
occurence_n = 'occurence_highlighting'
occurence_o = self.get_option(occurence_n)
occurence_timeout_n = 'occurence_highlighting/timeout'
occurence_timeout_o = self.get_option(occurence_timeout_n)
focus_to_editor_n = 'focus_to_editor'
focus_to_editor_o = self.get_option(focus_to_editor_n)
for editorstack in self.editorstacks:
if font_n in options:
scs = color_scheme_o if color_scheme_n in options else None
editorstack.set_default_font(font_o, scs)
completion_size = CONF.get('editor_appearance',
'completion/size')
for finfo in editorstack.data:
comp_widget = finfo.editor.completion_widget
comp_widget.setup_appearance(completion_size, font_o)
elif color_scheme_n in options:
editorstack.set_color_scheme(color_scheme_o)
if currentline_n in options:
editorstack.set_highlight_current_line_enabled(
currentline_o)
if currentcell_n in options:
editorstack.set_highlight_current_cell_enabled(
currentcell_o)
if occurence_n in options:
editorstack.set_occurence_highlighting_enabled(occurence_o)
if occurence_timeout_n in options:
editorstack.set_occurence_highlighting_timeout(
occurence_timeout_o)
if focus_to_editor_n in options:
editorstack.set_focus_to_editor(focus_to_editor_o)
# --- everything else
fpsorting_n = 'fullpath_sorting'
fpsorting_o = self.get_option(fpsorting_n)
tabbar_n = 'show_tab_bar'
tabbar_o = self.get_option(tabbar_n)
linenb_n = 'line_numbers'
linenb_o = self.get_option(linenb_n)
blanks_n = 'blank_spaces'
blanks_o = self.get_option(blanks_n)
edgeline_n = 'edge_line'
edgeline_o = self.get_option(edgeline_n)
edgelinecol_n = 'edge_line_column'
edgelinecol_o = self.get_option(edgelinecol_n)
wrap_n = 'wrap'
wrap_o = self.get_option(wrap_n)
tabindent_n = 'tab_always_indent'
tabindent_o = self.get_option(tabindent_n)
ibackspace_n = 'intelligent_backspace'
ibackspace_o = self.get_option(ibackspace_n)
removetrail_n = 'always_remove_trailing_spaces'
removetrail_o = self.get_option(removetrail_n)
autocomp_n = 'codecompletion/auto'
autocomp_o = self.get_option(autocomp_n)
case_comp_n = 'codecompletion/case_sensitive'
case_comp_o = self.get_option(case_comp_n)
enter_key_n = 'codecompletion/enter_key'
enter_key_o = self.get_option(enter_key_n)
calltips_n = 'calltips'
calltips_o = self.get_option(calltips_n)
gotodef_n = 'go_to_definition'
gotodef_o = self.get_option(gotodef_n)
closepar_n = 'close_parentheses'
closepar_o = self.get_option(closepar_n)
close_quotes_n = 'close_quotes'
close_quotes_o = self.get_option(close_quotes_n)
add_colons_n = 'add_colons'
add_colons_o = self.get_option(add_colons_n)
autounindent_n = 'auto_unindent'
autounindent_o = self.get_option(autounindent_n)
indent_chars_n = 'indent_chars'
indent_chars_o = self.get_option(indent_chars_n)
tab_stop_width_n = 'tab_stop_width'
tab_stop_width_o = self.get_option(tab_stop_width_n)
inspector_n = 'connect_to_oi'
inspector_o = CONF.get('inspector', 'connect/editor')
todo_n = 'todo_list'
todo_o = self.get_option(todo_n)
pyflakes_n = 'code_analysis/pyflakes'
pyflakes_o = self.get_option(pyflakes_n)
pep8_n = 'code_analysis/pep8'
pep8_o = self.get_option(pep8_n)
rt_analysis_n = 'realtime_analysis'
rt_analysis_o = self.get_option(rt_analysis_n)
rta_timeout_n = 'realtime_analysis/timeout'
rta_timeout_o = self.get_option(rta_timeout_n)
finfo = self.get_current_finfo()
if fpsorting_n in options:
if self.outlineexplorer is not None:
self.outlineexplorer.set_fullpath_sorting(fpsorting_o)
for window in self.editorwindows:
window.editorwidget.outlineexplorer.set_fullpath_sorting(
fpsorting_o)
for editorstack in self.editorstacks:
if fpsorting_n in options:
editorstack.set_fullpath_sorting_enabled(fpsorting_o)
if tabbar_n in options:
editorstack.set_tabbar_visible(tabbar_o)
if linenb_n in options:
editorstack.set_linenumbers_enabled(linenb_o,
current_finfo=finfo)
if blanks_n in options:
editorstack.set_blanks_enabled(blanks_o)
self.showblanks_action.setChecked(blanks_o)
if edgeline_n in options:
editorstack.set_edgeline_enabled(edgeline_o)
if edgelinecol_n in options:
editorstack.set_edgeline_column(edgelinecol_o)
if wrap_n in options:
editorstack.set_wrap_enabled(wrap_o)
if tabindent_n in options:
editorstack.set_tabmode_enabled(tabindent_o)
if ibackspace_n in options:
editorstack.set_intelligent_backspace_enabled(ibackspace_o)
if removetrail_n in options:
editorstack.set_always_remove_trailing_spaces(removetrail_o)
if autocomp_n in options:
editorstack.set_codecompletion_auto_enabled(autocomp_o)
if case_comp_n in options:
editorstack.set_codecompletion_case_enabled(case_comp_o)
if enter_key_n in options:
editorstack.set_codecompletion_enter_enabled(enter_key_o)
if calltips_n in options:
editorstack.set_calltips_enabled(calltips_o)
if gotodef_n in options:
editorstack.set_go_to_definition_enabled(gotodef_o)
if closepar_n in options:
editorstack.set_close_parentheses_enabled(closepar_o)
if close_quotes_n in options:
editorstack.set_close_quotes_enabled(close_quotes_o)
if add_colons_n in options:
editorstack.set_add_colons_enabled(add_colons_o)
if autounindent_n in options:
editorstack.set_auto_unindent_enabled(autounindent_o)
if indent_chars_n in options:
editorstack.set_indent_chars(indent_chars_o)
if tab_stop_width_n in options:
editorstack.set_tab_stop_width(tab_stop_width_o)
if inspector_n in options:
editorstack.set_inspector_enabled(inspector_o)
if todo_n in options:
editorstack.set_todolist_enabled(todo_o,
current_finfo=finfo)
if pyflakes_n in options:
editorstack.set_pyflakes_enabled(pyflakes_o,
current_finfo=finfo)
if pep8_n in options:
editorstack.set_pep8_enabled(pep8_o, current_finfo=finfo)
if rt_analysis_n in options:
editorstack.set_realtime_analysis_enabled(rt_analysis_o)
if rta_timeout_n in options:
editorstack.set_realtime_analysis_timeout(rta_timeout_o)
# We must update the current editor after the others:
# (otherwise, code analysis buttons state would correspond to the
# last editor instead of showing the one of the current editor)
if finfo is not None:
if todo_n in options and todo_o:
finfo.run_todo_finder()
if pyflakes_n in options or pep8_n in options:
finfo.run_code_analysis(pyflakes_o, pep8_o)
|
[] |
[] |
[
"USER",
"USERNAME"
] |
[]
|
["USER", "USERNAME"]
|
python
| 2 | 0 | |
pkg/controller/dnszone/awsactuator_test.go
|
package dnszone
import (
"fmt"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/resourcegroupstaggingapi"
"github.com/aws/aws-sdk-go/service/route53"
"github.com/golang/mock/gomock"
"github.com/openshift/hive/pkg/awsclient/mock"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes/scheme"
"github.com/openshift/hive/pkg/apis"
hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"
)
func init() {
apis.AddToScheme(scheme.Scheme)
}
// TestNewAWSActuator tests that a new AWSActuator object can be created.
func TestNewAWSActuator(t *testing.T) {
cases := []struct {
name string
dnsZone *hivev1.DNSZone
secret *corev1.Secret
}{
{
name: "Successfully create new zone",
dnsZone: validDNSZone(),
secret: validAWSSecret(),
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
// Arrange
mocks := setupDefaultMocks(t)
expectedAWSActuator := &AWSActuator{
logger: log.WithField("controller", ControllerName),
dnsZone: tc.dnsZone,
}
// Act
zr, err := NewAWSActuator(
expectedAWSActuator.logger,
tc.secret,
tc.dnsZone,
fakeAWSClientBuilder(mocks.mockAWSClient),
)
expectedAWSActuator.awsClient = zr.awsClient // Function pointers can't be compared reliably. Don't compare.
// Assert
assert.Nil(t, err)
assert.NotNil(t, zr.awsClient)
assert.Equal(t, expectedAWSActuator, zr)
})
}
}
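// The helpers below preprogram the mocked AWS client with the Route 53 and
// resource-tagging responses that the DNS zone actuator tests expect.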
func mockAWSZoneExists(expect *mock.MockClientMockRecorder, zone *hivev1.DNSZone) {
if zone.Status.AWS == nil || aws.StringValue(zone.Status.AWS.ZoneID) == "" {
expect.GetResourcesPages(gomock.Any(), gomock.Any()).
Do(func(input *resourcegroupstaggingapi.GetResourcesInput, f func(*resourcegroupstaggingapi.GetResourcesOutput, bool) bool) {
f(&resourcegroupstaggingapi.GetResourcesOutput{
ResourceTagMappingList: []*resourcegroupstaggingapi.ResourceTagMapping{
{
ResourceARN: aws.String("arn:aws:route53:::hostedzone/1234"),
},
},
}, true)
}).Return(nil).Times(1)
}
expect.GetHostedZone(gomock.Any()).Return(&route53.GetHostedZoneOutput{
HostedZone: &route53.HostedZone{
Id: aws.String("1234"),
Name: aws.String("blah.example.com"),
},
}, nil).Times(1)
}
func mockAWSZoneDoesntExist(expect *mock.MockClientMockRecorder, zone *hivev1.DNSZone) {
if zone.Status.AWS != nil && aws.StringValue(zone.Status.AWS.ZoneID) != "" {
expect.GetHostedZone(gomock.Any()).
Return(nil, awserr.New(route53.ErrCodeNoSuchHostedZone, "doesnt exist", fmt.Errorf("doesnt exist"))).Times(1)
return
}
expect.GetResourcesPages(gomock.Any(), gomock.Any()).Return(nil).Times(1)
}
func mockCreateAWSZone(expect *mock.MockClientMockRecorder) {
expect.CreateHostedZone(gomock.Any()).Return(&route53.CreateHostedZoneOutput{
HostedZone: &route53.HostedZone{
Id: aws.String("1234"),
Name: aws.String("blah.example.com"),
},
}, nil).Times(1)
}
func mockCreateAWSZoneDuplicateFailure(expect *mock.MockClientMockRecorder) {
expect.CreateHostedZone(gomock.Any()).Return(nil, awserr.New(route53.ErrCodeHostedZoneAlreadyExists, "already exists", fmt.Errorf("already exists"))).Times(1)
}
func mockNoExistingAWSTags(expect *mock.MockClientMockRecorder) {
expect.ListTagsForResource(gomock.Any()).Return(&route53.ListTagsForResourceOutput{
ResourceTagSet: &route53.ResourceTagSet{
ResourceId: aws.String("1234"),
Tags: []*route53.Tag{},
},
}, nil).Times(1)
}
func mockExistingAWSTags(expect *mock.MockClientMockRecorder) {
expect.ListTagsForResource(gomock.Any()).Return(&route53.ListTagsForResourceOutput{
ResourceTagSet: &route53.ResourceTagSet{
ResourceId: aws.String("1234"),
Tags: []*route53.Tag{
{
Key: aws.String(hiveDNSZoneAWSTag),
Value: aws.String("ns/dnszoneobject"),
},
{
Key: aws.String("foo"),
Value: aws.String("bar"),
},
},
},
}, nil).Times(1)
}
func mockSyncAWSTags(expect *mock.MockClientMockRecorder) {
expect.ChangeTagsForResource(gomock.Any()).Return(&route53.ChangeTagsForResourceOutput{}, nil).AnyTimes()
}
func mockAWSGetNSRecord(expect *mock.MockClientMockRecorder) {
expect.ListResourceRecordSets(gomock.Any()).Return(&route53.ListResourceRecordSetsOutput{
ResourceRecordSets: []*route53.ResourceRecordSet{
{
Type: aws.String("NS"),
Name: aws.String("blah.example.com."),
ResourceRecords: []*route53.ResourceRecord{
{
Value: aws.String("ns1.example.com"),
},
{
Value: aws.String("ns2.example.com"),
},
},
},
},
}, nil)
}
func mockListAWSZonesByNameFound(expect *mock.MockClientMockRecorder, zone *hivev1.DNSZone) {
expect.ListHostedZonesByName(gomock.Any()).Return(&route53.ListHostedZonesByNameOutput{
HostedZones: []*route53.HostedZone{
{
Id: aws.String("1234"),
Name: aws.String("blah.example.com"),
CallerReference: aws.String(string(zone.UID)),
},
},
}, nil).Times(1)
}
func mockDeleteAWSZone(expect *mock.MockClientMockRecorder) {
expect.DeleteHostedZone(gomock.Any()).Return(nil, nil).Times(1)
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
internal/pipe/git/git.go
|
package git
import (
"fmt"
"os"
"os/exec"
"strconv"
"strings"
"time"
"github.com/apex/log"
"github.com/goreleaser/goreleaser/internal/git"
"github.com/goreleaser/goreleaser/internal/pipe"
"github.com/goreleaser/goreleaser/pkg/context"
)
// Pipe that sets up git state.
type Pipe struct{}
func (Pipe) String() string {
return "getting and validating git state"
}
// Run the pipe.
func (Pipe) Run(ctx *context.Context) error {
if _, err := exec.LookPath("git"); err != nil {
return ErrNoGit
}
info, err := getInfo(ctx)
if err != nil {
return err
}
ctx.Git = info
log.Infof("releasing %s, commit %s", info.CurrentTag, info.Commit)
ctx.Version = strings.TrimPrefix(ctx.Git.CurrentTag, "v")
return validate(ctx)
}
// nolint: gochecknoglobals
var fakeInfo = context.GitInfo{
Branch: "none",
CurrentTag: "v0.0.0",
Commit: "none",
ShortCommit: "none",
FullCommit: "none",
}
func getInfo(ctx *context.Context) (context.GitInfo, error) {
if !git.IsRepo() && ctx.Snapshot {
log.Warn("accepting to run without a git repo because this is a snapshot")
return fakeInfo, nil
}
if !git.IsRepo() {
return context.GitInfo{}, ErrNotRepository
}
info, err := getGitInfo()
if err != nil && ctx.Snapshot {
log.WithError(err).Warn("ignoring errors because this is a snapshot")
if info.Commit == "" {
info = fakeInfo
}
return info, nil
}
return info, err
}
func getGitInfo() (context.GitInfo, error) {
branch, err := getBranch()
if err != nil {
return context.GitInfo{}, fmt.Errorf("couldn't get current branch: %w", err)
}
short, err := getShortCommit()
if err != nil {
return context.GitInfo{}, fmt.Errorf("couldn't get current commit: %w", err)
}
full, err := getFullCommit()
if err != nil {
return context.GitInfo{}, fmt.Errorf("couldn't get current commit: %w", err)
}
date, err := getCommitDate()
if err != nil {
return context.GitInfo{}, fmt.Errorf("couldn't get commit date: %w", err)
}
url, err := getURL()
if err != nil {
return context.GitInfo{}, fmt.Errorf("couldn't get remote URL: %w", err)
}
tag, err := getTag()
if err != nil {
return context.GitInfo{
Branch: branch,
Commit: full,
FullCommit: full,
ShortCommit: short,
CommitDate: date,
URL: url,
CurrentTag: "v0.0.0",
}, ErrNoTag
}
return context.GitInfo{
Branch: branch,
CurrentTag: tag,
Commit: full,
FullCommit: full,
ShortCommit: short,
CommitDate: date,
URL: url,
}, nil
}
func validate(ctx *context.Context) error {
if ctx.Snapshot {
return pipe.ErrSnapshotEnabled
}
if ctx.SkipValidate {
return pipe.ErrSkipValidateEnabled
}
if _, err := os.Stat(".git/shallow"); err == nil {
log.Warn("running against a shallow clone - check your CI documentation at https://goreleaser.com/ci")
}
if err := CheckDirty(); err != nil {
return err
}
_, err := git.Clean(git.Run("describe", "--exact-match", "--tags", "--match", ctx.Git.CurrentTag))
if err != nil {
return ErrWrongRef{
commit: ctx.Git.Commit,
tag: ctx.Git.CurrentTag,
}
}
return nil
}
// CheckDirty returns an error if the current git repository is dirty.
func CheckDirty() error {
out, err := git.Run("status", "--porcelain")
if strings.TrimSpace(out) != "" || err != nil {
return ErrDirty{status: out}
}
return nil
}
func getBranch() (string, error) {
return git.Clean(git.Run("rev-parse", "--abbrev-ref", "HEAD", "--quiet"))
}
func getCommitDate() (time.Time, error) {
ct, err := git.Clean(git.Run("show", "--format='%ct'", "HEAD", "--quiet"))
if err != nil {
return time.Time{}, err
}
if ct == "" {
return time.Time{}, nil
}
i, err := strconv.ParseInt(ct, 10, 64)
if err != nil {
return time.Time{}, err
}
t := time.Unix(i, 0).UTC()
return t, nil
}
func getShortCommit() (string, error) {
return git.Clean(git.Run("show", "--format='%h'", "HEAD", "--quiet"))
}
func getFullCommit() (string, error) {
return git.Clean(git.Run("show", "--format='%H'", "HEAD", "--quiet"))
}
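// getTag resolves the tag to release, trying in order: the GORELEASER_CURRENT_TAG
// environment variable, tags pointing at HEAD (newest first), and finally the
// closest reachable tag reported by git describe.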
func getTag() (string, error) {
var tag string
var err error
for _, fn := range []func() (string, error){
func() (string, error) {
return os.Getenv("GORELEASER_CURRENT_TAG"), nil
},
func() (string, error) {
return git.Clean(git.Run("tag", "--points-at", "HEAD", "--sort", "-version:creatordate"))
},
func() (string, error) {
return git.Clean(git.Run("describe", "--tags", "--abbrev=0"))
},
} {
tag, err = fn()
if tag != "" || err != nil {
return tag, err
}
}
return tag, err
}
func getURL() (string, error) {
return git.Clean(git.Run("ls-remote", "--get-url"))
}
|
[
"\"GORELEASER_CURRENT_TAG\""
] |
[] |
[
"GORELEASER_CURRENT_TAG"
] |
[]
|
["GORELEASER_CURRENT_TAG"]
|
go
| 1 | 0 | |
70-problems-solved/graphs/roadslibs/Solution.java
|
import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Scanner;
public class Solution {
private static final Scanner scanner = new Scanner(System.in);
static long roadsAndLibraries(int n, int costLib, int costRoad, int[][] cities) {
HashMap<Integer, ArrayList<Integer>> cityMap = new HashMap<>();
for (int i = 1; i <= n; i++) {
ArrayList<Integer> list = new ArrayList<>();
list.add(i);
cityMap.put(i, list);
}
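// Union step: for each road, merge one city's component list into the
// other and re-point every moved city at the surviving list.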
for (int[] pair : cities) {
ArrayList<Integer> list1 = cityMap.get(pair[0]);
ArrayList<Integer> list2 = cityMap.get(pair[1]);
if (list1 != list2) {
list1.addAll(list2);
list2.forEach(i -> cityMap.put(i, list1));
}
}
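// If a library is cheaper than a road, build a library in every city;
// otherwise each connected component needs one library plus (size - 1) roads.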
if (costLib < costRoad)
return (long) n * costLib;
else {
long cost = 0;
for (ArrayList<Integer> list : new HashSet<>(cityMap.values())) {
int size = list.size();
if (size > 0) {
cost += costLib;
cost += (long) (size - 1) * costRoad;
}
}
return cost;
}
}
public static void main(String[] args) throws IOException {
try (BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")))) {
int q = scanner.nextInt();
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
for (int qItr = 0; qItr < q; qItr++) {
String[] nmC_libC_road = scanner.nextLine().split(" ");
int n = Integer.parseInt(nmC_libC_road[0]);
int m = Integer.parseInt(nmC_libC_road[1]);
int c_lib = Integer.parseInt(nmC_libC_road[2]);
int c_road = Integer.parseInt(nmC_libC_road[3]);
int[][] cities = new int[m][2];
for (int i = 0; i < m; i++) {
String[] citiesRowItems = scanner.nextLine().split(" ");
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
for (int j = 0; j < 2; j++) {
int citiesItem = Integer.parseInt(citiesRowItems[j]);
cities[i][j] = citiesItem;
}
}
long result = roadsAndLibraries(n, c_lib, c_road, cities);
bufferedWriter.write(String.valueOf(result));
bufferedWriter.newLine();
}
}
scanner.close();
}
}
|
[
"\"OUTPUT_PATH\""
] |
[] |
[
"OUTPUT_PATH"
] |
[]
|
["OUTPUT_PATH"]
|
java
| 1 | 0 | |
lib/core/handle_pit.py
|
#!/usr/bin/python
#coding:utf-8
import os
import io
import traceback
import time
import os.path
import shutil
from xml.etree.ElementTree import Element, SubElement, ElementTree
import xml.etree.ElementTree as ETS
ETS.register_namespace('', "http://peachfuzzer.com/2012/Peach")
from sqlalchemy import desc
from sqlalchemy import *
from sqlalchemy.orm import sessionmaker,mapper
from sqlalchemy.ext.declarative import declarative_base
import ConfigParser
Base = declarative_base()
class peach_pit(Base):
__tablename__ = 'peach_pit'
peach_id = Column(Integer, primary_key=True)
pit_name = Column(String)
pit_hash = Column(String)
aid = Column(Integer)
peach_test = Column(String)
class pit_test_t(Base):
__tablename__ = 'pit_test_t'
id = Column(Integer, primary_key=True)
test = Column(String)
test_status = Column(Integer)
peach_id = Column(Integer)
class pit_state_model_t(Base):
__tablename__ = 'pit_state_model_t'
id = Column(Integer, primary_key=True)
state_model = Column(String)
state_model_status = Column(Integer)
test_id = Column(Integer, ForeignKey('pit_test_t.id'))
class pit_state_t(Base):
__tablename__ = 'pit_state_t'
id = Column(Integer, primary_key=True)
state_name = Column(String)
state_status = Column(Integer)
state_model_id = Column(Integer, ForeignKey('pit_state_model_t.id'))
action_count = Column(Integer)
action_index = Column(Integer)
# Global variable: allowed pit file formats
Const_Pit_Format=["xml"]
current_test_id = 0
current_state_model_id = 0
current_state_id = 0
class HandlePeachPit:
'''A class for handling Peach pit files, written by Brother Song'''
# Class variable: file list
fileList = [""]
# Class variable: file counter
counter = 0
def __init__(self):
pass
# Configuration file
iniDBFile = "url.ini"
# Load the database configuration
def initDBParam(self):
global db_url
db_url = os.environ.get('DATABASE_URL')
# Connect to the database
def connectDB(self):
engine = create_engine(db_url)
db_session = sessionmaker(bind=engine)
global session
session = db_session()
def RecusWalkDir(self, dir, filtrate=0):
# This method recursively walks all files in the target directory, collects
# the absolute paths of files in the specified formats, and stores the paths
# via the class variable fileList
global Const_Pit_Format
for s in os.listdir(dir):
newDir = dir+"/"+s
if os.path.isdir(newDir):
self.RecusWalkDir(newDir, 1)
else:
if os.path.isfile(newDir):
if filtrate:
if newDir and (self.GetFileFormat(newDir) in Const_Pit_Format):
self.HandlePitFile(newDir)
#else:
def GetFileFormat(self, fileName):
"""返回文件格式"""
if fileName:
BaseName=os.path.basename(fileName)
str=BaseName.split(".")
return str[-1]
else:
return fileName
def HandlePitFile(self, fileName):
"""处理peach pit文件"""
tree = ETS.parse(fileName)
etree = tree.getroot()
for state_model_node in etree:
if state_model_node.tag == "{http://peachfuzzer.com/2012/Peach}StateModel":
state_model_name = state_model_node.attrib['name']
self.HandleStateModel(state_model_name)
for state_node in state_model_node:
if state_node.tag =="{http://peachfuzzer.com/2012/Peach}State":
state_name = state_node.attrib['name']
action_index = 0
for action_node in state_node:
if action_node.tag =="{http://peachfuzzer.com/2012/Peach}Action" and action_node.attrib['type'] == "output":
action_index = action_index + 1
self.HandleState(state_name, action_index)
#pit_state_model_t
def HandleStateModel(self, state_model_name):
global current_test_id
global current_state_model_id
global current_state_id
if(state_model_name):
session.commit()
pit_state_model_m = pit_state_model_t()
pit_state_model_m.state_model = state_model_name
pit_state_model_m.state_model_status = 0
pit_state_model_m.test_id = current_test_id
session.add(pit_state_model_m)
session.commit()
query = session.query(pit_state_model_t.id).order_by(pit_state_model_t.id.desc()).first()
rs = []
t = ()
for t in query:
rs.append(t)
current_state_model_id = rs[0]
def HandleState(self, state_name, action_count):
global current_test_id
global current_state_model_id
global current_state_id
if(state_name):
session.commit()
pit_state_m = pit_state_t()
pit_state_m.state_name = state_name
pit_state_m.state_status = 0
pit_state_m.state_model_id = current_state_model_id
pit_state_m.action_count = action_count
pit_state_m.action_index = 1
session.add(pit_state_m)
session.commit()
query = session.query(pit_state_t.id).order_by(pit_state_t.id.desc()).first()
rs = []
t = ()
for t in query:
rs.append(t)
current_state_id = rs[0]
def HandleTest(self, test_name, peach_id):
global current_test_id
global current_state_model_id
global current_state_id
if(test_name):
session.commit()
pit_test_m = pit_test_t()
pit_test_m.test = test_name
pit_test_m.test_status = 0
pit_test_m.peach_id = peach_id
session.add(pit_test_m)
session.commit()
session.commit()
query = session.query(pit_test_t.id).order_by(pit_test_t.id.desc()).first()
rs = []
t = ()
for t in query:
rs.append(t)
current_test_id = rs[0]
if __name__=="__main__":
b = HandlePeachPit()
b.initDBParam()
b.connectDB()
peach_id = 0
b.HandleTest("Default", peach_id)
b.RecusWalkDir(dir="E:/ftp_fuzzing", filtrate=1)
|
[] |
[] |
[
"DATABASE_URL"
] |
[]
|
["DATABASE_URL"]
|
python
| 1 | 0 | |
src/landing_cms/settings.py
|
"""
Django settings for landing_cms project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 's#pyt5(0qd#zy@8n#w(po6$gnt1!gds4&9hofr&q2)4214mb08'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'sorl.thumbnail',
'web',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'landing_cms.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates', ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'landing_cms.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
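# Connection parameters are read from environment variables (for example ones
# supplied by a docker-compose file); a missing variable raises KeyError at
# import time.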
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ['POSTGRES_DB'],
'USER': os.environ['POSTGRES_USER'],
'PASSWORD': os.environ['POSTGRES_PASSWORD'],
'HOST': os.environ['POSTGRES_URL'],
'PORT': os.environ['POSTGRES_PORT'],
},
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
PUBLIC_DIR = os.path.join(BASE_DIR, 'public')
STATIC_ROOT = os.path.join(PUBLIC_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(PUBLIC_DIR, 'media')
MEDIA_URL = '/media/'
for d in (PUBLIC_DIR, STATIC_ROOT, MEDIA_ROOT):
if not os.path.isdir(d):
os.mkdir(d)
if DEBUG:
ALLOWED_HOSTS += ['*', ]
def show_toolbar(request):
return True
INSTALLED_APPS += ['debug_toolbar', ]
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware', ]
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK': show_toolbar,
}
|
[] |
[] |
[
"POSTGRES_USER",
"POSTGRES_URL",
"POSTGRES_PORT",
"POSTGRES_DB",
"POSTGRES_PASSWORD"
] |
[]
|
["POSTGRES_USER", "POSTGRES_URL", "POSTGRES_PORT", "POSTGRES_DB", "POSTGRES_PASSWORD"]
|
python
| 5 | 0 | |
kubernetes-model/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go
|
/**
* Copyright (C) 2015 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// +build windows
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package winkernel
import (
"encoding/json"
"fmt"
"net"
"os"
"reflect"
"sync"
"sync/atomic"
"time"
"github.com/Microsoft/hcsshim"
"github.com/davecgh/go-spew/spew"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
apiservice "k8s.io/kubernetes/pkg/api/service"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/helper"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/proxy"
"k8s.io/kubernetes/pkg/proxy/healthcheck"
"k8s.io/kubernetes/pkg/util/async"
)
// KernelCompatTester tests whether the required kernel capabilities are
// present to run the windows kernel proxier.
type KernelCompatTester interface {
IsCompatible() error
}
// CanUseWinKernelProxier returns true if we should use the Kernel Proxier
// instead of the "classic" userspace Proxier. This is determined by checking
// the windows kernel version and for the existence of kernel features.
func CanUseWinKernelProxier(kcompat KernelCompatTester) (bool, error) {
// Check that the kernel supports what we need.
if err := kcompat.IsCompatible(); err != nil {
return false, err
}
return true, nil
}
type WindowsKernelCompatTester struct{}
// IsCompatible returns true if winkernel can support this mode of proxy
func (lkct WindowsKernelCompatTester) IsCompatible() error {
_, err := hcsshim.HNSListPolicyListRequest()
if err != nil {
return fmt.Errorf("Windows kernel is not compatible for Kernel mode")
}
return nil
}
type externalIPInfo struct {
ip string
hnsID string
}
type loadBalancerIngressInfo struct {
ip string
hnsID string
}
// internal struct for string service information
type serviceInfo struct {
clusterIP net.IP
port int
protocol api.Protocol
nodePort int
targetPort int
loadBalancerStatus api.LoadBalancerStatus
sessionAffinityType api.ServiceAffinity
stickyMaxAgeSeconds int
externalIPs []*externalIPInfo
loadBalancerIngressIPs []*loadBalancerIngressInfo
loadBalancerSourceRanges []string
onlyNodeLocalEndpoints bool
healthCheckNodePort int
hnsID string
nodePorthnsID string
policyApplied bool
}
type hnsNetworkInfo struct {
name string
id string
}
func Log(v interface{}, message string, level glog.Level) {
glog.V(level).Infof("%s, %s", message, spew.Sdump(v))
}
func LogJson(v interface{}, message string, level glog.Level) {
jsonString, err := json.Marshal(v)
if err == nil {
glog.V(level).Infof("%s, %s", message, string(jsonString))
}
}
// internal struct for endpoints information
type endpointsInfo struct {
ip string
port uint16
isLocal bool
macAddress string
hnsID string
refCount uint16
}
func newEndpointInfo(ip string, port uint16, isLocal bool) *endpointsInfo {
info := &endpointsInfo{
ip: ip,
port: port,
isLocal: isLocal,
macAddress: "00:11:22:33:44:55", // Hardcoding to some Random Mac
refCount: 0,
hnsID: "",
}
return info
}
func (ep *endpointsInfo) Cleanup() {
Log(ep, "Endpoint Cleanup", 3)
ep.refCount--
// Remove the remote hns endpoint, if no service is referring it
// Never delete a Local Endpoint. Local Endpoints are already created by other entities.
// Remove only remote endpoints created by this service
if ep.refCount <= 0 && !ep.isLocal {
glog.V(4).Infof("Removing endpoints for %v, since no one is referencing it", ep)
deleteHnsEndpoint(ep.hnsID)
ep.hnsID = ""
}
}
// returns a new serviceInfo struct
func newServiceInfo(svcPortName proxy.ServicePortName, port *api.ServicePort, service *api.Service) *serviceInfo {
onlyNodeLocalEndpoints := false
if utilfeature.DefaultFeatureGate.Enabled(features.ExternalTrafficLocalOnly) &&
apiservice.RequestsOnlyLocalTraffic(service) {
onlyNodeLocalEndpoints = true
}
// set default session sticky max age 180min=10800s
stickyMaxAgeSeconds := 10800
if service.Spec.SessionAffinity == api.ServiceAffinityClientIP {
// Kube-apiserver side guarantees SessionAffinityConfig won't be nil when session affinity type is ClientIP
stickyMaxAgeSeconds = int(*service.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds)
}
info := &serviceInfo{
clusterIP: net.ParseIP(service.Spec.ClusterIP),
port: int(port.Port),
protocol: port.Protocol,
nodePort: int(port.NodePort),
targetPort: port.TargetPort.IntValue(),
// Deep-copy in case the service instance changes
loadBalancerStatus: *helper.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer),
sessionAffinityType: service.Spec.SessionAffinity,
stickyMaxAgeSeconds: stickyMaxAgeSeconds,
loadBalancerSourceRanges: make([]string, len(service.Spec.LoadBalancerSourceRanges)),
onlyNodeLocalEndpoints: onlyNodeLocalEndpoints,
}
copy(info.loadBalancerSourceRanges, service.Spec.LoadBalancerSourceRanges)
for _, eip := range service.Spec.ExternalIPs {
info.externalIPs = append(info.externalIPs, &externalIPInfo{ip: eip})
}
for _, ingress := range service.Status.LoadBalancer.Ingress {
info.loadBalancerIngressIPs = append(info.loadBalancerIngressIPs, &loadBalancerIngressInfo{ip: ingress.IP})
}
if apiservice.NeedsHealthCheck(service) {
p := service.Spec.HealthCheckNodePort
if p == 0 {
glog.Errorf("Service %q has no healthcheck nodeport", svcPortName.NamespacedName.String())
} else {
info.healthCheckNodePort = int(p)
}
}
return info
}
type endpointsChange struct {
previous proxyEndpointsMap
current proxyEndpointsMap
}
type endpointsChangeMap struct {
lock sync.Mutex
hostname string
items map[types.NamespacedName]*endpointsChange
}
type serviceChange struct {
previous proxyServiceMap
current proxyServiceMap
}
type serviceChangeMap struct {
lock sync.Mutex
items map[types.NamespacedName]*serviceChange
}
type updateEndpointMapResult struct {
hcEndpoints map[types.NamespacedName]int
staleEndpoints map[endpointServicePair]bool
staleServiceNames map[proxy.ServicePortName]bool
}
type updateServiceMapResult struct {
hcServices map[types.NamespacedName]uint16
staleServices sets.String
}
type proxyServiceMap map[proxy.ServicePortName]*serviceInfo
type proxyEndpointsMap map[proxy.ServicePortName][]*endpointsInfo
func newEndpointsChangeMap(hostname string) endpointsChangeMap {
return endpointsChangeMap{
hostname: hostname,
items: make(map[types.NamespacedName]*endpointsChange),
}
}
func (ecm *endpointsChangeMap) update(namespacedName *types.NamespacedName, previous, current *api.Endpoints) bool {
ecm.lock.Lock()
defer ecm.lock.Unlock()
change, exists := ecm.items[*namespacedName]
if !exists {
change = &endpointsChange{}
change.previous = endpointsToEndpointsMap(previous, ecm.hostname)
ecm.items[*namespacedName] = change
}
change.current = endpointsToEndpointsMap(current, ecm.hostname)
if reflect.DeepEqual(change.previous, change.current) {
delete(ecm.items, *namespacedName)
}
return len(ecm.items) > 0
}
func newServiceChangeMap() serviceChangeMap {
return serviceChangeMap{
items: make(map[types.NamespacedName]*serviceChange),
}
}
func (scm *serviceChangeMap) update(namespacedName *types.NamespacedName, previous, current *api.Service) bool {
scm.lock.Lock()
defer scm.lock.Unlock()
change, exists := scm.items[*namespacedName]
if !exists {
// Service is Added
change = &serviceChange{}
change.previous = serviceToServiceMap(previous)
scm.items[*namespacedName] = change
}
change.current = serviceToServiceMap(current)
if reflect.DeepEqual(change.previous, change.current) {
delete(scm.items, *namespacedName)
}
return len(scm.items) > 0
}
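// merge folds newly observed service ports into the proxier's service map,
// tearing down the HNS policies of entries being replaced, and returns the
// set of ports present in the update.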
func (sm *proxyServiceMap) merge(other proxyServiceMap, curEndpoints proxyEndpointsMap) sets.String {
existingPorts := sets.NewString()
for svcPortName, info := range other {
existingPorts.Insert(svcPortName.Port)
svcInfo, exists := (*sm)[svcPortName]
if !exists {
glog.V(1).Infof("Adding new service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol)
} else {
glog.V(1).Infof("Updating existing service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol)
svcInfo.cleanupAllPolicies(curEndpoints[svcPortName])
delete(*sm, svcPortName)
}
(*sm)[svcPortName] = info
}
return existingPorts
}
func (sm *proxyServiceMap) unmerge(other proxyServiceMap, existingPorts, staleServices sets.String, curEndpoints proxyEndpointsMap) {
for svcPortName := range other {
if existingPorts.Has(svcPortName.Port) {
continue
}
info, exists := (*sm)[svcPortName]
if exists {
glog.V(1).Infof("Removing service port %q", svcPortName)
if info.protocol == api.ProtocolUDP {
staleServices.Insert(info.clusterIP.String())
}
info.cleanupAllPolicies(curEndpoints[svcPortName])
delete(*sm, svcPortName)
} else {
glog.Errorf("Service port %q removed, but doesn't exists", svcPortName)
}
}
}
func (em proxyEndpointsMap) merge(other proxyEndpointsMap, curServices proxyServiceMap) {
// Endpoint Update/Add
for svcPortName := range other {
epInfos, exists := em[svcPortName]
if exists {
info, exists := curServices[svcPortName]
if exists {
glog.V(1).Infof("Updating existing service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol)
glog.V(2).Infof("Endpoints are modified. Service [%v] is stale", svcPortName)
info.cleanupAllPolicies(epInfos)
} else {
// If no service exists, just cleanup the remote endpoints
glog.V(2).Infof("Endpoints are orphaned. Cleaning up")
// Cleanup Endpoints references
for _, ep := range epInfos {
ep.Cleanup()
}
}
delete(em, svcPortName)
}
em[svcPortName] = other[svcPortName]
}
}
func (em proxyEndpointsMap) unmerge(other proxyEndpointsMap, curServices proxyServiceMap) {
// Endpoint Update/Removal
for svcPortName := range other {
info, exists := curServices[svcPortName]
if exists {
glog.V(2).Infof("Service [%v] is stale", info)
info.cleanupAllPolicies(em[svcPortName])
} else {
// If no service exists, just cleanup the remote endpoints
glog.V(2).Infof("Endpoints are orphaned. Cleaning up")
// Cleanup Endpoints references
epInfos, exists := em[svcPortName]
if exists {
for _, ep := range epInfos {
ep.Cleanup()
}
}
}
delete(em, svcPortName)
}
}
// Proxier is an hns based proxy for connections between a localhost:lport
// and services that provide the actual backends.
type Proxier struct {
// endpointsChanges and serviceChanges contains all changes to endpoints and
// services that happened since policies were synced. For a single object,
// changes are accumulated, i.e. previous is state from before all of them,
// current is state after applying all of those.
endpointsChanges endpointsChangeMap
serviceChanges serviceChangeMap
mu sync.Mutex // protects the following fields
serviceMap proxyServiceMap
endpointsMap proxyEndpointsMap
portsMap map[localPort]closeable
// endpointsSynced and servicesSynced are set to true when corresponding
// objects are synced after startup. This is used to avoid updating hns policies
// with some partial data after kube-proxy restart.
endpointsSynced bool
servicesSynced bool
initialized int32
syncRunner *async.BoundedFrequencyRunner // governs calls to syncProxyRules
// These are effectively const and do not need the mutex to be held.
masqueradeAll bool
masqueradeMark string
clusterCIDR string
hostname string
nodeIP net.IP
recorder record.EventRecorder
healthChecker healthcheck.Server
healthzServer healthcheck.HealthzUpdater
// Since converting probabilities (floats) to strings is expensive and we only
// use probabilities of the form 1/n, we precompute some of them and cache them
// for future reuse.
precomputedProbabilities []string
network hnsNetworkInfo
}
type localPort struct {
desc string
ip string
port int
protocol string
}
func (lp *localPort) String() string {
return fmt.Sprintf("%q (%s:%d/%s)", lp.desc, lp.ip, lp.port, lp.protocol)
}
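// Enum maps an api.Protocol to its IANA protocol number (TCP = 6, UDP = 17);
// unrecognized protocols map to 0.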
func Enum(p api.Protocol) uint16 {
if p == api.ProtocolTCP {
return 6
}
if p == api.ProtocolUDP {
return 17
}
return 0
}
type closeable interface {
Close() error
}
// Proxier implements ProxyProvider
var _ proxy.ProxyProvider = &Proxier{}
// NewProxier returns a new Proxier
func NewProxier(
syncPeriod time.Duration,
minSyncPeriod time.Duration,
masqueradeAll bool,
masqueradeBit int,
clusterCIDR string,
hostname string,
nodeIP net.IP,
recorder record.EventRecorder,
healthzServer healthcheck.HealthzUpdater,
) (*Proxier, error) {
masqueradeValue := 1 << uint(masqueradeBit)
masqueradeMark := fmt.Sprintf("%#08x/%#08x", masqueradeValue, masqueradeValue)
if nodeIP == nil {
glog.Warningf("invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP")
nodeIP = net.ParseIP("127.0.0.1")
}
if len(clusterCIDR) == 0 {
glog.Warningf("clusterCIDR not specified, unable to distinguish between internal and external traffic")
}
healthChecker := healthcheck.NewServer(hostname, recorder, nil, nil) // use default implementations of deps
// TODO : Make this a param
hnsNetworkName := os.Getenv("KUBE_NETWORK")
if len(hnsNetworkName) == 0 {
return nil, fmt.Errorf("Environment variable KUBE_NETWORK not initialized")
}
hnsNetwork, err := getHnsNetworkInfo(hnsNetworkName)
if err != nil {
glog.Fatalf("Unable to find Hns Network speficied by %s. Please check environment variable KUBE_NETWORK", hnsNetworkName)
return nil, err
}
glog.V(1).Infof("Hns Network loaded with info = %v", hnsNetwork)
proxier := &Proxier{
portsMap: make(map[localPort]closeable),
serviceMap: make(proxyServiceMap),
serviceChanges: newServiceChangeMap(),
endpointsMap: make(proxyEndpointsMap),
endpointsChanges: newEndpointsChangeMap(hostname),
masqueradeAll: masqueradeAll,
masqueradeMark: masqueradeMark,
clusterCIDR: clusterCIDR,
hostname: hostname,
nodeIP: nodeIP,
recorder: recorder,
healthChecker: healthChecker,
healthzServer: healthzServer,
network: *hnsNetwork,
}
burstSyncs := 2
glog.V(3).Infof("minSyncPeriod: %v, syncPeriod: %v, burstSyncs: %d", minSyncPeriod, syncPeriod, burstSyncs)
proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, minSyncPeriod, syncPeriod, burstSyncs)
return proxier, nil
}
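// exampleNewProxier is an illustrative sketch only (not part of this proxier):
// it shows the call pattern NewProxier expects, in particular that KUBE_NETWORK
// must name an existing HNS network before construction. All literal values
// below are assumptions.
func exampleNewProxier() (*Proxier, error) {
	// KUBE_NETWORK tells kube-proxy which HNS network to program.
	os.Setenv("KUBE_NETWORK", "l2bridge-network") // hypothetical network name
	return NewProxier(
		30*time.Second,          // syncPeriod
		time.Second,             // minSyncPeriod
		false,                   // masqueradeAll
		14,                      // masqueradeBit
		"10.244.0.0/16",         // clusterCIDR (assumed)
		"node-1",                // hostname (assumed)
		net.ParseIP("10.0.0.4"), // nodeIP (assumed)
		nil,                     // recorder
		nil,                     // healthzServer
	)
}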
// CleanupLeftovers removes all hns rules created by the Proxier
// It returns true if an error was encountered. Errors are logged.
func CleanupLeftovers() (encounteredError bool) {
// Delete all Hns Load Balancer Policies
deleteAllHnsLoadBalancerPolicy()
// TODO
// Delete all Hns Remote endpoints
return encounteredError
}
func (svcInfo *serviceInfo) cleanupAllPolicies(endpoints []*endpointsInfo) {
Log(svcInfo, "Service Cleanup", 3)
if svcInfo.policyApplied {
svcInfo.deleteAllHnsLoadBalancerPolicy()
// Cleanup Endpoints references
for _, ep := range endpoints {
ep.Cleanup()
}
svcInfo.policyApplied = false
}
}
func (svcInfo *serviceInfo) deleteAllHnsLoadBalancerPolicy() {
// Remove the Hns Policy corresponding to this service
deleteHnsLoadBalancerPolicy(svcInfo.hnsID)
svcInfo.hnsID = ""
deleteHnsLoadBalancerPolicy(svcInfo.nodePorthnsID)
svcInfo.nodePorthnsID = ""
for _, externalIp := range svcInfo.externalIPs {
deleteHnsLoadBalancerPolicy(externalIp.hnsID)
externalIp.hnsID = ""
}
for _, lbIngressIp := range svcInfo.loadBalancerIngressIPs {
deleteHnsLoadBalancerPolicy(lbIngressIp.hnsID)
lbIngressIp.hnsID = ""
}
}
func deleteAllHnsLoadBalancerPolicy() {
plists, err := hcsshim.HNSListPolicyListRequest()
if err != nil {
return
}
for _, plist := range plists {
LogJson(plist, "Remove Policy", 3)
_, err = plist.Delete()
if err != nil {
glog.Errorf("%v", err)
}
}
}
// getHnsLoadBalancer returns the LoadBalancer policy resource if one already exists.
// If not, it creates one and returns it.
func getHnsLoadBalancer(endpoints []hcsshim.HNSEndpoint, isILB bool, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*hcsshim.PolicyList, error) {
plists, err := hcsshim.HNSListPolicyListRequest()
if err != nil {
return nil, err
}
for _, plist := range plists {
if len(plist.EndpointReferences) != len(endpoints) {
continue
}
// Validate if input meets any of the policy lists
elbPolicy := hcsshim.ELBPolicy{}
if err = json.Unmarshal(plist.Policies[0], &elbPolicy); err != nil {
continue
}
if elbPolicy.Protocol == protocol && elbPolicy.InternalPort == internalPort && elbPolicy.ExternalPort == externalPort && elbPolicy.ILB == isILB {
if len(vip) > 0 {
if len(elbPolicy.VIPs) == 0 || elbPolicy.VIPs[0] != vip {
continue
}
}
LogJson(plist, "Found existing Hns loadbalancer policy resource", 1)
return &plist, nil
}
}
//TODO: sourceVip is not used. If required, expose this as a param
var sourceVip string
lb, err := hcsshim.AddLoadBalancer(
endpoints,
isILB,
sourceVip,
vip,
protocol,
internalPort,
externalPort,
)
if err == nil {
LogJson(lb, "Hns loadbalancer policy resource", 1)
}
return lb, err
}
func deleteHnsLoadBalancerPolicy(hnsID string) {
if len(hnsID) == 0 {
// Return silently
return
}
// Cleanup HNS policies
hnsloadBalancer, err := hcsshim.GetPolicyListByID(hnsID)
if err != nil {
glog.Errorf("%v", err)
return
}
LogJson(hnsloadBalancer, "Removing Policy", 2)
_, err = hnsloadBalancer.Delete()
if err != nil {
glog.Errorf("%v", err)
}
}
func deleteHnsEndpoint(hnsID string) {
hnsendpoint, err := hcsshim.GetHNSEndpointByID(hnsID)
if err != nil {
glog.Errorf("%v", err)
return
}
_, err = hnsendpoint.Delete()
if err != nil {
glog.Errorf("%v", err)
}
glog.V(3).Infof("Remote endpoint resource deleted id %s", hnsID)
}
func getHnsNetworkInfo(hnsNetworkName string) (*hnsNetworkInfo, error) {
hnsnetwork, err := hcsshim.GetHNSNetworkByName(hnsNetworkName)
if err != nil {
glog.Errorf("%v", err)
return nil, err
}
return &hnsNetworkInfo{
id: hnsnetwork.Id,
name: hnsnetwork.Name,
}, nil
}
func getHnsEndpointByIpAddress(ip net.IP, networkName string) (*hcsshim.HNSEndpoint, error) {
hnsnetwork, err := hcsshim.GetHNSNetworkByName(networkName)
if err != nil {
glog.Errorf("%v", err)
return nil, err
}
endpoints, err := hcsshim.HNSListEndpointRequest()
if err != nil {
glog.Errorf("%v", err)
return nil, err
}
for _, endpoint := range endpoints {
equal := reflect.DeepEqual(endpoint.IPAddress, ip)
if equal && endpoint.VirtualNetwork == hnsnetwork.Id {
return &endpoint, nil
}
}
return nil, fmt.Errorf("Endpoint %v not found on network %s", ip, networkName)
}
// Sync is called to synchronize the proxier state to hns as soon as possible.
func (proxier *Proxier) Sync() {
proxier.syncRunner.Run()
}
// SyncLoop runs periodic work. This is expected to run as a goroutine or as the main loop of the app. It does not return.
func (proxier *Proxier) SyncLoop() {
// Update healthz timestamp at beginning in case Sync() never succeeds.
if proxier.healthzServer != nil {
proxier.healthzServer.UpdateTimestamp()
}
proxier.syncRunner.Loop(wait.NeverStop)
}
func (proxier *Proxier) setInitialized(value bool) {
var initialized int32
if value {
initialized = 1
}
atomic.StoreInt32(&proxier.initialized, initialized)
}
func (proxier *Proxier) isInitialized() bool {
return atomic.LoadInt32(&proxier.initialized) > 0
}
func (proxier *Proxier) OnServiceAdd(service *api.Service) {
namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
if proxier.serviceChanges.update(&namespacedName, nil, service) && proxier.isInitialized() {
proxier.syncRunner.Run()
}
}
func (proxier *Proxier) OnServiceUpdate(oldService, service *api.Service) {
namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
if proxier.serviceChanges.update(&namespacedName, oldService, service) && proxier.isInitialized() {
proxier.syncRunner.Run()
}
}
func (proxier *Proxier) OnServiceDelete(service *api.Service) {
namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
if proxier.serviceChanges.update(&namespacedName, service, nil) && proxier.isInitialized() {
proxier.syncRunner.Run()
}
}
func (proxier *Proxier) OnServiceSynced() {
proxier.mu.Lock()
proxier.servicesSynced = true
proxier.setInitialized(proxier.servicesSynced && proxier.endpointsSynced)
proxier.mu.Unlock()
// Sync unconditionally - this is called once per lifetime.
proxier.syncProxyRules()
}
func shouldSkipService(svcName types.NamespacedName, service *api.Service) bool {
// if ClusterIP is "None" or empty, skip proxying
if !helper.IsServiceIPSet(service) {
glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
return true
}
// Even if ClusterIP is set, ServiceTypeExternalName services don't get proxied
if service.Spec.Type == api.ServiceTypeExternalName {
glog.V(3).Infof("Skipping service %s due to Type=ExternalName", svcName)
return true
}
return false
}
// <serviceMap> is updated by this function (based on the given changes).
// <changes> map is cleared after applying them.
func (proxier *Proxier) updateServiceMap() (result updateServiceMapResult) {
result.staleServices = sets.NewString()
var serviceMap proxyServiceMap = proxier.serviceMap
var changes *serviceChangeMap = &proxier.serviceChanges
func() {
changes.lock.Lock()
defer changes.lock.Unlock()
for _, change := range changes.items {
existingPorts := serviceMap.merge(change.current, proxier.endpointsMap)
serviceMap.unmerge(change.previous, existingPorts, result.staleServices, proxier.endpointsMap)
}
changes.items = make(map[types.NamespacedName]*serviceChange)
}()
// TODO: If this will appear to be computationally expensive, consider
// computing this incrementally similarly to serviceMap.
result.hcServices = make(map[types.NamespacedName]uint16)
for svcPortName, info := range serviceMap {
if info.healthCheckNodePort != 0 {
result.hcServices[svcPortName.NamespacedName] = uint16(info.healthCheckNodePort)
}
}
return result
}
func (proxier *Proxier) OnEndpointsAdd(endpoints *api.Endpoints) {
namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}
if proxier.endpointsChanges.update(&namespacedName, nil, endpoints) && proxier.isInitialized() {
proxier.syncRunner.Run()
}
}
func (proxier *Proxier) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoints) {
namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}
if proxier.endpointsChanges.update(&namespacedName, oldEndpoints, endpoints) && proxier.isInitialized() {
proxier.syncRunner.Run()
}
}
func (proxier *Proxier) OnEndpointsDelete(endpoints *api.Endpoints) {
namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}
if proxier.endpointsChanges.update(&namespacedName, endpoints, nil) && proxier.isInitialized() {
proxier.syncRunner.Run()
}
}
func (proxier *Proxier) OnEndpointsSynced() {
proxier.mu.Lock()
proxier.endpointsSynced = true
proxier.setInitialized(proxier.servicesSynced && proxier.endpointsSynced)
proxier.mu.Unlock()
// Sync unconditionally - this is called once per lifetime.
proxier.syncProxyRules()
}
// <endpointsMap> is updated by this function (based on the given changes).
// <changes> map is cleared after applying them.
func (proxier *Proxier) updateEndpointsMap() (result updateEndpointMapResult) {
result.staleEndpoints = make(map[endpointServicePair]bool)
result.staleServiceNames = make(map[proxy.ServicePortName]bool)
var endpointsMap proxyEndpointsMap = proxier.endpointsMap
var changes *endpointsChangeMap = &proxier.endpointsChanges
func() {
changes.lock.Lock()
defer changes.lock.Unlock()
for _, change := range changes.items {
endpointsMap.unmerge(change.previous, proxier.serviceMap)
endpointsMap.merge(change.current, proxier.serviceMap)
}
changes.items = make(map[types.NamespacedName]*endpointsChange)
}()
if !utilfeature.DefaultFeatureGate.Enabled(features.ExternalTrafficLocalOnly) {
return
}
// TODO: If this will appear to be computationally expensive, consider
// computing this incrementally similarly to endpointsMap.
result.hcEndpoints = make(map[types.NamespacedName]int)
localIPs := getLocalIPs(endpointsMap)
for nsn, ips := range localIPs {
result.hcEndpoints[nsn] = len(ips)
}
return result
}
func getLocalIPs(endpointsMap proxyEndpointsMap) map[types.NamespacedName]sets.String {
localIPs := make(map[types.NamespacedName]sets.String)
for svcPortName := range endpointsMap {
for _, ep := range endpointsMap[svcPortName] {
if ep.isLocal {
nsn := svcPortName.NamespacedName
if localIPs[nsn] == nil {
localIPs[nsn] = sets.NewString()
}
localIPs[nsn].Insert(ep.ip) // just the IP part
}
}
}
return localIPs
}
// Translates a single Endpoints object to a proxyEndpointsMap.
// This function is used for incremental updates of endpointsMap.
//
// NOTE: endpoints object should NOT be modified.
func endpointsToEndpointsMap(endpoints *api.Endpoints, hostname string) proxyEndpointsMap {
if endpoints == nil {
return nil
}
endpointsMap := make(proxyEndpointsMap)
// We need to build a map of portname -> all ip:ports for that
// portname. Explode Endpoints.Subsets[*] into this structure.
for i := range endpoints.Subsets {
ss := &endpoints.Subsets[i]
for i := range ss.Ports {
port := &ss.Ports[i]
if port.Port == 0 {
glog.Warningf("ignoring invalid endpoint port %s", port.Name)
continue
}
svcPortName := proxy.ServicePortName{
NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name},
Port: port.Name,
}
for i := range ss.Addresses {
addr := &ss.Addresses[i]
if addr.IP == "" {
glog.Warningf("ignoring invalid endpoint port %s with empty host", port.Name)
continue
}
isLocal := addr.NodeName != nil && *addr.NodeName == hostname
epInfo := newEndpointInfo(addr.IP, uint16(port.Port), isLocal)
endpointsMap[svcPortName] = append(endpointsMap[svcPortName], epInfo)
}
if glog.V(3) {
newEPList := []*endpointsInfo{}
for _, ep := range endpointsMap[svcPortName] {
newEPList = append(newEPList, ep)
}
glog.Infof("Setting endpoints for %q to %+v", svcPortName, newEPList)
}
}
}
return endpointsMap
}
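// exampleEndpointsToEndpointsMap is an illustrative sketch (not part of the
// proxier): a minimal Endpoints object yields one endpointsInfo per
// address/port pair, keyed by ServicePortName. All literal values are assumed.
func exampleEndpointsToEndpointsMap() proxyEndpointsMap {
	endpoints := &api.Endpoints{
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "10.244.1.5"}},
			Ports:     []api.EndpointPort{{Name: "http", Port: 8080}},
		}},
	}
	// With hostname "node-1" and no NodeName on the address, isLocal is false.
	return endpointsToEndpointsMap(endpoints, "node-1")
}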
// Translates a single Service object to a proxyServiceMap.
//
// NOTE: service object should NOT be modified.
func serviceToServiceMap(service *api.Service) proxyServiceMap {
if service == nil {
return nil
}
svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
if shouldSkipService(svcName, service) {
return nil
}
serviceMap := make(proxyServiceMap)
for i := range service.Spec.Ports {
servicePort := &service.Spec.Ports[i]
svcPortName := proxy.ServicePortName{NamespacedName: svcName, Port: servicePort.Name}
serviceMap[svcPortName] = newServiceInfo(svcPortName, servicePort, service)
}
return serviceMap
}
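// exampleServiceToServiceMap is an illustrative sketch (not part of the
// proxier): serviceToServiceMap produces one entry per declared port, while
// headless (ClusterIP "None") and ExternalName services are skipped and yield
// nil. All literal values are assumed.
func exampleServiceToServiceMap() proxyServiceMap {
	svc := &api.Service{
		Spec: api.ServiceSpec{
			ClusterIP: "10.96.0.20",
			Ports:     []api.ServicePort{{Name: "http", Port: 80, Protocol: api.ProtocolTCP}},
		},
	}
	return serviceToServiceMap(svc)
}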
// This is where all of the HNS load balancer policy programming happens.
// It acquires proxier.mu for the duration of the sync.
func (proxier *Proxier) syncProxyRules() {
proxier.mu.Lock()
defer proxier.mu.Unlock()
start := time.Now()
defer func() {
SyncProxyRulesLatency.Observe(sinceInMicroseconds(start))
glog.V(4).Infof("syncProxyRules took %v", time.Since(start))
}()
// don't sync rules till we've received services and endpoints
if !proxier.endpointsSynced || !proxier.servicesSynced {
glog.V(2).Info("Not syncing hns until Services and Endpoints have been received from master")
return
}
// We assume that if this was called, we really want to sync them,
// even if nothing changed in the meantime. In other words, callers are
// responsible for detecting no-op changes and not calling this function.
serviceUpdateResult := proxier.updateServiceMap()
endpointUpdateResult := proxier.updateEndpointsMap()
staleServices := serviceUpdateResult.staleServices
// merge stale services gathered from updateEndpointsMap
for svcPortName := range endpointUpdateResult.staleServiceNames {
if svcInfo, ok := proxier.serviceMap[svcPortName]; ok && svcInfo != nil && svcInfo.protocol == api.ProtocolUDP {
glog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.clusterIP.String())
staleServices.Insert(svcInfo.clusterIP.String())
}
}
glog.V(3).Infof("Syncing Policies")
// Program HNS by adding corresponding policies for each service.
for svcName, svcInfo := range proxier.serviceMap {
if svcInfo.policyApplied {
glog.V(4).Infof("Policy already applied for %s", spew.Sdump(svcInfo))
continue
}
var hnsEndpoints []hcsshim.HNSEndpoint
glog.V(4).Infof("====Applying Policy for %s====", svcName)
// Create Remote endpoints for every endpoint, corresponding to the service
for _, ep := range proxier.endpointsMap[svcName] {
var newHnsEndpoint *hcsshim.HNSEndpoint
hnsNetworkName := proxier.network.name
var err error
if len(ep.hnsID) > 0 {
newHnsEndpoint, err = hcsshim.GetHNSEndpointByID(ep.hnsID)
}
if newHnsEndpoint == nil {
// First check if an endpoint resource exists for this IP, on the current host
// A Local endpoint could exist here already
// A remote endpoint was already created and proxy was restarted
newHnsEndpoint, err = getHnsEndpointByIpAddress(net.ParseIP(ep.ip), hnsNetworkName)
}
if newHnsEndpoint == nil {
if ep.isLocal {
glog.Errorf("Local endpoint not found for %v: err: %v on network %s", ep.ip, err, hnsNetworkName)
continue
}
// hns Endpoint resource was not found, create one
hnsnetwork, err := hcsshim.GetHNSNetworkByName(hnsNetworkName)
if err != nil {
glog.Errorf("%v", err)
continue
}
hnsEndpoint := &hcsshim.HNSEndpoint{
MacAddress: ep.macAddress,
IPAddress: net.ParseIP(ep.ip),
}
newHnsEndpoint, err = hnsnetwork.CreateRemoteEndpoint(hnsEndpoint)
if err != nil {
glog.Errorf("Remote endpoint creation failed: %v", err)
continue
}
}
// Save the hnsId for reference
LogJson(newHnsEndpoint, "Hns Endpoint resource", 1)
hnsEndpoints = append(hnsEndpoints, *newHnsEndpoint)
ep.hnsID = newHnsEndpoint.Id
ep.refCount++
Log(ep, "Endpoint resource found", 3)
}
glog.V(3).Infof("Associated endpoints [%s] for service [%s]", spew.Sdump(hnsEndpoints), svcName)
if len(svcInfo.hnsID) > 0 {
// This should not happen
glog.Warningf("Load Balancer already exists %s -- Debug ", svcInfo.hnsID)
}
if len(hnsEndpoints) == 0 {
glog.Errorf("Endpoint information not available for service %s. Not applying any policy", svcName)
continue
}
glog.V(4).Infof("Trying to Apply Policies for service %s", spew.Sdump(svcInfo))
var hnsLoadBalancer *hcsshim.PolicyList
hnsLoadBalancer, err := getHnsLoadBalancer(
hnsEndpoints,
false,
svcInfo.clusterIP.String(),
Enum(svcInfo.protocol),
uint16(svcInfo.targetPort),
uint16(svcInfo.port),
)
if err != nil {
glog.Errorf("Policy creation failed: %v", err)
continue
}
svcInfo.hnsID = hnsLoadBalancer.ID
glog.V(3).Infof("Hns LoadBalancer resource created for cluster ip resources %v, Id [%s]", svcInfo.clusterIP, hnsLoadBalancer.ID)
// If nodePort is specified, user should be able to use nodeIP:nodePort to reach the backend endpoints
if svcInfo.nodePort > 0 {
hnsLoadBalancer, err := getHnsLoadBalancer(
hnsEndpoints,
false,
"", // VIP has to be empty to automatically select the nodeIP
Enum(svcInfo.protocol),
uint16(svcInfo.targetPort),
uint16(svcInfo.nodePort),
)
if err != nil {
glog.Errorf("Policy creation failed: %v", err)
continue
}
svcInfo.nodePorthnsID = hnsLoadBalancer.ID
glog.V(3).Infof("Hns LoadBalancer resource created for nodePort resources %v, Id [%s]", svcInfo.clusterIP, hnsLoadBalancer.ID)
}
// Create a Load Balancer Policy for each external IP
for _, externalIp := range svcInfo.externalIPs {
// Try loading existing policies, if already available
hnsLoadBalancer, err := getHnsLoadBalancer(
hnsEndpoints,
false,
externalIp.ip,
Enum(svcInfo.protocol),
uint16(svcInfo.targetPort),
uint16(svcInfo.port),
)
if err != nil {
glog.Errorf("Policy creation failed: %v", err)
continue
}
externalIp.hnsID = hnsLoadBalancer.ID
glog.V(3).Infof("Hns LoadBalancer resource created for externalIp resources %v, Id[%s]", externalIp, hnsLoadBalancer.ID)
}
// Create a Load Balancer Policy for each loadbalancer ingress
for _, lbIngressIp := range svcInfo.loadBalancerIngressIPs {
// Try loading existing policies, if already available
hnsLoadBalancer, err := getHnsLoadBalancer(
hnsEndpoints,
false,
lbIngressIp.ip,
Enum(svcInfo.protocol),
uint16(svcInfo.targetPort),
uint16(svcInfo.port),
)
if err != nil {
glog.Errorf("Policy creation failed: %v", err)
continue
}
lbIngressIp.hnsID = hnsLoadBalancer.ID
glog.V(3).Infof("Hns LoadBalancer resource created for loadBalancer Ingress resources %v", lbIngressIp)
}
svcInfo.policyApplied = true
Log(svcInfo, "+++Policy Successfully applied for service +++", 2)
}
// Update healthz timestamp.
if proxier.healthzServer != nil {
proxier.healthzServer.UpdateTimestamp()
}
// Update healthchecks. The endpoints list might include services that are
// not "OnlyLocal", but the services list will not, and the healthChecker
// will just drop those endpoints.
if err := proxier.healthChecker.SyncServices(serviceUpdateResult.hcServices); err != nil {
glog.Errorf("Error syncing healtcheck services: %v", err)
}
if err := proxier.healthChecker.SyncEndpoints(endpointUpdateResult.hcEndpoints); err != nil {
glog.Errorf("Error syncing healthcheck endoints: %v", err)
}
// Finish housekeeping.
// TODO: these could be made more consistent.
for _, svcIP := range staleServices.UnsortedList() {
// TODO : Check if this is required to cleanup stale services here
glog.V(5).Infof("Pending delete stale service IP %s connections", svcIP)
}
}
type endpointServicePair struct {
endpoint string
servicePortName proxy.ServicePortName
}
|
[
"\"KUBE_NETWORK\""
] |
[] |
[
"KUBE_NETWORK"
] |
[]
|
["KUBE_NETWORK"]
|
go
| 1 | 0 | |
test/utils/test_db.go
|
package utils
import (
"fmt"
"os"
_ "github.com/go-sql-driver/mysql"
"github.com/jinzhu/gorm"
_ "github.com/lib/pq"
)
// TestDB initializes a database for testing.
func TestDB() *gorm.DB {
var db *gorm.DB
var err error
var dbuser, dbpwd, dbname, dbhost = "qor", "qor", "qor_test", "localhost"
if os.Getenv("DB_USER") != "" {
dbuser = os.Getenv("DB_USER")
}
if os.Getenv("DB_PWD") != "" {
dbpwd = os.Getenv("DB_PWD")
}
if os.Getenv("DB_NAME") != "" {
dbname = os.Getenv("DB_NAME")
}
if os.Getenv("DB_HOST") != "" {
dbhost = os.Getenv("DB_HOST")
}
if os.Getenv("TEST_DB") == "mysql" {
// CREATE USER 'qor'@'localhost' IDENTIFIED BY 'qor';
// CREATE DATABASE qor_test;
// GRANT ALL ON qor_test.* TO 'qor'@'localhost';
db, err = gorm.Open("mysql", fmt.Sprintf("%s:%s@/%s?charset=utf8&parseTime=True&loc=Local", dbuser, dbpwd, dbname))
} else {
db, err = gorm.Open("postgres", fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=disable", dbuser, dbpwd, dbhost, dbname))
}
if err != nil {
panic(err)
}
if os.Getenv("DEBUG") != "" {
db.LogMode(true)
}
return db
}
var db *gorm.DB
func GetTestDB() *gorm.DB {
if db != nil {
return db
}
db = TestDB()
return db
}
// PrepareDBAndTables prepares the given tables cleanly and returns a test database instance.
func PrepareDBAndTables(tables ...interface{}) *gorm.DB {
db := GetTestDB()
ResetDBTables(db, tables...)
return db
}
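// exampleModel is a hypothetical struct used only to illustrate the helpers in
// this file; it is not part of qor's test utilities.
type exampleModel struct {
	ID   uint
	Name string
}

// examplePrepare sketches the intended call pattern: drop and re-migrate the
// given tables, then run assertions against the returned *gorm.DB.
func examplePrepare() *gorm.DB {
	return PrepareDBAndTables(&exampleModel{})
}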
// ResetDBTables resets the given tables.
func ResetDBTables(db *gorm.DB, tables ...interface{}) {
Truncate(db, tables...)
AutoMigrate(db, tables...)
}
// Truncate receives table arguments and truncates their content in the database.
func Truncate(db *gorm.DB, givenTables ...interface{}) {
// We need to iterate through the list in reverse order of
// creation, since later tables may have constraints or
// dependencies on earlier tables.
count := len(givenTables)
for i := range givenTables {
table := givenTables[count-i-1]
db.DropTableIfExists(table)
}
}
// AutoMigrate receives table arguments and creates or updates their
// table structure in the database.
func AutoMigrate(db *gorm.DB, givenTables ...interface{}) {
for _, table := range givenTables {
db.AutoMigrate(table)
if migratable, ok := table.(Migratable); ok {
exec(func() error { return migratable.AfterMigrate(db) })
}
}
}
// Migratable defines an interface for implementing post-migration
// actions such as adding constraints that aren't supported by Gorm's
// struct tags. AfterMigrate must be idempotent, since it will most
// likely be executed multiple times.
type Migratable interface {
AfterMigrate(db *gorm.DB) error
}
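// exampleConstrainedModel sketches, hypothetically, how a model can satisfy
// Migratable; it is not part of qor's test utilities. The HasIndex guard keeps
// AfterMigrate idempotent, as the interface documentation requires.
type exampleConstrainedModel struct {
	ID         uint
	CategoryID uint
}

func (exampleConstrainedModel) AfterMigrate(db *gorm.DB) error {
	scope := db.NewScope(&exampleConstrainedModel{})
	if db.Dialect().HasIndex(scope.TableName(), "idx_example_category") {
		return nil // already applied on a previous run
	}
	return db.Model(&exampleConstrainedModel{}).AddIndex("idx_example_category", "category_id").Error
}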
func exec(c func() error) {
if err := c(); err != nil {
panic(err)
}
}
|
[
"\"DB_USER\"",
"\"DB_USER\"",
"\"DB_PWD\"",
"\"DB_PWD\"",
"\"DB_NAME\"",
"\"DB_NAME\"",
"\"DB_HOST\"",
"\"DB_HOST\"",
"\"TEST_DB\"",
"\"DEBUG\""
] |
[] |
[
"DB_HOST",
"DB_NAME",
"TEST_DB",
"DEBUG",
"DB_PWD",
"DB_USER"
] |
[]
|
["DB_HOST", "DB_NAME", "TEST_DB", "DEBUG", "DB_PWD", "DB_USER"]
|
go
| 6 | 0 | |
tests/unit.py
|
import unittest
import sys
import os
import re
import time
import shutil
import tempfile
import threading
import errno
sys.path.append(os.path.join("..", "src", "bin"))
from web_ping import URLField, DurationField, WebPing, NTLMAuthenticationValueException
from modular_input import Field, FieldValidationException
from website_monitoring_rest_handler import HostFieldValidator
from website_monitoring_app import requests
from six import StringIO
from unit_test_web_server import UnitTestWithWebServer, skipIfNoServer
from test_proxy_server import get_server as get_proxy_server
def skipIfNoProxyServer(func):
def _decorator(self, *args, **kwargs):
if not hasattr(self, 'proxy_address') or self.proxy_address is None:
self.skipTest("No proxy address defined, proxy based test will not run")
return
elif not hasattr(self, 'proxy_port') or self.proxy_port is None:
self.skipTest("No proxy port defined, proxy based test will not run")
return
elif not hasattr(self, 'proxy_type') or self.proxy_type is None:
self.skipTest("No proxy type defined, proxy based test will not run")
return
else:
return func(self, *args, **kwargs)
return _decorator
class WebsiteMonitoringAppTest(unittest.TestCase):
DEFAULT_TEST_PROXY_SERVER_PORT = 21080
warned_about_no_proxyd = False
proxyd = None
proxy_address = None
proxy_port = None
proxy_type = None
config_loaded = False
def toInt(self, str_int):
if str_int is None:
return None
else:
return int(str_int)
def loadConfig(self, properties_file=None):
# Stop if we already loaded the configuration
if WebsiteMonitoringAppTest.config_loaded:
return
# Load the port from the environment if possible. This might get overridden by the local.properties file.
WebsiteMonitoringAppTest.proxy_port = int(os.environ.get("TEST_PROXY_SERVER_PORT", WebsiteMonitoringAppTest.DEFAULT_TEST_PROXY_SERVER_PORT))
fp = None
if properties_file is None:
properties_file = os.path.join("..", "local.properties")
try:
fp = open(properties_file)
except IOError:
pass
if fp is not None:
regex = re.compile("(?P<key>[^=]+)[=](?P<value>.*)")
settings = {}
for l in fp.readlines():
r = regex.search(l)
if r is not None:
d = r.groupdict()
settings[d["key"]] = d["value"]
# Load the parameters from the properties file
WebsiteMonitoringAppTest.proxy_address = settings.get("value.test.proxy.address", WebsiteMonitoringAppTest.proxy_address)
WebsiteMonitoringAppTest.proxy_port = self.toInt(settings.get("value.test.proxy.port", WebsiteMonitoringAppTest.proxy_port))
WebsiteMonitoringAppTest.proxy_type = settings.get("value.test.proxy.type", None)
# If no proxy was defined, use the internal proxy server for testing
if WebsiteMonitoringAppTest.proxyd is None and WebsiteMonitoringAppTest.proxy_address is None:
WebsiteMonitoringAppTest.proxy_address = "127.0.0.1"
WebsiteMonitoringAppTest.proxy_port = WebsiteMonitoringAppTest.proxy_port
WebsiteMonitoringAppTest.proxy_type = "http"
WebsiteMonitoringAppTest.proxyd = get_proxy_server(WebsiteMonitoringAppTest.proxy_port)
def start_server(proxyd):
proxyd.serve_forever()
t = threading.Thread(target=start_server, args = (WebsiteMonitoringAppTest.proxyd,))
t.daemon = True
t.start()
# Note that we loaded the config already so that we don't try it again.
WebsiteMonitoringAppTest.config_loaded = True
def setUp(self):
self.loadConfig()
class TestHostFieldValidator(unittest.TestCase):
def test_underscore_allowed(self):
# http://lukemurphey.net/issues/1002
# http://answers.splunk.com/answers/233571/website-monitoring-is-not-working-with-proxy-setup.html
validator = HostFieldValidator()
self.assertTrue(validator.is_valid_hostname("my_proxy.localhost.com"))
class TestURLField(unittest.TestCase):
def test_url_field_valid(self):
url_field = URLField("test_url_field_valid", "title", "this is a test")
self.assertEqual(url_field.to_python("http://google.com").geturl(), "http://google.com")
self.assertEqual(url_field.to_python("http://google.com/with/path").geturl(), "http://google.com/with/path")
self.assertEqual(url_field.to_python("http://google.com:8080/with/port").geturl(), "http://google.com:8080/with/port")
def test_url_field_invalid(self):
url_field = URLField("test_url_field_invalid", "title", "this is a test")
self.assertRaises(FieldValidationException, lambda: url_field.to_python("hxxp://google.com"))
self.assertRaises(FieldValidationException, lambda: url_field.to_python("http://"))
self.assertRaises(FieldValidationException, lambda: url_field.to_python("google.com"))
class TestDurationField(unittest.TestCase):
def test_duration_valid(self):
duration_field = DurationField("test_duration_valid", "title", "this is a test")
self.assertEqual(duration_field.to_python("1m"), 60)
self.assertEqual(duration_field.to_python("5m"), 300)
self.assertEqual(duration_field.to_python("5 minute"), 300)
self.assertEqual(duration_field.to_python("5"), 5)
self.assertEqual(duration_field.to_python("5h"), 18000)
self.assertEqual(duration_field.to_python("2d"), 172800)
self.assertEqual(duration_field.to_python("2w"), 86400 * 7 * 2)
def test_url_field_invalid(self):
duration_field = DurationField("test_url_field_invalid", "title", "this is a test")
self.assertRaises(FieldValidationException, lambda: duration_field.to_python("1 treefrog"))
self.assertRaises(FieldValidationException, lambda: duration_field.to_python("minute"))
def skipIfNoServer(func):
def _decorator(self, *args, **kwargs):
if self.httpd is None:
# Don't run the test if the server is not running
self.skipTest("The web-server is not running")
else:
return func(self, *args, **kwargs)
return _decorator
class TestWebPing(WebsiteMonitoringAppTest, UnitTestWithWebServer):
def setUp(self):
super(TestWebPing, self).setUp()
self.tmp_dir = tempfile.mkdtemp(prefix="TestWebPing")
#os.makedirs(self.tmp_dir)
def tearDown(self):
shutil.rmtree(self.tmp_dir)
def test_cleanup_threads(self):
threads_running = [1, 2]
max_runs = 10
def thread_function(n):
total_time = 0
while n in threads_running and total_time < max_runs:
time.sleep(1)
total_time += 1
thread_1 = threading.Thread(target=thread_function, args=(1,))
thread_2 = threading.Thread(target=thread_function, args=(2,))
threads = {
'1': thread_1,
'2': thread_2
}
thread_1.start()
thread_2.start()
web_ping = WebPing()
self.assertEqual(len(threads), 2)
self.assertEqual(web_ping.cleanup_threads(threads), 0)
self.assertEqual(len(threads), 2)
# Stop the first thread and wait for it to complete
threads_running = [2]
thread_1.join()
self.assertEqual(web_ping.cleanup_threads(threads), 1)
self.assertEqual(len(threads), 1)
# Stop the second thread and wait for it to complete
threads_running = []
thread_2.join()
self.assertEqual(web_ping.cleanup_threads(threads), 1)
self.assertEqual(len(threads), 0)
def test_get_file_path(self):
self.assertEqual(WebPing.get_file_path("/Users/lmurphey/Applications/splunk/var/lib/splunk/modinputs/web_ping", "web_ping://TextCritical.com"), "/Users/lmurphey/Applications/splunk/var/lib/splunk/modinputs/web_ping" + os.sep + "35163af7282b92013f810b2b4822d7df.json")
def test_ping(self):
url_field = URLField("test_ping", "title", "this is a test")
result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/test_page"), timeout=3)
self.assertEqual(result.response_code, 200)
self.assertGreater(result.request_time, 0)
"""
def test_ping_include_ip(self):
url_field = URLField("test_ping_include_ip", "title", "this is a test")
result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/test_page"), timeout=3)
self.assertGreater(result.response_ip, '127.0.0.1')
"""
def test_ping_super_long_url(self):
# https://answers.splunk.com/answers/488784/why-my-website-monitoring-only-check-1-time.html
url_field = URLField("test_ping", "title", "this is a test")
#result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/test_page?s=superloooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong"), timeout=3)
result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/test_page_superlooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong"), timeout=3)
self.assertEqual(result.response_code, 200)
self.assertGreater(result.request_time, 0)
def test_ping_non_existent_domain(self):
# https://answers.splunk.com/answers/337070/website-monitoring-app-setup.html#answer-338487
url_field = URLField("test_ping_non_existent_domain", "title", "this is a test")
result = WebPing.ping(url_field.to_python("http://xyz"), timeout=3)
self.assertEqual(result.response_code, 0)
self.assertEqual(result.request_time, 0)
def test_ping_timeout(self):
url_field = URLField("test_ping_timeout", "title", "this is a test")
result = WebPing.ping(url_field.to_python("https://192.168.30.23/"), timeout=3)
self.assertEqual(result.timed_out, True)
def test_ping_with_headers(self):
url_field = URLField("test_ping", "title", "this is a test")
result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/test_page"), timeout=3, return_headers=True)
self.assertEqual(result.response_code, 200)
self.assertGreater(result.request_time, 0)
self.assertGreater(len(result.headers), 0)
self.assertEqual(result.headers['Content-type'], 'text/html')
def test_is_exception_for_timeout(self):
try:
r = requests.get('https://192.168.30.23/')
except requests.exceptions.ConnectionError as e:
if not WebPing.isExceptionForTimeout(e):
print(e)
self.assertTrue(WebPing.isExceptionForTimeout(e))
def test_save_checkpoint(self):
web_ping = WebPing()
web_ping.save_checkpoint(self.tmp_dir, "web_ping://TextCritical.com", 100)
self.assertEqual(web_ping.last_ran(self.tmp_dir, "web_ping://TextCritical.com"), 100)
def test_is_expired(self):
self.assertFalse(WebPing.is_expired(time.time(), 30))
self.assertTrue(WebPing.is_expired(time.time() - 31, 30))
def get_test_dir(self):
return os.path.dirname(os.path.abspath(__file__))
def test_needs_another_run(self):
# Test case where file does not exist
self.assertTrue(WebPing.needs_another_run("/Users/lmurphey/Applications/splunk/var/lib/splunk/modinputs/web_ping", "web_ping://DoesNotExist", 60))
# Test an interval right at the earlier edge
self.assertFalse(WebPing.needs_another_run(os.path.join(self.get_test_dir(), "configs"), "web_ping://TextCritical.com", 60, 1365486765))
# Test an interval at the later edge
self.assertFalse(WebPing.needs_another_run(os.path.join(self.get_test_dir(), "configs"), "web_ping://TextCritical.com", 10, 1365486775))
# Test interval beyond later edge
self.assertTrue(WebPing.needs_another_run(os.path.join(self.get_test_dir(), "configs"), "web_ping://TextCritical.com", 10, 1365486776))
def test_output_result(self):
web_ping = WebPing(timeout=3)
url_field = URLField("test_ping", "title", "this is a test")
result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/test_page"), timeout=3)
out = StringIO()
web_ping.output_result(result, "stanza", "title", unbroken=True, close=True, out=out)
self.assertTrue(out.getvalue().find("response_code=200") >= 0)
def test_output_result_unavailable(self):
web_ping = WebPing(timeout=3)
url_field = URLField("test_ping", "title", "this is a test")
result = WebPing.ping(url_field.to_python("http://192.168.30.23/"), timeout=3)
out = StringIO()
web_ping.output_result(result, "stanza", "title", unbroken=True, close=True, out=out)
self.assertTrue(out.getvalue().find("timed_out=True") >= 0)
def test_bad_checkpoint(self):
web_ping = WebPing()
# Make sure the call raises the expected error (it is attempting to load the data)
with self.assertRaises(ValueError):
web_ping.get_checkpoint_data(os.path.join(self.get_test_dir(), "configs"), throw_errors=True)
# Make sure the test returns None
data = web_ping.get_checkpoint_data(os.path.join(self.get_test_dir(), "configs", "web_ping://TextCritical.net"))
self.assertEqual(data, None)
@skipIfNoServer
def test_hash(self):
url_field = URLField("test_ping", "title", "this is a test")
result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/test_page"), timeout=3)
self.assertEqual(result.response_code, 200)
self.assertEqual(result.response_md5, '1f6c14189070f50c4c06ada640c14850') # This is 1f6c14189070f50c4c06ada640c14850 on disk
self.assertEqual(result.response_sha224, 'deaf4c0062539c98b4e957712efcee6d42832fed2d803c2bbf984b23')
def test_missing_servername(self):
"""
Some web-servers require that the "Host" be included on SSL connections when the server is hosting multiple domains on the same IP.
Without the host header, the server is unable to determine which certificate to provide and thus closes the connection.
http://lukemurphey.net/issues/1035
"""
url_field = URLField("test_ping", "title", "this is a test")
result = WebPing.ping(url_field.to_python("https://lukemurphey.net"), timeout=3)
self.assertEqual(result.response_code, 200)
@skipIfNoProxyServer
def test_ping_over_proxy(self):
url_field = URLField("test_ping", "title", "this is a test")
result = WebPing.ping(url_field.to_python("http://textcritical.com"), timeout=3, proxy_type=self.proxy_type, proxy_server=self.proxy_address, proxy_port=self.proxy_port)
self.assertEqual(result.response_code, 200)
@skipIfNoServer
def test_ping_with_basic_authentication(self):
# Try with valid authentication
url_field = URLField("test_ping", "title", "this is a test")
result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port)), timeout=3, username="admin", password="changeme")
self.assertEqual(result.response_code, 200)
self.assertEqual(result.response_md5, '1f6c14189070f50c4c06ada640c14850') # This is 1f6c14189070f50c4c06ada640c14850 on disk
self.assertEqual(result.response_sha224, 'deaf4c0062539c98b4e957712efcee6d42832fed2d803c2bbf984b23')
# Verify that bad authentication fails
result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port)), timeout=3, username="admin", password="wrongpassword")
self.assertEqual(result.response_code, 401)
self.assertGreater(result.request_time, 0)
def test_ping_with_digest_authentication(self):
# Try with valid authentication
url_field = URLField( "test_ping", "title", "this is a test")
result = WebPing.ping( url_field.to_python("http://httpbin.org/digest-auth/auth/user/passwd"), timeout=3, username="user", password="passwd")
self.assertEqual(result.response_code, 200)
@skipIfNoServer
def test_ping_with_ntlm_authentication(self):
# Try with valid authentication
url_field = URLField("test_ping", "title", "this is a test")
result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/ntlm_auth"), timeout=3, username="user\\domain", password="passwd", raise_all=True)
self.assertEqual(result.response_code, 200)
@skipIfNoServer
def test_ping_with_ntlm_negotiate_authentication(self):
# Try with valid authentication
url_field = URLField("test_ping", "title", "this is a test")
result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/ntlm_auth_negotiate"), timeout=3, username="user\\domain", password="passwd")
self.assertEqual(result.response_code, 200)
def test_ping_with_ntlm_authentication_missing_domain(self):
# Try with missing domain
url_field = URLField( "test_ping", "title", "this is a test")
self.assertRaises(NTLMAuthenticationValueException, lambda: WebPing.ping( url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/ntlm_auth"), timeout=3, username="user", password="passwd"))
@skipIfNoServer
def test_ping_with_basic_authentication_optional(self):
# Try with valid authentication
url_field = URLField("test_ping", "title", "this is a test")
result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/optional_auth"), timeout=3, username="admin", password="changeme")
self.assertEqual(result.response_code, 203)
# Verify that no authentication still works
result = WebPing.ping( url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/optional_auth"), timeout=3)
self.assertEqual(result.response_code, 202)
self.assertGreater(result.request_time, 0)
@skipIfNoServer
def test_determine_auth_method_basic(self):
# Try with basic auth
url_field = URLField( "test_ping", "title", "this is a test")
auth_type = WebPing.determine_auth_type(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port)))
self.assertEqual(auth_type, WebPing.HTTP_AUTH_BASIC)
def test_determine_auth_method_digest(self):
# Try with digest auth
url_field = URLField( "test_ping", "title", "this is a test")
auth_type = WebPing.determine_auth_type(url_field.to_python("http://httpbin.org/digest-auth/auth/user/passwd"))
self.assertEqual(auth_type, WebPing.HTTP_AUTH_DIGEST)
@skipIfNoServer
def test_determine_auth_method_ntlm(self):
# Try with digest auth
url_field = URLField( "test_ping", "title", "this is a test")
auth_type = WebPing.determine_auth_type(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/ntlm_auth"))
self.assertEqual(auth_type, WebPing.HTTP_AUTH_NTLM)
@skipIfNoServer
def test_determine_auth_method_ntlm_comma_header(self):
# Try with digest auth
url_field = URLField( "test_ping", "title", "this is a test")
auth_type = WebPing.determine_auth_type(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/ntlm_auth_negotiate"))
self.assertEqual(auth_type, WebPing.HTTP_AUTH_NTLM)
@skipIfNoServer
def test_determine_auth_method_none(self):
# Try with digest auth
url_field = URLField( "test_ping", "title", "this is a test")
auth_type = WebPing.determine_auth_type(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/test_page"))
self.assertEqual(auth_type, WebPing.HTTP_AUTH_NONE)
@skipIfNoServer
def test_custom_user_agent(self):
"""
http://lukemurphey.net/issues/1341
"""
url_field = URLField("test_ping", "title", "this is a test")
# Make sure that the server is validating the user-agent by returning 200 when the user-agent doesn't match
# This just validates that the test case works
result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/user_agent_check"), user_agent="USER_AGENT_CHECK_DOESNT_MATCH", timeout=3)
self.assertEqual(result.response_code, 200)
# Make sure that the server is validating the user-agent which returns 201 when the user-agent matches "USER_AGENT_CHECK"
result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/user_agent_check"), user_agent="USER_AGENT_CHECK", timeout=3)
self.assertEqual(result.response_code, 201)
@skipIfNoServer
def test_should_contain_string(self):
url_field = URLField("test_ping", "title", "this is a test")
result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/test_page"), timeout=3, should_contain_string="<h1>My First Heading</h1>")
self.assertEqual(result.response_code, 200)
self.assertEqual(result.response_md5, '1f6c14189070f50c4c06ada640c14850') # This is 1f6c14189070f50c4c06ada640c14850 on disk
self.assertEqual(result.response_sha224, 'deaf4c0062539c98b4e957712efcee6d42832fed2d803c2bbf984b23')
self.assertEqual(result.has_expected_string, True)
@skipIfNoServer
def test_should_contain_string_no_match(self):
url_field = URLField("test_ping", "title", "this is a test")
result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/test_page"), timeout=3, should_contain_string="<h1>Should not Match!</h1>")
self.assertEqual(result.response_code, 200)
self.assertEqual(result.response_md5, '1f6c14189070f50c4c06ada640c14850') # This is 1f6c14189070f50c4c06ada640c14850 on disk
self.assertEqual(result.response_sha224, 'deaf4c0062539c98b4e957712efcee6d42832fed2d803c2bbf984b23')
self.assertEqual(result.has_expected_string, False)
class TestOnCloud(unittest.TestCase):
def setUp(self):
super(TestOnCloud, self).setUp()
# Configure an instance of the class to test
self.web_ping = WebPing()
# Force the class to act like it is on cloud
self.web_ping.is_on_cloud = self.fake_is_on_cloud
def fake_is_on_cloud(self, session_key):
return True
def test_get_proxy_config(self):
# See https://lukemurphey.net/issues/2445
self.web_ping.is_on_cloud = self.fake_is_on_cloud
self.web_ping.get_proxy_config('a session key')
self.assertEqual(self.web_ping.get_proxy_config('a session key'), ("http", None, None, None, None, None))
if __name__ == '__main__':
try:
unittest.main()
finally:
if WebsiteMonitoringAppTest.proxyd is not None:
WebsiteMonitoringAppTest.proxyd.shutdown()
|
[] |
[] |
[
"TEST_PROXY_SERVER_PORT"
] |
[]
|
["TEST_PROXY_SERVER_PORT"]
|
python
| 1 | 0 | |
setup.py
|
#!/usr/bin/env python
import os
import subprocess
import tarfile
import shutil
import sysconfig
import requests
from setuptools import setup
from setuptools.command.build_ext import build_ext
from setuptools.extension import Extension
def urlretrieve(source_url, destination_path):
response = requests.get(source_url, stream=True)
if response.status_code != 200:
raise Exception("status code was: {}".format(response.status_code))
with open(destination_path, "wb") as fileobj:
for chunk in response.iter_content(chunk_size=128):
fileobj.write(chunk)
def path_in_dir(relative_path):
return os.path.abspath(os.path.join(os.path.dirname(__file__), relative_path))
def dependency_path(relative_path):
return os.path.join(path_in_dir("_deps"), relative_path)
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
jq_lib_tarball_path = dependency_path("jq-lib-1.6.tar.gz")
jq_lib_dir = dependency_path("jq-1.6")
oniguruma_version = "6.9.4"
oniguruma_lib_tarball_path = dependency_path("onig-{}.tar.gz".format(oniguruma_version))
oniguruma_lib_build_dir = dependency_path("onig-{}".format(oniguruma_version))
oniguruma_lib_install_dir = dependency_path("onig-install-{}".format(oniguruma_version))
class jq_build_ext(build_ext):
def run(self):
if not os.path.exists(dependency_path(".")):
os.makedirs(dependency_path("."))
self._build_oniguruma()
self._build_libjq()
build_ext.run(self)
def _build_oniguruma(self):
self._build_lib(
source_url="https://github.com/kkos/oniguruma/releases/download/v{0}/onig-{0}.tar.gz".format(oniguruma_version),
tarball_path=oniguruma_lib_tarball_path,
lib_dir=oniguruma_lib_build_dir,
commands=[
["./configure", "CFLAGS=-fPIC", "--prefix=" + oniguruma_lib_install_dir],
["make"],
["make", "install"],
])
def _build_libjq(self):
self._build_lib(
source_url="https://github.com/stedolan/jq/releases/download/jq-1.6/jq-1.6.tar.gz",
tarball_path=jq_lib_tarball_path,
lib_dir=jq_lib_dir,
commands=[
["./configure", "CFLAGS=-fPIC -pthread", "--disable-maintainer-mode", "--with-oniguruma=" + oniguruma_lib_install_dir],
["make"],
])
def _build_lib(self, source_url, tarball_path, lib_dir, commands):
self._download_tarball(
source_url=source_url,
tarball_path=tarball_path,
lib_dir=lib_dir,
)
macosx_deployment_target = sysconfig.get_config_var("MACOSX_DEPLOYMENT_TARGET")
if macosx_deployment_target:
os.environ['MACOSX_DEPLOYMENT_TARGET'] = str(macosx_deployment_target)
def run_command(args):
print("Executing: %s" % ' '.join(args))
subprocess.check_call(args, cwd=lib_dir)
for command in commands:
run_command(command)
def _download_tarball(self, source_url, tarball_path, lib_dir):
if os.path.exists(tarball_path):
os.unlink(tarball_path)
print("Downloading {}".format(source_url))
urlretrieve(source_url, tarball_path)
print("Downloaded {}".format(source_url))
if os.path.exists(lib_dir):
shutil.rmtree(lib_dir)
tarfile.open(tarball_path, "r:gz").extractall(dependency_path("."))
jq_extension = Extension(
"jq",
sources=["jq.c"],
include_dirs=[os.path.join(jq_lib_dir, "src")],
extra_link_args=["-lm"],
extra_objects=[
os.path.join(jq_lib_dir, ".libs/libjq.a"),
os.path.join(oniguruma_lib_install_dir, "lib/libonig.a"),
],
)
setup(
name='jq',
version='1.2.1',
description='jq is a lightweight and flexible JSON processor.',
long_description=read("README.rst"),
author='Michael Williamson',
url='http://github.com/mwilliamson/jq.py',
python_requires='>=3.5',
license='BSD 2-Clause',
ext_modules = [jq_extension],
cmdclass={"build_ext": jq_build_ext},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
)
|
[] |
[] |
[
"MACOSX_DEPLOYMENT_TARGET"
] |
[]
|
["MACOSX_DEPLOYMENT_TARGET"]
|
python
| 1 | 0 | |
mod/github.com/hashicorp/[email protected]/physical/consul/consul_test.go
|
package consul
import (
"fmt"
"math/rand"
"os"
"reflect"
"sync"
"testing"
"time"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/vault/helper/logging"
"github.com/hashicorp/vault/helper/strutil"
"github.com/hashicorp/vault/physical"
dockertest "gopkg.in/ory-am/dockertest.v2"
)
type consulConf map[string]string
var (
addrCount int = 0
testImagePull sync.Once
)
func testHostIP() string {
a := addrCount
addrCount++
return fmt.Sprintf("127.0.0.%d", a)
}
func testConsulBackend(t *testing.T) *ConsulBackend {
return testConsulBackendConfig(t, &consulConf{})
}
func testConsulBackendConfig(t *testing.T, conf *consulConf) *ConsulBackend {
logger := logging.NewVaultLogger(log.Debug)
be, err := NewConsulBackend(*conf, logger)
if err != nil {
t.Fatalf("Expected Consul to initialize: %v", err)
}
c, ok := be.(*ConsulBackend)
if !ok {
t.Fatalf("Expected ConsulBackend")
}
return c
}
func testConsul_testConsulBackend(t *testing.T) {
c := testConsulBackend(t)
if c == nil {
t.Fatalf("bad")
}
}
func testActiveFunc(activePct float64) physical.ActiveFunction {
return func() bool {
var active bool
standbyProb := rand.Float64()
if standbyProb > activePct {
active = true
}
return active
}
}
func testSealedFunc(sealedPct float64) physical.SealedFunction {
return func() bool {
var sealed bool
unsealedProb := rand.Float64()
if unsealedProb > sealedPct {
sealed = true
}
return sealed
}
}
func testPerformanceStandbyFunc(perfPct float64) physical.PerformanceStandbyFunction {
return func() bool {
var ps bool
unsealedProb := rand.Float64()
if unsealedProb > perfPct {
ps = true
}
return ps
}
}
func TestConsul_ServiceTags(t *testing.T) {
consulConfig := map[string]string{
"path": "seaTech/",
"service": "astronomy",
"service_tags": "deadbeef, cafeefac, deadc0de, feedface",
"redirect_addr": "http://127.0.0.2:8200",
"check_timeout": "6s",
"address": "127.0.0.2",
"scheme": "https",
"token": "deadbeef-cafeefac-deadc0de-feedface",
"max_parallel": "4",
"disable_registration": "false",
}
logger := logging.NewVaultLogger(log.Debug)
be, err := NewConsulBackend(consulConfig, logger)
if err != nil {
t.Fatal(err)
}
c, ok := be.(*ConsulBackend)
if !ok {
t.Fatalf("failed to create physical Consul backend")
}
expected := []string{"deadbeef", "cafeefac", "deadc0de", "feedface"}
actual := c.fetchServiceTags(false, false)
if !strutil.EquivalentSlices(actual, append(expected, "standby")) {
t.Fatalf("bad: expected:%s actual:%s", append(expected, "standby"), actual)
}
actual = c.fetchServiceTags(true, false)
if !strutil.EquivalentSlices(actual, append(expected, "active")) {
t.Fatalf("bad: expected:%s actual:%s", append(expected, "active"), actual)
}
actual = c.fetchServiceTags(false, true)
if !strutil.EquivalentSlices(actual, append(expected, "performance-standby")) {
t.Fatalf("bad: expected:%s actual:%s", append(expected, "performance-standby"), actual)
}
actual = c.fetchServiceTags(true, true)
if !strutil.EquivalentSlices(actual, append(expected, "performance-standby")) {
t.Fatalf("bad: expected:%s actual:%s", append(expected, "performance-standby"), actual)
}
}
func TestConsul_ServiceAddress(t *testing.T) {
tests := []struct {
consulConfig map[string]string
serviceAddrNil bool
}{
{
consulConfig: map[string]string{
"service_address": "",
},
},
{
consulConfig: map[string]string{
"service_address": "vault.example.com",
},
},
{
serviceAddrNil: true,
},
}
for _, test := range tests {
logger := logging.NewVaultLogger(log.Debug)
be, err := NewConsulBackend(test.consulConfig, logger)
if err != nil {
t.Fatalf("expected Consul to initialize: %v", err)
}
c, ok := be.(*ConsulBackend)
if !ok {
t.Fatalf("Expected ConsulBackend")
}
if test.serviceAddrNil {
if c.serviceAddress != nil {
t.Fatalf("expected service address to be nil")
}
} else {
if c.serviceAddress == nil {
t.Fatalf("did not expect service address to be nil")
}
}
}
}
func TestConsul_newConsulBackend(t *testing.T) {
tests := []struct {
name string
consulConfig map[string]string
fail bool
redirectAddr string
checkTimeout time.Duration
path string
service string
address string
scheme string
token string
max_parallel int
disableReg bool
consistencyMode string
}{
{
name: "Valid default config",
consulConfig: map[string]string{},
checkTimeout: 5 * time.Second,
redirectAddr: "http://127.0.0.1:8200",
path: "vault/",
service: "vault",
address: "127.0.0.1:8500",
scheme: "http",
token: "",
max_parallel: 4,
disableReg: false,
consistencyMode: "default",
},
{
name: "Valid modified config",
consulConfig: map[string]string{
"path": "seaTech/",
"service": "astronomy",
"redirect_addr": "http://127.0.0.2:8200",
"check_timeout": "6s",
"address": "127.0.0.2",
"scheme": "https",
"token": "deadbeef-cafeefac-deadc0de-feedface",
"max_parallel": "4",
"disable_registration": "false",
"consistency_mode": "strong",
},
checkTimeout: 6 * time.Second,
path: "seaTech/",
service: "astronomy",
redirectAddr: "http://127.0.0.2:8200",
address: "127.0.0.2",
scheme: "https",
token: "deadbeef-cafeefac-deadc0de-feedface",
max_parallel: 4,
consistencyMode: "strong",
},
{
name: "check timeout too short",
fail: true,
consulConfig: map[string]string{
"check_timeout": "99ms",
},
},
}
for _, test := range tests {
logger := logging.NewVaultLogger(log.Debug)
be, err := NewConsulBackend(test.consulConfig, logger)
if test.fail {
if err == nil {
t.Fatalf(`Expected config "%s" to fail`, test.name)
} else {
continue
}
} else if !test.fail && err != nil {
t.Fatalf("Expected config %s to not fail: %v", test.name, err)
}
c, ok := be.(*ConsulBackend)
if !ok {
t.Fatalf("Expected ConsulBackend: %s", test.name)
}
c.disableRegistration = true
if c.disableRegistration == false {
addr := os.Getenv("CONSUL_HTTP_ADDR")
if addr == "" {
continue
}
}
var shutdownCh physical.ShutdownChannel
waitGroup := &sync.WaitGroup{}
if err := c.RunServiceDiscovery(waitGroup, shutdownCh, test.redirectAddr, testActiveFunc(0.5), testSealedFunc(0.5), testPerformanceStandbyFunc(0.5)); err != nil {
t.Fatalf("bad: %v", err)
}
if test.checkTimeout != c.checkTimeout {
t.Errorf("bad: %v != %v", test.checkTimeout, c.checkTimeout)
}
if test.path != c.path {
t.Errorf("bad: %s %v != %v", test.name, test.path, c.path)
}
if test.service != c.serviceName {
t.Errorf("bad: %v != %v", test.service, c.serviceName)
}
if test.consistencyMode != c.consistencyMode {
t.Errorf("bad consistency_mode value: %v != %v", test.consistencyMode, c.consistencyMode)
}
// FIXME(sean@): Unable to test max_parallel
// if test.max_parallel != cap(c.permitPool) {
// t.Errorf("bad: %v != %v", test.max_parallel, cap(c.permitPool))
// }
}
}
func TestConsul_serviceTags(t *testing.T) {
tests := []struct {
active bool
perfStandby bool
tags []string
}{
{
active: true,
perfStandby: false,
tags: []string{"active"},
},
{
active: false,
perfStandby: false,
tags: []string{"standby"},
},
{
active: false,
perfStandby: true,
tags: []string{"performance-standby"},
},
{
active: true,
perfStandby: true,
tags: []string{"performance-standby"},
},
}
c := testConsulBackend(t)
for _, test := range tests {
tags := c.fetchServiceTags(test.active, test.perfStandby)
if !reflect.DeepEqual(tags[:], test.tags[:]) {
t.Errorf("Bad %v: %v %v", test.active, tags, test.tags)
}
}
}
func TestConsul_setRedirectAddr(t *testing.T) {
tests := []struct {
addr string
host string
port int64
pass bool
}{
{
addr: "http://127.0.0.1:8200/",
host: "127.0.0.1",
port: 8200,
pass: true,
},
{
addr: "http://127.0.0.1:8200",
host: "127.0.0.1",
port: 8200,
pass: true,
},
{
addr: "https://127.0.0.1:8200",
host: "127.0.0.1",
port: 8200,
pass: true,
},
{
addr: "unix:///tmp/.vault.addr.sock",
host: "/tmp/.vault.addr.sock",
port: -1,
pass: true,
},
{
addr: "127.0.0.1:8200",
pass: false,
},
{
addr: "127.0.0.1",
pass: false,
},
}
for _, test := range tests {
c := testConsulBackend(t)
err := c.setRedirectAddr(test.addr)
if test.pass {
if err != nil {
t.Fatalf("bad: %v", err)
}
} else {
if err == nil {
t.Fatalf("bad, expected fail")
} else {
continue
}
}
if c.redirectHost != test.host {
t.Fatalf("bad: %v != %v", c.redirectHost, test.host)
}
if c.redirectPort != test.port {
t.Fatalf("bad: %v != %v", c.redirectPort, test.port)
}
}
}
func TestConsul_NotifyActiveStateChange(t *testing.T) {
c := testConsulBackend(t)
if err := c.NotifyActiveStateChange(); err != nil {
t.Fatalf("bad: %v", err)
}
}
func TestConsul_NotifySealedStateChange(t *testing.T) {
c := testConsulBackend(t)
if err := c.NotifySealedStateChange(); err != nil {
t.Fatalf("bad: %v", err)
}
}
func TestConsul_serviceID(t *testing.T) {
tests := []struct {
name string
redirectAddr string
serviceName string
expected string
valid bool
}{
{
name: "valid host w/o slash",
redirectAddr: "http://127.0.0.1:8200",
serviceName: "sea-tech-astronomy",
expected: "sea-tech-astronomy:127.0.0.1:8200",
valid: true,
},
{
name: "valid host w/ slash",
redirectAddr: "http://127.0.0.1:8200/",
serviceName: "sea-tech-astronomy",
expected: "sea-tech-astronomy:127.0.0.1:8200",
valid: true,
},
{
name: "valid https host w/ slash",
redirectAddr: "https://127.0.0.1:8200/",
serviceName: "sea-tech-astronomy",
expected: "sea-tech-astronomy:127.0.0.1:8200",
valid: true,
},
{
name: "invalid host name",
redirectAddr: "https://127.0.0.1:8200/",
serviceName: "sea_tech_astronomy",
expected: "",
valid: false,
},
}
logger := logging.NewVaultLogger(log.Debug)
for _, test := range tests {
be, err := NewConsulBackend(consulConf{
"service": test.serviceName,
}, logger)
if !test.valid {
if err == nil {
t.Fatalf("expected an error initializing for name %q", test.serviceName)
}
continue
}
if test.valid && err != nil {
t.Fatalf("expected Consul to initialize: %v", err)
}
c, ok := be.(*ConsulBackend)
if !ok {
t.Fatalf("Expected ConsulBackend")
}
if err := c.setRedirectAddr(test.redirectAddr); err != nil {
t.Fatalf("bad: %s %v", test.name, err)
}
serviceID := c.serviceID()
if serviceID != test.expected {
t.Fatalf("bad: %v != %v", serviceID, test.expected)
}
}
}
func TestConsulBackend(t *testing.T) {
var token string
addr := os.Getenv("CONSUL_HTTP_ADDR")
if addr == "" {
cid, connURL := prepareTestContainer(t)
if cid != "" {
defer cleanupTestContainer(t, cid)
}
addr = connURL
token = dockertest.ConsulACLMasterToken
}
conf := api.DefaultConfig()
conf.Address = addr
conf.Token = token
client, err := api.NewClient(conf)
if err != nil {
t.Fatalf("err: %v", err)
}
randPath := fmt.Sprintf("vault-%d/", time.Now().Unix())
defer func() {
client.KV().DeleteTree(randPath, nil)
}()
logger := logging.NewVaultLogger(log.Debug)
b, err := NewConsulBackend(map[string]string{
"address": conf.Address,
"path": randPath,
"max_parallel": "256",
"token": conf.Token,
}, logger)
if err != nil {
t.Fatalf("err: %s", err)
}
physical.ExerciseBackend(t, b)
physical.ExerciseBackend_ListPrefix(t, b)
}
func TestConsulHABackend(t *testing.T) {
var token string
addr := os.Getenv("CONSUL_HTTP_ADDR")
if addr == "" {
cid, connURL := prepareTestContainer(t)
if cid != "" {
defer cleanupTestContainer(t, cid)
}
addr = connURL
token = dockertest.ConsulACLMasterToken
}
conf := api.DefaultConfig()
conf.Address = addr
conf.Token = token
client, err := api.NewClient(conf)
if err != nil {
t.Fatalf("err: %v", err)
}
randPath := fmt.Sprintf("vault-%d/", time.Now().Unix())
defer func() {
client.KV().DeleteTree(randPath, nil)
}()
logger := logging.NewVaultLogger(log.Debug)
config := map[string]string{
"address": conf.Address,
"path": randPath,
"max_parallel": "-1",
"token": conf.Token,
}
b, err := NewConsulBackend(config, logger)
if err != nil {
t.Fatalf("err: %s", err)
}
b2, err := NewConsulBackend(config, logger)
if err != nil {
t.Fatalf("err: %s", err)
}
physical.ExerciseHABackend(t, b.(physical.HABackend), b2.(physical.HABackend))
detect, ok := b.(physical.RedirectDetect)
if !ok {
t.Fatalf("consul does not implement RedirectDetect")
}
host, err := detect.DetectHostAddr()
if err != nil {
t.Fatalf("err: %s", err)
}
if host == "" {
t.Fatalf("bad addr: %v", host)
}
}
func prepareTestContainer(t *testing.T) (cid dockertest.ContainerID, retAddress string) {
if os.Getenv("CONSUL_HTTP_ADDR") != "" {
return "", os.Getenv("CONSUL_HTTP_ADDR")
}
// Without this the checks for whether the container has started seem to
// never actually pass. There's really no reason to expose the test
// containers, so don't.
dockertest.BindDockerToLocalhost = "yep"
testImagePull.Do(func() {
dockertest.Pull(dockertest.ConsulImageName)
})
try := 0
cid, connErr := dockertest.ConnectToConsul(60, 500*time.Millisecond, func(connAddress string) bool {
try += 1
// Build a client and verify that the credentials work
config := api.DefaultConfig()
config.Address = connAddress
config.Token = dockertest.ConsulACLMasterToken
client, err := api.NewClient(config)
if err != nil {
if try > 50 {
panic(err)
}
return false
}
_, err = client.KV().Put(&api.KVPair{
Key: "setuptest",
Value: []byte("setuptest"),
}, nil)
if err != nil {
if try > 50 {
panic(err)
}
return false
}
retAddress = connAddress
return true
})
if connErr != nil {
t.Fatalf("could not connect to consul: %v", connErr)
}
return
}
func cleanupTestContainer(t *testing.T, cid dockertest.ContainerID) {
err := cid.KillRemove()
if err != nil {
t.Fatal(err)
}
}
|
[
"\"CONSUL_HTTP_ADDR\"",
"\"CONSUL_HTTP_ADDR\"",
"\"CONSUL_HTTP_ADDR\"",
"\"CONSUL_HTTP_ADDR\"",
"\"CONSUL_HTTP_ADDR\""
] |
[] |
[
"CONSUL_HTTP_ADDR"
] |
[]
|
["CONSUL_HTTP_ADDR"]
|
go
| 1 | 0 | |
BrAinPI/old/zarrVDS.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 9 16:26:33 2021
@author: alpha
"""
import os, glob, zarr, warnings
import h5py # required by getSlice below
import numpy as np
from numcodecs import Blosc
compressor = Blosc(cname='zstd', clevel=9, shuffle=Blosc.BITSHUFFLE)
# ## Open fullres fmost CH1
# location = r"/CBI_Hive/globus/pitt/bil/CH1"
# location = r"h:/globus/pitt/bil/CH1"
# fileList = sorted(glob.glob(os.path.join(location,'*_CH1.tif')))
# testImage = io.imread(fileList[0])
# stack = [delayed(io.imread)(x) for x in fileList]
# stack = [da.from_delayed(x,shape=testImage.shape,dtype=testImage.dtype) for x in stack]
# imageStack = da.stack(stack)
class zarrVDS:
def __init__(self, directoryStr, shape=None, dtype=None, chunks=None, compressor=None, ResolutionLevelLock=0):
assert isinstance(directoryStr,str)
assert isinstance(shape,tuple)
assert len(shape) == 5,"All shapes and chunks must be 5 dimensions TCZYX"
assert len(chunks) == 5,"All shapes and chunks must be 5 dimensions TCZYX"
self.directoryStr = directoryStr
self.shape = shape
self.dtype = dtype
self.ndim = len(self.shape)
self.chunks = chunks
# Force chunk dims 0,1 to == 1 for time and color
if self.chunks[0] != 1:
warnings.warn('Chunk dim 0 must be 1. Resetting to 1')
self.chunks = list(self.chunks)
self.chunks[0] = 1
if self.chunks[1] != 1:
warnings.warn('Chunk dim 1 must be 1. Resetting to 1')
self.chunks = list(self.chunks)
self.chunks[1] = 1
self.chunks = tuple(self.chunks)
self.compressor = compressor
self.ResolutionLevelLock = ResolutionLevelLock
# Set defaults
if self.dtype is None:
self.dtype = np.float32
if self.compressor is None:
self.compressor = Blosc(cname='zstd', clevel=9, shuffle=Blosc.BITSHUFFLE)
# make location dir
if os.path.exists(self.directoryStr) == False:
os.makedirs(self.directoryStr)
# Initialize the first and only required array
self.initArray(0,0,0)
def __getitem__(self, key):
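# Normalize the requested key to a 5-tuple of slices (t, c, z, y, x),
# optionally preceded by a resolution level when a 6-element key is given.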
print(key)
# res = self.ResolutionLock
# if isinstance(key,tuple) and len(key) == 6 and isinstance(key[0], int):
# res = key[0]
# key = [x for x in key[1::]]
# if isinstance(key, int):
# key = [slice(key)]
# for _ in range(self.ndim-1):
# key.append(slice(None))
# key = tuple(key)
# if isinstance(key,tuple):
# key = [slice(x) if isinstance(x,int) else x for x in key]
# while len(key) < self.ndim:
# key.append(slice(None))
# key = tuple(key)
# print(key)
origionalKey = key
res = self.ResolutionLevelLock
if isinstance(key,slice) == False and isinstance(key,int) == False and len(key) == 6:
res = key[0]
# if res >= self.ResolutionLevels:
# raise ValueError('Layer is larger than the number of ResolutionLevels')
key = tuple([x for x in key[1::]])
## All slices will be converted to 5 dims and placed into a tuple
if isinstance(key,slice):
key = [key]
if isinstance(key, int):
key = [slice(key)]
## Convert int/slice mix to a tuple of slices
elif isinstance(key,tuple):
key = tuple([slice(x) if isinstance(x,int) else x for x in key])
key = list(key)
while len(key) < 5:
key.append(slice(None))
key = tuple(key)
print(res)
print(key)
## Convert slice None to int
newKey = []
for num, idx in enumerate(key):
if isinstance(idx.stop, int) and idx.start is None:
newKey.append(slice(idx.stop,idx.stop+1,idx.step))
## Need to throw errors here
if newKey[-1].stop >= self.shape[num]:
newKey[-1] = slice(newKey[-1].start,self.shape[num]-1,newKey[-1].step)
if newKey[-1].start >= self.shape[num]:
newKey[-1] = slice(newKey[-1].stop-1,newKey[-1].stop,newKey[-1].step)
if newKey[-1].step is None:
newKey[-1] = slice(newKey[-1].start,newKey[-1].stop,1)
else:
newKey.append(idx)
key = newKey
print(key)
# if self.cache == None:
# return getSlice(
# self,
# r = res if res is not None else 0,
# t = sliceFixer(self,key[0],'t',res=res),
# c = sliceFixer(self,key[1],'c',res=res),
# z = sliceFixer(self,key[2],'z',res=res),
# y = sliceFixer(self,key[3],'y',res=res),
# x = sliceFixer(self,key[4],'x',res=res)
# )
# else:
# return cache(location=self.cache_location,mem_size=self.mem_size,disk_size=self.disk_size)(getSlice)(
# self,
# r = res if res is not None else 0,
# t = sliceFixer(self,key[0],'t',res=res),
# c = sliceFixer(self,key[1],'c',res=res),
# z = sliceFixer(self,key[2],'z',res=res),
# y = sliceFixer(self,key[3],'y',res=res),
# x = sliceFixer(self,key[4],'x',res=res)
# )
sliceReturned = getSlice(
self,
r = res if res is not None else 0, #Force ResolutionLock of None to be 0 when slicing
t = sliceFixer(self,key[0],'t',res=res),
c = sliceFixer(self,key[1],'c',res=res),
z = sliceFixer(self,key[2],'z',res=res),
y = sliceFixer(self,key[3],'y',res=res),
x = sliceFixer(self,key[4],'x',res=res)
)
print('Image Slices Requested: {} / Item shape returned: {} \n'.format(origionalKey,sliceReturned.shape))
return sliceReturned
# return getArray(datasetNum=self.datasetNum,res=self.ResolutionLock,key=key)
def location(self,r,t,c):
return os.path.join(self.directoryStr,'{}.{}.{}.zarr'.format(r,t,c))
def initArray(self,r,t,c):
if os.path.exists(self.location(r,t,c)) == False:
store = zarr.ZipStore(self.location(r,t,c))
zarr.zeros(shape=self.shape[-2::], chunks=self.chunks[-2::], store=store, dtype=np.uint16,compressor=compressor)
store.close()
def getSlice(imsClass,r,t,c,z,y,x):
'''
IMS stores 3D datasets ONLY with Resolution, Time, and Color as 'directory'
structure within HDF5. Thus, data access can only happen across dims XYZ
for a specific RTC.
'''
# incomingSlices = (r,t,c,z,y,x)
tSize = list(range(imsClass.TimePoints)[t])
cSize = list(range(imsClass.Channels)[c])
zSize = len(range(imsClass.metaData[(r,0,0,'shape')][-3])[z])
ySize = len(range(imsClass.metaData[(r,0,0,'shape')][-2])[y])
xSize = len(range(imsClass.metaData[(r,0,0,'shape')][-1])[x])
outputArray = np.zeros((len(tSize),len(cSize),zSize,ySize,xSize))
# chunkRequested = outputArray.shape
with h5py.File(imsClass.filePathComplete, 'r') as hf:
for idxt, t in enumerate(tSize):
for idxc, c in enumerate(cSize):
# print(t)
# print(c)
dSetString = locationGenerator(r,t,c,data='data')
outputArray[idxt,idxc,:,:,:] = hf[dSetString][z,y,x]
''' Some issues here with the output of these arrays. Napari sometimes expects
3-dim arrays and sometimes 5-dim arrays, which originates from the dask array input representing
tczyx dimensions of the imaris file. When os.environ["NAPARI_ASYNC"] = "1", squeezing
the array to 3 dimensions works. When ASYNC is off, squeeze does not work and
napari throws an error because it did not get a 3-dim array.
Am I implementing slicing wrong? Or does napari have some inconsistency in the
dimensions of the arrays that it expects with different loading mechanisms if the
arrays have unused single dimensions?
Currently "NAPARI_ASYNC" = '1' is set to one in the image loader.
Currently File/Preferences/Render Images Asynchronously must be turned on for this plugin to work.
'''
try:
# if os.environ["NAPARI_ASYNC"] == '1':
# while outputArray.shape[0] == 1 and len(outputArray.shape) > 1:
# outputArray = outputArray[0,:]
# # sliceOutput = outputArray.shape
# # print('Incoming Slices: {} / Slice Requested: {} / Slice Output {}'.format(incomingSlices,chunkRequested,sliceOutput))
# return outputArray
## Above code only eliminates low single length dims
## Squeeze will eliminate ALL single length dims
if os.environ["NAPARI_ASYNC"] == '1':
return np.squeeze(outputArray)
except KeyError:
pass
# sliceOutput = outputArray.shape
# print('Incoming Slices: {} / Slice Requested: {} / Slice Output {}'.format(incomingSlices,chunkRequested,sliceOutput))
return outputArray
z = zarrVDS(r"Y:\bil", shape=(1,1,100,3000,3000), dtype=np.uint16,chunks=(2,1,1,1000,1000))
|
[] |
[] |
[
"NAPARI_ASYNC"
] |
[]
|
["NAPARI_ASYNC"]
|
python
| 1 | 0 | |
ippon.py
|
import tkinter as tk
import tkinter.font as tkfont
import keyboard
import zakoshi
# Embedded favicon (base64)
ICON_BASE64 = '''
R0lGODlhIQAgAHAAACH5BAEAAAEALAAAAAAhACAAgQAAAAAAAAAAAAAAAAJDjI+p
y+0Po5y02ouz3rz7/wHAIY5BWVYiuabnKqGy+5rRjCZwvNO1fevtXhacy8czAi/K
EZL3QzxB1Kr1is1qt9xGAQA7
'''
# All keys
ALL_KEYS = '''\
q + w + e + r + t + y + u + i + o + p +\
a + s + d + f + g + h + j + k + l + z +\
x + c + v + b + n + m + esc + shift +\
ctrl + alt + enter\
'''
# Window settings
root = tk.Tk()
root.title('IPPON Hollywood Zakoshisyoh')
root.geometry('1280x720')
root.configure(bg='#ffc700')
root.tk.call('wm', 'iconphoto', root._w, tk.PhotoImage(data=ICON_BASE64))
# Text display
body_font = tkfont.Font(family='Meiryo UI', size=90, weight='bold')
body = tk.Label(root,
text='パソコンの全ボタンを\n一斉に押したら\n何が起きますか?',
bg='#ffc700',
font=body_font)
body.pack(anchor='center', expand=True)
title_font = tkfont.Font(family='Meiryo UI', size=60, weight='bold')
title = tk.Label(root,
text='IPPON GRAND PRIX',
fg='#ffc700',
bg='#000000',
font=title_font,
width='50')
title.pack(anchor='s')
# Hotkey settings
keyboard.add_hotkey(ALL_KEYS, lambda: zakoshi.answer(body))
keyboard.add_hotkey('space', lambda: zakoshi.answer(body)) # Secret hotkey (for testing)
root.mainloop()
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
src/testcases/CWE23_Relative_Path_Traversal/CWE23_Relative_Path_Traversal__Environment_53a.java
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE23_Relative_Path_Traversal__Environment_53a.java
Label Definition File: CWE23_Relative_Path_Traversal.label.xml
Template File: sources-sink-53a.tmpl.java
*/
/*
* @description
* CWE: 23 Relative Path Traversal
* BadSource: Environment Read data from an environment variable
* GoodSource: A hardcoded string
* Sinks: readFile
* BadSink : no validation
* Flow Variant: 53 Data flow: data passed as an argument from one method through two others to a fourth; all four functions are in different classes in the same package
*
* */
package testcases.CWE23_Relative_Path_Traversal;
import testcasesupport.*;
import java.io.*;
import javax.servlet.http.*;
public class CWE23_Relative_Path_Traversal__Environment_53a extends AbstractTestCase
{
public void bad() throws Throwable
{
String data;
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
data = System.getenv("ADD");
(new CWE23_Relative_Path_Traversal__Environment_53b()).badSink(data );
}
public void good() throws Throwable
{
goodG2B();
}
/* goodG2B() - use goodsource and badsink */
private void goodG2B() throws Throwable
{
String data;
/* FIX: Use a hardcoded string */
data = "foo";
(new CWE23_Relative_Path_Traversal__Environment_53b()).goodG2BSink(data );
}
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
public static void main(String[] args) throws ClassNotFoundException,
InstantiationException, IllegalAccessException
{
mainFromParent(args);
}
}
|
[
"\"ADD\""
] |
[] |
[
"ADD"
] |
[]
|
["ADD"]
|
java
| 1 | 0 | |
pkg/cloudwatch/cloudwatch.go
|
package cloudwatch
import (
"errors"
"io/ioutil"
"log"
"net/http"
"os"
"strconv"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudwatch"
"github.com/calebpalmer/nvidia-cloudwatch/pkg/nvidia"
)
// createSession creates an AWS Session
func createSession(region *string) (*session.Session, error) {
awsRegion := "us-east-1"
if region != nil && *region != "" {
awsRegion = *region
}
sess, err := session.NewSessionWithOptions(session.Options{
Config: aws.Config{
Region: aws.String(awsRegion),
},
})
if err != nil {
return nil, err
}
return sess, nil
}
// getInstance gets the name of the ec2 instance or NA if not running on an ec2 instance
func getInstance() string {
resp, err := http.Get("http://169.254.169.254/latest/meta-data/instance-id")
if err != nil {
return "NA"
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "NA"
}
return string(body)
}
// Find takes a slice and looks for an element in it. If found it will
// return its key, otherwise it will return -1 and a bool of false.
func find(slice []*float64, val *float64) (int, bool) {
for i, item := range slice {
if item != nil && val != nil && *item == *val {
return i, true
}
}
return -1, false
}
// mergeMetrics, merges two sets of metrics.
func mergeMetrics(m1 *cloudwatch.MetricDatum, m2 *cloudwatch.MetricDatum) *cloudwatch.MetricDatum {
m1.Timestamp = m2.Timestamp
m1.Dimensions = m2.Dimensions
m1.MetricName = m2.MetricName
m1.StorageResolution = m2.StorageResolution
m1.Unit = m2.Unit
if m1.Values == nil {
m1.Values = make([]*float64, 0, int(*m2.StorageResolution))
}
if m1.Counts == nil {
m1.Counts = make([]*float64, 0, int(*m2.StorageResolution))
}
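// Merge m2's sample into the Values/Counts histogram: bump the count for an
// already-seen value, otherwise append the value with an initial count of 1.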
k, found := find(m1.Values, m2.Value)
if found == false {
m1.Values = append(m1.Values, m2.Value)
count := 1.0
m1.Counts = append(m1.Counts, &count)
} else {
newCount := *m1.Counts[k] + 1.0
m1.Counts[k] = &newCount
}
return m1
}
// getMetrics collects GPU-utilization, used-memory and free-memory datums for every
// NVIDIA device, tagged with the Instance and GPU dimensions, to be pushed to CloudWatch.
func getMetrics(instance *string, resolution int64) []*cloudwatch.MetricDatum {
time := time.Now()
devices, err := nvidia.GetDevices()
if err != nil {
panic(err)
}
instanceDimension := "Instance"
gpuDimension := "GPU"
gpuUtilizationMetricName := "GPUUtilization"
memoryUsedMetricName := "MemoryUsed"
memoryFreeMetricName := "MemoryFree"
percentUnit := cloudwatch.StandardUnitPercent
memoryUnit := cloudwatch.StandardUnitMegabytes
metrics := make([]*cloudwatch.MetricDatum, 0, 3)
for _, device := range devices {
dimensions := []*cloudwatch.Dimension{
&cloudwatch.Dimension{Name: &instanceDimension, Value: instance},
&cloudwatch.Dimension{Name: &gpuDimension, Value: &device.UUID},
}
gpuUtilization := float64(*device.GPUUtilization)
gpuUtilizationMetric := cloudwatch.MetricDatum{
Timestamp: &time,
Dimensions: dimensions,
MetricName: &gpuUtilizationMetricName,
StorageResolution: &resolution,
Unit: &percentUnit,
Value: &gpuUtilization,
}
metrics = append(metrics, &gpuUtilizationMetric)
memoryUsed := float64(*device.UsedMemory)
memoryUsedMetric := cloudwatch.MetricDatum{
Timestamp: &time,
Dimensions: dimensions,
MetricName: &memoryUsedMetricName,
StorageResolution: &resolution,
Unit: &memoryUnit,
Value: &memoryUsed,
}
metrics = append(metrics, &memoryUsedMetric)
memoryFree := float64(*device.FreeMemory)
memoryFreeMetric := cloudwatch.MetricDatum{
Timestamp: &time,
Dimensions: dimensions,
MetricName: &memoryFreeMetricName,
StorageResolution: &resolution,
Unit: &memoryUnit,
Value: &memoryFree,
}
metrics = append(metrics, &memoryFreeMetric)
}
return metrics
}
// logMetrics pushes metrics into Cloudwatch.
func logMetrics(svc *cloudwatch.CloudWatch,
metrics []*cloudwatch.MetricDatum) {
for _, m := range metrics {
err := m.Validate()
if err != nil {
panic(err)
}
}
namespace := "nvidia-cloudwatch"
_, err := svc.PutMetricData(&cloudwatch.PutMetricDataInput{
MetricData: metrics,
Namespace: &namespace,
})
if err != nil {
log.Panic(err)
}
}
// logAllMetrics logs a list of list of metrics.
func logAllMetrics(svc *cloudwatch.CloudWatch, metricsList [][]*cloudwatch.MetricDatum) {
for _, metrics := range metricsList {
logMetrics(svc, metrics)
}
}
// StartExporter starts the process of pushing metrics
func StartExporter() {
region := os.Getenv("AWS_REGION")
sess, err := createSession(®ion)
if err != nil {
log.Panic(err)
}
// the period at which we collect and send metrics.
period := 60
p := os.Getenv("PERIOD")
if p != "" {
period, err = strconv.Atoi(p)
if err != nil {
log.Panic(err)
}
}
resolution := 60
r := os.Getenv("RESOLUTION")
if r != "" {
resolution, err = strconv.Atoi(r)
if err != nil {
log.Panic(err)
}
}
if !(resolution == 60 || resolution == 1) {
panic(errors.New("Resolution must be 1 or 60"))
}
if period < resolution {
panic(errors.New("Period must be greater than or equal to resolution."))
}
instanceName := getInstance()
svc := cloudwatch.New(sess)
if resolution == 60 {
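// At 60-second resolution, samples gathered each minute are merged into a
// single datum per metric and flushed to CloudWatch once every period.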
lastLogTime := time.Now().Truncate(time.Second)
metrics := make([]*cloudwatch.MetricDatum, 3, 3)
for i, _ := range metrics {
metrics[i] = &cloudwatch.MetricDatum{}
}
for {
nextTime := time.Now().Truncate(time.Second)
nextTime = nextTime.Add(time.Duration(resolution) * time.Second)
newMetrics := getMetrics(&instanceName, int64(resolution))
// merge them. We assume that they are in the same order.
for i, _ := range metrics {
metrics[i] = mergeMetrics(metrics[i], newMetrics[i])
}
if int(time.Now().Truncate(time.Second).Sub(lastLogTime)/time.Second) >= period {
go logMetrics(svc, metrics)
lastLogTime = time.Now().Truncate(time.Second)
metrics = make([]*cloudwatch.MetricDatum, 3, 3)
for i, _ := range metrics {
metrics[i] = &cloudwatch.MetricDatum{}
}
}
time.Sleep(time.Until(nextTime))
}
} else {
// 1 second resolution
lastLogTime := time.Now().Truncate(time.Second)
metrics := make([][]*cloudwatch.MetricDatum, 0, 60)
for {
nextTime := time.Now().Truncate(time.Second)
nextTime = nextTime.Add(time.Duration(resolution) * time.Second)
metrics = append(metrics, getMetrics(&instanceName, int64(resolution)))
if int(time.Now().Truncate(time.Second).Sub(lastLogTime)/time.Second) >= period {
go logAllMetrics(svc, metrics)
lastLogTime = time.Now().Truncate(time.Second)
metrics = make([][]*cloudwatch.MetricDatum, 0, 60)
}
time.Sleep(time.Until(nextTime))
}
}
}
|
[
"\"AWS_REGION\"",
"\"PERIOD\"",
"\"RESOLUTION\""
] |
[] |
[
"RESOLUTION",
"PERIOD",
"AWS_REGION"
] |
[]
|
["RESOLUTION", "PERIOD", "AWS_REGION"]
|
go
| 3 | 0 | |
cmd/api/main.go
|
package main
import (
"database/sql"
"net"
"os"
"github.com/joho/godotenv"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc"
"kannon.gyozatech.dev/generated/pb"
)
func main() {
log.SetFormatter(&log.JSONFormatter{})
runGrpcServer()
}
func runGrpcServer() error {
godotenv.Load()
dbi, err := sql.Open("postgres", os.Getenv("DB_CONN"))
if err != nil {
panic(err)
}
apiService, err := createAPIService(dbi)
if err != nil {
return err
}
log.Info("😃 Open TCP Connection")
lis, err := net.Listen("tcp", "0.0.0.0:50051")
if err != nil {
return err
}
defer lis.Close()
s := grpc.NewServer()
pb.RegisterApiServer(s, apiService)
log.Infof("🚀 starting gRPC... Listening on %v\n", lis.Addr())
if err := s.Serve(lis); err != nil {
return err
}
return nil
}
|
[
"\"DB_CONN\""
] |
[] |
[
"DB_CONN"
] |
[]
|
["DB_CONN"]
|
go
| 1 | 0 | |
src/dotnetcore/supply/supply.go
|
package supply
import (
"crypto/md5"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/cloudfoundry/dotnet-core-buildpack/src/dotnetcore/config"
"github.com/cloudfoundry/dotnet-core-buildpack/src/dotnetcore/project"
"github.com/cloudfoundry/libbuildpack"
)
type Command interface {
Execute(string, io.Writer, io.Writer, string, ...string) error
Output(string, string, ...string) (string, error)
}
type Manifest interface {
AllDependencyVersions(string) []string
DefaultVersion(string) (libbuildpack.Dependency, error)
}
type Installer interface {
FetchDependency(libbuildpack.Dependency, string) error
InstallDependency(libbuildpack.Dependency, string) error
InstallOnlyVersion(string, string) error
}
type Stager interface {
BuildDir() string
CacheDir() string
DepDir() string
DepsIdx() string
LinkDirectoryInDepDir(string, string) error
AddBinDependencyLink(string, string) error
WriteEnvFile(string, string) error
WriteProfileD(string, string) error
SetStagingEnvironment() error
}
type Supplier struct {
Stager Stager
Manifest Manifest
Installer Installer
Log *libbuildpack.Logger
Command Command
Config *config.Config
Project *project.Project
}
func Run(s *Supplier) error {
s.Log.BeginStep("Supplying Dotnet Core")
if err := s.Command.Execute(s.Stager.BuildDir(), ioutil.Discard, ioutil.Discard, "touch", "/tmp/checkpoint"); err != nil {
s.Log.Error("Unable to execute command: %s", err.Error())
return err
}
if checksum, err := s.CalcChecksum(); err == nil {
s.Log.Debug("BuildDir Checksum Before Supply: %s", checksum)
}
if err := s.InstallLibunwind(); err != nil {
s.Log.Error("Unable to install Libunwind: %s", err.Error())
return err
}
usesLibgdiplus, err := s.Project.UsesLibrary("System.Drawing.Common")
if err != nil {
s.Log.Error(`Error searching project for library "System.Drawing.Common": %s`, err.Error())
return err
}
if usesLibgdiplus {
if err := s.InstallLibgdiplus(); err != nil {
s.Log.Error("Unable to install libgdiplus: %s", err.Error())
return err
}
}
if err := s.InstallDotnetSdk(); err != nil {
s.Log.Error("Unable to install Dotnet SDK: %s", err.Error())
return err
}
if err := s.InstallNode(); err != nil {
s.Log.Error("Unable to install NodeJs: %s", err.Error())
return err
}
if err := s.InstallBower(); err != nil {
s.Log.Error("Unable to install Bower: %s", err.Error())
return err
}
if err := s.Stager.SetStagingEnvironment(); err != nil {
s.Log.Error("Unable to setup environment variables: %s", err.Error())
return err
}
if checksum, err := s.CalcChecksum(); err == nil {
s.Log.Debug("BuildDir Checksum After Supply: %s", checksum)
}
if filesChanged, err := s.Command.Output(s.Stager.BuildDir(), "find", ".", "-newer", "/tmp/checkpoint", "-not", "-path", "./.cloudfoundry/*", "-not", "-path", "./.cloudfoundry"); err == nil && filesChanged != "" {
s.Log.Debug("Below files changed:")
s.Log.Debug(filesChanged)
}
return nil
}
func (s *Supplier) InstallLibunwind() error {
if err := s.Installer.InstallOnlyVersion("libunwind", filepath.Join(s.Stager.DepDir(), "libunwind")); err != nil {
return err
}
return s.Stager.LinkDirectoryInDepDir(filepath.Join(s.Stager.DepDir(), "libunwind", "lib"), "lib")
}
func (s *Supplier) InstallLibgdiplus() error {
if err := s.Installer.InstallOnlyVersion("libgdiplus", filepath.Join(s.Stager.DepDir(), "libgdiplus")); err != nil {
return err
}
return s.Stager.LinkDirectoryInDepDir(filepath.Join(s.Stager.DepDir(), "libgdiplus", "lib"), "lib")
}
func (s *Supplier) shouldInstallBower() (bool, error) {
err := s.Command.Execute(s.Stager.BuildDir(), ioutil.Discard, ioutil.Discard, "bower", "-v")
if err == nil {
return false, nil
}
if isPublished, err := s.Project.IsPublished(); err != nil {
return false, err
} else if isPublished {
return false, nil
}
if commandsPresent, err := s.commandsInProjFiles([]string{"bower"}); err != nil {
return false, err
} else if commandsPresent {
return true, nil
}
return false, nil
}
func (s *Supplier) bowerInstall() error {
versions := s.Manifest.AllDependencyVersions("bower")
dep := libbuildpack.Dependency{Name: "bower", Version: versions[0]}
dir, err := ioutil.TempDir("", "dotnet-core_buildpack-bower")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(dir)
if err := s.Installer.FetchDependency(dep, filepath.Join(dir, "bower.tar.gz")); err != nil {
return err
}
if err := s.Command.Execute(s.Stager.BuildDir(), ioutil.Discard, ioutil.Discard, "npm", "install", "-g", filepath.Join(dir, "bower.tar.gz")); err != nil {
return err
}
return s.Stager.LinkDirectoryInDepDir(filepath.Join(s.Stager.DepDir(), "node", "bin"), "bin")
}
func (s *Supplier) InstallBower() error {
if shouldInstallBower, err := s.shouldInstallBower(); err != nil {
return err
} else if !shouldInstallBower {
return nil
}
if err := s.Command.Execute(s.Stager.BuildDir(), ioutil.Discard, ioutil.Discard, "npm", "-v"); err != nil {
return fmt.Errorf("Trying to install bower but NPM is not installed")
}
return s.bowerInstall()
}
func (s *Supplier) InstallNode() error {
shouldInstallNode, err := s.shouldInstallNode()
if err != nil {
return fmt.Errorf("Could not decide whether to install node: %v", err)
}
if shouldInstallNode {
version, err := libbuildpack.FindMatchingVersion("x", s.Manifest.AllDependencyVersions("node"))
if err != nil {
return err
}
dep := libbuildpack.Dependency{
Name: "node",
Version: version,
}
nodePath := filepath.Join(s.Stager.DepDir(), "node")
if err := s.Installer.InstallDependency(dep, nodePath); err != nil {
return err
}
return s.Stager.LinkDirectoryInDepDir(filepath.Join(nodePath, "bin"), "bin")
}
return nil
}
func (s *Supplier) shouldInstallNode() (bool, error) {
err := s.Command.Execute(s.Stager.BuildDir(), ioutil.Discard, ioutil.Discard, "node", "-v")
if err == nil {
return false, nil
}
if os.Getenv("INSTALL_NODE") != "" {
return true, nil
}
if isPublished, err := s.Project.IsPublished(); err != nil {
return false, fmt.Errorf("Could not determine if project is published: %v", err)
} else if isPublished {
return false, nil
}
return s.commandsInProjFiles([]string{"npm", "bower"})
}
func (s *Supplier) commandsInProjFiles(commands []string) (bool, error) {
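// commandsInProjFiles reports whether any pre/post build or publish MSBuild
// Target in the project files runs an Exec command that contains one of the
// given command names (e.g. "npm" or "bower").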
projFiles, err := s.Project.ProjectFilePaths()
if err != nil {
return false, fmt.Errorf("Could not get project file paths: %v", err)
}
for _, projFile := range projFiles {
obj := struct {
Sdk string `xml:"Sdk,attr"`
Target struct {
Name string `xml:"Name,attr"`
BeforeTargets string `xml:"BeforeTargets,attr"`
AfterTargets string `xml:"AfterTargets,attr"`
Exec []struct {
Command string `xml:"Command,attr"`
} `xml:"Exec"`
} `xml:"Target"`
}{}
projFileContent, err := ioutil.ReadFile(projFile)
if err != nil {
return false, fmt.Errorf("Could not read project file: %v", err)
}
if err := xml.Unmarshal(projFileContent, &obj); err != nil {
return false, fmt.Errorf("Could not unmarshal project file: %v", err)
}
targetNames := []string{"BeforeBuild", "BeforeCompile", "BeforePublish", "AfterBuild", "AfterCompile", "AfterPublish"}
nameInTargetNames := false
for _, name := range targetNames {
if name == obj.Target.Name {
nameInTargetNames = true
break
}
}
attrInTargetAttrs := obj.Target.BeforeTargets != "" || obj.Target.AfterTargets != ""
if nameInTargetNames || attrInTargetAttrs {
for _, ex := range obj.Target.Exec {
command := ex.Command
for _, cmd := range commands {
if strings.Contains(command, cmd) {
return true, nil
}
}
}
}
}
return false, nil
}
// sdkRollForward finds the highest available SDK patch version within the
// same feature line (major.minor.Fxx) as the given version, and returns an
// error if no SDK in that feature line is available.
func sdkRollForward(version string, versions []string) (string, error) {
var featureLine string
var highestPatch string
parts := strings.SplitN(version, ".", 3)
if len(parts) == 3 {
featureLine = parts[2][:1]
}
for _, v := range versions {
versionSplit := strings.SplitN(v, ".", 3)
if len(versionSplit) == 3 && versionSplit[2][:1] == featureLine {
if highestPatch == "" {
highestPatch = versionSplit[2][1:]
} else {
current, err := strconv.Atoi(highestPatch)
if err != nil {
return "", err
}
comp, err := strconv.Atoi(versionSplit[2][1:])
if err != nil {
return "", err
}
if current < comp {
highestPatch = versionSplit[2][1:]
}
}
}
}
if highestPatch == "" {
return "", fmt.Errorf("could not find sdk in same feature line as '%s'", version)
}
return fmt.Sprintf("%s.%s.%s%s", parts[0], parts[1], featureLine, highestPatch), nil
}
func (s *Supplier) pickVersionToInstall() (string, error) {
allVersions := s.Manifest.AllDependencyVersions("dotnet-sdk")
buildpackVersion, err := s.buildpackYamlSdkVersion()
if err != nil {
return "", err
}
if buildpackVersion != "" {
version, err := project.FindMatchingVersionWithPreview(buildpackVersion, allVersions)
if err != nil {
s.Log.Warning("SDK %s in buildpack.yml is not available", buildpackVersion)
return "", err
}
return version, err
}
globalJSONVersion, err := s.globalJsonSdkVersion()
if err != nil {
return "", err
}
if globalJSONVersion != "" {
if contains(allVersions, globalJSONVersion) {
return globalJSONVersion, nil
}
s.Log.Warning("SDK %s in global.json is not available", globalJSONVersion)
installVersion, err := sdkRollForward(globalJSONVersion, allVersions)
if err == nil {
s.Log.Info("falling back to latest version in version line")
return installVersion, nil
}
return "", err
}
dep, err := s.Manifest.DefaultVersion("dotnet-sdk")
if err != nil {
return "", err
}
s.Log.Info("using the default SDK")
return dep.Version, nil
}
func (s *Supplier) InstallDotnetSdk() error {
installVersion, err := s.pickVersionToInstall()
if err != nil {
return err
}
s.Config.DotnetSdkVersion = installVersion
if err := s.Installer.InstallDependency(libbuildpack.Dependency{Name: "dotnet-sdk", Version: installVersion}, filepath.Join(s.Stager.DepDir(), "dotnet-sdk")); err != nil {
return err
}
if err := s.Stager.AddBinDependencyLink(filepath.Join(s.Stager.DepDir(), "dotnet-sdk", "dotnet"), "dotnet"); err != nil {
return err
}
return s.installRuntimeIfNeeded()
}
func (s *Supplier) installRuntimeIfNeeded() error {
runtimeVersionPath := filepath.Join(s.Stager.DepDir(), "dotnet-sdk", "RuntimeVersion.txt")
exists, err := libbuildpack.FileExists(runtimeVersionPath)
if err != nil {
return err
} else if exists {
version, err := ioutil.ReadFile(runtimeVersionPath)
if err != nil {
return err
}
name := "dotnet-runtime"
runtimeVersion, err := s.Project.FindMatchingFrameworkVersion(name, string(version), nil)
if err != nil {
return err
}
return s.Installer.InstallDependency(libbuildpack.Dependency{Name: name, Version: runtimeVersion}, filepath.Join(s.Stager.DepDir(), "dotnet-sdk"))
}
return nil
}
func (s *Supplier) buildpackYamlSdkVersion() (string, error) {
if found, err := libbuildpack.FileExists(filepath.Join(s.Stager.BuildDir(), "buildpack.yml")); err != nil || !found {
return "", err
}
obj := struct {
DotnetCore struct {
Version string `yaml:"sdk"`
} `yaml:"dotnet-core"`
}{}
if err := libbuildpack.NewYAML().Load(filepath.Join(s.Stager.BuildDir(), "buildpack.yml"), &obj); err != nil {
return "", err
}
return obj.DotnetCore.Version, nil
}
func (s *Supplier) globalJsonSdkVersion() (string, error) {
if found, err := libbuildpack.FileExists(filepath.Join(s.Stager.BuildDir(), "global.json")); err != nil || !found {
return "", err
}
obj := struct {
Sdk struct {
Version string `json:"version"`
} `json:"sdk"`
}{}
if err := libbuildpack.NewJSON().Load(filepath.Join(s.Stager.BuildDir(), "global.json"), &obj); err != nil {
return "", err
}
return obj.Sdk.Version, nil
}
func (s *Supplier) CalcChecksum() (string, error) {
h := md5.New()
basepath := s.Stager.BuildDir()
err := filepath.Walk(basepath, func(path string, info os.FileInfo, err error) error {
if info.Mode().IsRegular() {
relpath, err := filepath.Rel(basepath, path)
if strings.HasPrefix(relpath, ".cloudfoundry/") {
return nil
}
if err != nil {
return err
}
if _, err := io.WriteString(h, relpath); err != nil {
return err
}
if f, err := os.Open(path); err != nil {
return err
} else {
if _, err := io.Copy(h, f); err != nil {
return err
}
}
}
return nil
})
if err != nil {
return "", err
}
return fmt.Sprintf("%x", h.Sum(nil)), nil
}
func contains(s []string, e string) bool {
for _, a := range s {
if a == e {
return true
}
}
return false
}
|
[
"\"INSTALL_NODE\""
] |
[] |
[
"INSTALL_NODE"
] |
[]
|
["INSTALL_NODE"]
|
go
| 1 | 0 | |
qa/pull-tester/rpc-tests.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
sys.path.append("qa/pull-tester/")
from tests_config import *
BOLD = ("","")
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
RPC_TESTS_DIR = SRCDIR + '/qa/rpc-tests/'
#If imported values are not defined then set to zero (or disabled)
if 'ENABLE_WALLET' not in vars():
ENABLE_WALLET=0
if 'ENABLE_BITCOIND' not in vars():
ENABLE_BITCOIND=0
if 'ENABLE_UTILS' not in vars():
ENABLE_UTILS=0
if 'ENABLE_ZMQ' not in vars():
ENABLE_ZMQ=0
ENABLE_COVERAGE=0
#Create a set to store arguments and create the passon string
opts = set()
passon_args = []
PASSON_REGEX = re.compile("^--")
PARALLEL_REGEX = re.compile('^-parallel=')
print_help = False
run_parallel = 4
for arg in sys.argv[1:]:
if arg == "--help" or arg == "-h" or arg == "-?":
print_help = True
break
if arg == '--coverage':
ENABLE_COVERAGE = 1
elif PASSON_REGEX.match(arg):
passon_args.append(arg)
elif PARALLEL_REGEX.match(arg):
run_parallel = int(arg.split(sep='=', maxsplit=1)[1])
else:
opts.add(arg)
#Set env vars
if "BITCOIND" not in os.environ:
os.environ["BITCOIND"] = BUILDDIR + '/src/bitdinard' + EXEEXT
if EXEEXT == ".exe" and "-win" not in opts:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print("Win tests currently disabled by default. Use -win option to enable")
sys.exit(0)
if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
print("No rpc tests to run. Wallet, utils, and bitcoind must all be enabled")
sys.exit(0)
# python3-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
try:
import zmq
except ImportError:
print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or "
"to run zmq tests, see dependency info in /qa/README.md.")
# ENABLE_ZMQ=0
raise
testScripts = [
# longest test should go first, to favor running tests in parallel
'wallet-hd.py',
'walletbackup.py',
# vv Tests less than 5m vv
'p2p-fullblocktest.py',
'fundrawtransaction.py',
'p2p-compactblocks.py',
'segwit.py',
# vv Tests less than 2m vv
'wallet.py',
'wallet-accounts.py',
'p2p-segwit.py',
'wallet-dump.py',
'listtransactions.py',
# vv Tests less than 60s vv
'sendheaders.py',
'zapwallettxes.py',
'importmulti.py',
'mempool_limit.py',
'merkle_blocks.py',
'receivedby.py',
'abandonconflict.py',
'bip68-112-113-p2p.py',
'rawtransactions.py',
'reindex.py',
# vv Tests less than 30s vv
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'httpbasics.py',
'multi_rpc.py',
'proxy_test.py',
'signrawtransactions.py',
'nodehandling.py',
'decodescript.py',
'blockchain.py',
'disablewallet.py',
'keypool.py',
'p2p-mempool.py',
'prioritise_transaction.py',
'invalidblockrequest.py',
'invalidtxrequest.py',
'p2p-versionbits-warning.py',
'preciousblock.py',
'importprunedfunds.py',
'signmessages.py',
'nulldummy.py',
'import-rescan.py',
'bumpfee.py',
'rpcnamedargs.py',
'listsinceblock.py',
'p2p-leaktests.py',
'test_script_address2.py'
]
if ENABLE_ZMQ:
testScripts.append('zmq_test.py')
testScriptsExt = [
'pruning.py',
# vv Tests less than 20m vv
'smartfees.py',
# vv Tests less than 5m vv
'maxuploadtarget.py',
'mempool_packages.py',
# vv Tests less than 2m vv
'bip68-sequence.py',
'getblocktemplate_longpoll.py',
'p2p-timeouts.py',
# vv Tests less than 60s vv
'bip9-softforks.py',
'p2p-feefilter.py',
'rpcbind_test.py',
# vv Tests less than 30s vv
'bip65-cltv.py',
'bip65-cltv-p2p.py',
'bipdersig-p2p.py',
'bipdersig.py',
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
'forknotify.py',
'invalidateblock.py',
'maxblocksinflight.py',
'p2p-acceptblock.py',
'replace-by-fee.py',
]
def runtests():
test_list = []
if '-extended' in opts:
test_list = testScripts + testScriptsExt
elif len(opts) == 0 or (len(opts) == 1 and "-win" in opts):
test_list = testScripts
else:
for t in testScripts + testScriptsExt:
if t in opts or re.sub(".py$", "", t) in opts:
test_list.append(t)
if print_help:
# Only print help of the first script and exit
subprocess.check_call((RPC_TESTS_DIR + test_list[0]).split() + ['-h'])
sys.exit(0)
coverage = None
if ENABLE_COVERAGE:
coverage = RPCCoverage()
print("Initializing coverage directory at %s\n" % coverage.dir)
flags = ["--srcdir=%s/src" % BUILDDIR] + passon_args
flags.append("--cachedir=%s/qa/cache" % BUILDDIR)
if coverage:
flags.append(coverage.flag)
if len(test_list) > 1 and run_parallel > 1:
# Populate cache
subprocess.check_output([RPC_TESTS_DIR + 'create_cache.py'] + flags)
#Run Tests
max_len_name = len(max(test_list, key=len))
time_sum = 0
time0 = time.time()
job_queue = RPCTestHandler(run_parallel, test_list, flags)
results = BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "PASSED", "DURATION") + BOLD[0]
all_passed = True
for _ in range(len(test_list)):
(name, stdout, stderr, passed, duration) = job_queue.get_next()
all_passed = all_passed and passed
time_sum += duration
print('\n' + BOLD[1] + name + BOLD[0] + ":")
print('' if passed else stdout + '\n', end='')
print('' if stderr == '' else 'stderr:\n' + stderr + '\n', end='')
results += "%s | %s | %s s\n" % (name.ljust(max_len_name), str(passed).ljust(6), duration)
print("Pass: %s%s%s, Duration: %s s\n" % (BOLD[1], passed, BOLD[0], duration))
results += BOLD[1] + "\n%s | %s | %s s (accumulated)" % ("ALL".ljust(max_len_name), str(all_passed).ljust(6), time_sum) + BOLD[0]
print(results)
print("\nRuntime: %s s" % (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
print("Cleaning up coverage data")
coverage.cleanup()
sys.exit(not all_passed)
class RPCTestHandler:
"""
Trigger the test scripts passed in via the list.
"""
def __init__(self, num_tests_parallel, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.test_list = test_list
self.flags = flags
self.num_running = 0
# In case there is a graveyard of zombie bitcoinds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
self.portseed_offset = int(time.time() * 1000) % 625
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
t = self.test_list.pop(0)
port_seed = ["--portseed={}".format(len(self.test_list) + self.portseed_offset)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
self.jobs.append((t,
time.time(),
subprocess.Popen((RPC_TESTS_DIR + t).split() + self.flags + port_seed,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
while True:
# Return first proc that finishes
time.sleep(.5)
for j in self.jobs:
(name, time0, proc, log_out, log_err) = j
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
log_out.close(), log_err.close()
passed = stderr == "" and proc.returncode == 0
self.num_running -= 1
self.jobs.remove(j)
return name, stdout, stderr, passed, int(time.time() - time0)
print('.', end='', flush=True)
class RPCCoverage(object):
"""
Coverage reporting utilities for pull-tester.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: qa/rpc-tests/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `qa/rpc-tests/test-framework/coverage.py`
REFERENCE_FILENAME = 'rpc_interface.txt'
COVERAGE_FILE_PREFIX = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(COVERAGE_FILE_PREFIX):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
runtests()
|
[] |
[] |
[
"BITCOIND"
] |
[]
|
["BITCOIND"]
|
python
| 1 | 0 | |
main.go
|
package main
import (
"log"
"os"
_ "github.com/go-sql-driver/mysql"
"github.com/jmoiron/sqlx"
"github.com/joho/godotenv"
"github.com/labstack/echo"
)
// db is the shared database connection
var db *sqlx.DB
func init() {
// Build the Twitter API URL and add the Bearer token to the header
err := godotenv.Load()
if err != nil {
log.Fatal("Error loading .env file,", err)
}
// Connect to MySQL
mysqlUser := os.Getenv("MYSQL_USER")
mysqlPass := os.Getenv("MYSQL_PASSWORD")
mysqlProtocol := os.Getenv("MYSQL_PROTOCOL")
mysqlAddr := os.Getenv("MYSQL_ADDRESS")
mysqlDBName := "twicord"
mysqlDSN := mysqlUser + ":" + mysqlPass + "@" + mysqlProtocol + "(" + mysqlAddr + ")" + "/" + mysqlDBName
db, err = sqlx.Connect("mysql", mysqlDSN)
if err != nil {
log.Fatal("Error: connect MySQL,", err)
}
// Define the MySQL schema
schema := `
CREATE TABLE IF NOT EXISTS twitter_user(
id BIGINT UNSIGNED NOT NULL PRIMARY KEY,
screen_name CHAR(50) NOT NULL,
image_url VARCHAR(2500)
);`
db.MustExec(schema)
}
func main() {
e := echo.New()
e.POST("/regist/twitter/:screen_name", HandleRegistByTwitterName)
// Endpoint for fetching all currently registered users, for display as a table on the frontend
// e.GET("/twitter/all", HandleRegistered)
// memo: with Twitter API v2, tweets can be pulled as a stream
// by adding rules, only tweets matching those rules are streamed (each rule is limited to 512 characters, so roughly 20 users' tweets could be streamed at once)
// https://developer.twitter.com/en/docs/twitter-api/tweets/filtered-stream/api-reference/post-tweets-search-stream-rules
e.Start(":8000")
}
|
[
"\"MYSQL_USER\"",
"\"MYSQL_PASSWORD\"",
"\"MYSQL_PROTOCOL\"",
"\"MYSQL_ADDRESS\""
] |
[] |
[
"MYSQL_USER",
"MYSQL_ADDRESS",
"MYSQL_PROTOCOL",
"MYSQL_PASSWORD"
] |
[]
|
["MYSQL_USER", "MYSQL_ADDRESS", "MYSQL_PROTOCOL", "MYSQL_PASSWORD"]
|
go
| 4 | 0 | |
src/os/exec/lp_unix.go
|
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build aix darwin dragonfly freebsd linux nacl netbsd openbsd solaris
package exec
import (
"errors"
"os"
"path/filepath"
"strings"
)
// ErrNotFound is the error resulting if a path search failed to find an executable file.
var ErrNotFound = errors.New("executable file not found in $PATH")
func findExecutable(file string) error {
d, err := os.Stat(file)
if err != nil {
return err
}
if m := d.Mode(); !m.IsDir() && m&0111 != 0 {
return nil
}
return os.ErrPermission
}
// LookPath searches for an executable named file in the
// directories named by the PATH environment variable.
// If file contains a slash, it is tried directly and the PATH is not consulted.
// The result may be an absolute path or a path relative to the current directory.
func LookPath(file string) (string, error) {
// NOTE(rsc): I wish we could use the Plan 9 behavior here
// (only bypass the path if file begins with / or ./ or ../)
// but that would not match all the Unix shells.
if strings.Contains(file, "/") {
err := findExecutable(file)
if err == nil {
return file, nil
}
return "", &Error{file, err}
}
path := os.Getenv("PATH")
for _, dir := range filepath.SplitList(path) {
if dir == "" {
// Unix shell semantics: path element "" means "."
dir = "."
}
path := filepath.Join(dir, file)
if err := findExecutable(path); err == nil {
return path, nil
}
}
return "", &Error{file, ErrNotFound}
}
|
[
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
go
| 1 | 0 | |
worker.py
|
from copy import deepcopy
from functools import partial
import os
from typing import Dict, Tuple, Union
from catboost import CatBoostClassifier, metrics
import ConfigSpace as cs
from hpbandster.core.worker import Worker
import numpy as np
from pytorch_tabnet.tab_model import TabNetClassifier
from sklearn.metrics import balanced_accuracy_score
import torch
import xgboost as xgb
from data.loader import Loader
def balanced_error(
threshold_predictions: bool,
predt: np.ndarray,
dtrain: xgb.DMatrix,
) -> Tuple[str, float]:
"""Calculate the balanced error for the predictions.
Calculate the balanced error. Used as an evaluation metric for
the xgboost algorithm.
Parameters:
-----------
threshold_predictions: bool
If the predictions should be threshold to 0 or 1. Should only be used for
binary classification.
predt: np.ndarray
The predictions of the algorithm.
dtrain: float
The real values for the set.
Returns:
--------
str, float - The name of the evaluation metric and its value on the arguments.
"""
if threshold_predictions:
predt = np.array(predt)
predt = predt > 0.5
predt = predt.astype(int)
else:
predt = np.argmax(predt, axis=1)
y_train = dtrain.get_label()
accuracy_score = balanced_accuracy_score(y_train, predt)
return 'Balanced_error', 1 - accuracy_score
class XGBoostWorker(Worker):
def __init__(self, *args, param=None, **kwargs):
super().__init__(*args, **kwargs)
self.param = deepcopy(param)
self.task_id = self.param['task_id']
self.output_directory = self.param['output_directory']
del self.param['task_id']
del self.param['output_directory']
if self.param['objective'] == 'binary:logistic':
self.threshold_predictions = True
else:
self.threshold_predictions = False
def compute(self, config, budget, **kwargs):
"""What should be computed for one XGBoost worker.
The function takes a configuration and a budget, it
then uses the xgboost algorithm to generate a loss
and other information.
Parameters:
-----------
config: dict
dictionary containing the sampled configurations by the optimizer
budget: float
amount of time/epochs/etc. the model can use to train
Returns:
--------
dict:
With the following mandatory arguments:
'loss' (scalar)
'info' (dict)
"""
xgboost_config = deepcopy(self.param)
xgboost_config.update(config)
if 'num_round' in xgboost_config:
num_rounds = xgboost_config['num_round']
del xgboost_config['num_round']
early_stopping_iterations = None
else:
num_rounds = 4000
early_stopping_iterations = \
xgboost_config['early_stopping_rounds']
del xgboost_config['early_stopping_rounds']
if 'use_imputation' in xgboost_config:
apply_imputation = xgboost_config['use_imputation']
del xgboost_config['use_imputation']
else:
# if no conditional imputation, always use it
apply_imputation = True
if xgboost_config['use_ohe'] == 'True':
use_ohe = True
else:
use_ohe = False
del xgboost_config['use_ohe']
loader = Loader(
task_id=self.task_id,
seed=xgboost_config['seed'],
apply_one_hot_encoding=use_ohe,
apply_imputation=apply_imputation,
)
splits = loader.get_splits()
# not used at the moment
# categorical_information = loader.categorical_information
X_train = splits['X_train']
X_val = splits['X_val']
X_test = splits['X_test']
y_train = splits['y_train']
y_val = splits['y_val']
y_test = splits['y_test']
d_train = xgb.DMatrix(X_train, label=y_train)
d_val = xgb.DMatrix(X_val, label=y_val)
d_test = xgb.DMatrix(X_test, label=y_test)
eval_results = {}
gb_model = xgb.train(
xgboost_config,
d_train,
num_rounds,
feval=partial(balanced_error, self.threshold_predictions),
evals=[(d_train, 'd_train'), (d_val, 'd_val')],
evals_result=eval_results,
early_stopping_rounds=early_stopping_iterations,
)
# TODO Do something with eval_results in the future
# print(eval_results)
# Default value if early stopping is not activated
best_iteration = None
n_tree_limit = None
# early stopping activated and triggered
if hasattr(gb_model, 'best_score'):
n_tree_limit = gb_model.best_ntree_limit
best_iteration = gb_model.best_iteration
print(f'Best iteration for xgboost: {best_iteration}')
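# If early stopping fired, limit predictions to the trees built up to the best
# iteration via ntree_limit; otherwise ntree_limit stays None and the full model is used.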
y_train_preds = gb_model.predict(
d_train,
ntree_limit=n_tree_limit,
)
y_val_preds = gb_model.predict(
d_val,
ntree_limit=n_tree_limit,
)
y_test_preds = gb_model.predict(
d_test,
ntree_limit=n_tree_limit,
)
if self.threshold_predictions:
y_train_preds = np.array(y_train_preds)
y_train_preds = y_train_preds > 0.5
y_train_preds = y_train_preds.astype(int)
y_val_preds = np.array(y_val_preds)
y_val_preds = y_val_preds > 0.5
y_val_preds = y_val_preds.astype(int)
y_test_preds = np.array(y_test_preds)
y_test_preds = y_test_preds > 0.5
y_test_preds = y_test_preds.astype(int)
train_performance = balanced_accuracy_score(y_train, y_train_preds)
val_performance = balanced_accuracy_score(y_val, y_val_preds)
test_performance = balanced_accuracy_score(y_test, y_test_preds)
if val_performance is None or val_performance is np.inf:
val_error_rate = 1
else:
val_error_rate = 1 - val_performance
res = {
'train_accuracy': float(train_performance),
'val_accuracy': float(val_performance),
'test_accuracy': float(test_performance),
'best_round': best_iteration,
}
return ({
'loss': float(val_error_rate), # this is the a mandatory field to run hyperband
'info': res # can be used for any user-defined information - also mandatory
})
def refit(self, config):
"""Runs refit on the best configuration.
The function refits on the best configuration. It then
proceeds to train and test the network, this time combining
the train and validation set together for training. Probably,
in the future, a budget should be added too as an argument to
the parameter.
Parameters:
-----------
config: dict
dictionary containing the sampled configurations by the optimizer
Returns:
--------
res: dict
Dictionary with the train and test accuracy.
"""
xgboost_config = deepcopy(self.param)
xgboost_config.update(config)
if 'num_round' in xgboost_config:
num_rounds = xgboost_config['num_round']
del xgboost_config['num_round']
else:
num_rounds = 4000
if xgboost_config['use_ohe'] == 'True':
use_ohe = True
else:
use_ohe = False
del xgboost_config['use_ohe']
if 'use_imputation' in xgboost_config:
apply_imputation = xgboost_config['use_imputation']
del xgboost_config['use_imputation']
else:
# if no conditional imputation, always use it
apply_imputation = True
loader = Loader(
task_id=self.task_id,
val_fraction=0,
seed=xgboost_config['seed'],
apply_one_hot_encoding=use_ohe,
apply_imputation=apply_imputation,
)
splits = loader.get_splits()
X_train = splits['X_train']
X_test = splits['X_test']
y_train = splits['y_train']
y_test = splits['y_test']
d_train = xgb.DMatrix(X_train, label=y_train)
d_test = xgb.DMatrix(X_test, label=y_test)
eval_results = {}
gb_model = xgb.train(
xgboost_config,
d_train,
num_rounds,
feval=partial(balanced_error, self.threshold_predictions),
evals=[(d_train, 'd_train'), (d_test, 'd_test')],
evals_result=eval_results,
)
gb_model.save_model(
os.path.join(
self.output_directory,
'xgboost_refit_model_dump.json',
)
)
# TODO do something with eval_results
# print(eval_results)
# make prediction
y_train_preds = gb_model.predict(d_train)
y_test_preds = gb_model.predict(d_test)
if self.threshold_predictions:
y_train_preds = np.array(y_train_preds)
y_train_preds = y_train_preds > 0.5
y_train_preds = y_train_preds.astype(int)
y_test_preds = np.array(y_test_preds)
y_test_preds = y_test_preds > 0.5
y_test_preds = y_test_preds.astype(int)
train_performance = balanced_accuracy_score(y_train, y_train_preds)
test_performance = balanced_accuracy_score(y_test, y_test_preds)
if test_performance is None or test_performance is np.inf:
test_performance = 0
res = {
'train_accuracy': float(train_performance),
'test_accuracy': float(test_performance),
}
return res
@staticmethod
def get_default_configspace(
seed: int = 11,
early_stopping: bool = False,
conditional_imputation: bool = False,
) -> cs.ConfigurationSpace:
"""Get the hyperparameter search space.
The function provides the configuration space that is
used to generate the algorithm specific hyperparameter
search space.
Parameters:
-----------
seed: int
The seed used to build the configuration space.
Returns:
--------
config_space: cs.ConfigurationSpace
Configuration space for XGBoost.
"""
config_space = cs.ConfigurationSpace(seed=seed)
# learning rate
config_space.add_hyperparameter(
cs.UniformFloatHyperparameter(
'eta',
lower=0.001,
upper=1,
log=True,
)
)
# l2 regularization
config_space.add_hyperparameter(
cs.UniformFloatHyperparameter(
'lambda',
lower=1E-10,
upper=1,
log=True,
)
)
# l1 regularization
config_space.add_hyperparameter(
cs.UniformFloatHyperparameter(
'alpha',
lower=1E-10,
upper=1,
log=True,
)
)
# not added directly because condition
# has to be applied.
booster = cs.CategoricalHyperparameter(
'booster',
choices=['gbtree', 'dart'],
)
config_space.add_hyperparameter(
booster,
)
rate_drop = cs.UniformFloatHyperparameter(
'rate_drop',
lower=1e-10,
upper=1-(1e-10),
default_value=0.5,
)
config_space.add_hyperparameter(
rate_drop,
)
config_space.add_hyperparameter(
cs.UniformFloatHyperparameter(
'gamma',
lower=0.1,
upper=1,
log=True,
)
)
config_space.add_hyperparameter(
cs.UniformFloatHyperparameter(
'colsample_bylevel',
lower=0.1,
upper=1,
)
)
config_space.add_hyperparameter(
cs.UniformFloatHyperparameter(
'colsample_bynode',
lower=0.1,
upper=1,
)
)
config_space.add_hyperparameter(
cs.UniformFloatHyperparameter(
'colsample_bytree',
lower=0.5,
upper=1,
)
)
config_space.add_hyperparameter(
cs.UniformIntegerHyperparameter(
'max_depth',
lower=1,
upper=20,
)
)
config_space.add_hyperparameter(
cs.UniformIntegerHyperparameter(
'max_delta_step',
lower=0,
upper=10,
)
)
config_space.add_hyperparameter(
cs.UniformFloatHyperparameter(
'min_child_weight',
lower=0.1,
upper=20,
log=True,
)
)
config_space.add_hyperparameter(
cs.UniformFloatHyperparameter(
'subsample',
lower=0.01,
upper=1,
)
)
config_space.add_hyperparameter(
cs.CategoricalHyperparameter(
'use_ohe',
choices=['True', 'False'],
)
)
if conditional_imputation:
config_space.add_hyperparameter(
cs.CategoricalHyperparameter(
'use_imputation',
choices=['True', 'False'],
)
)
# if early stopping is activated, add the
# number of stopping rounds as a hyperparameter.
# Number of rounds is fixed at 4000.
if early_stopping:
config_space.add_hyperparameter(
cs.UniformIntegerHyperparameter(
'early_stopping_rounds',
lower=1,
upper=20,
)
)
else:
# no early stopping activated, number of rounds
# is a hyperparameter.
config_space.add_hyperparameter(
cs.UniformIntegerHyperparameter(
'num_round',
lower=1,
upper=1000,
)
)
config_space.add_condition(
cs.EqualsCondition(
rate_drop,
booster,
'dart',
)
)
return config_space
@staticmethod
def get_parameters(
nr_classes: int,
seed: int = 11,
nr_threads: int = 1,
task_id: int = 233088,
output_directory: str = 'path_to_output',
) -> Dict[str, Union[int, str]]:
"""Get the parameters of the method.
Get a dictionary based on the arguments given to the
function, which will be used as the initial configuration
for the algorithm.
Parameters:
-----------
nr_classes: int
The number of classes in the dataset that will be used
to train the model.
seed: int
The seed that will be used for the model.
nr_threads: int
The number of parallel threads that will be used for
the model.
task_id: int
The id of the task that is used for the experiment.
output_directory: str
The path to the output directory where the results and
model can be stored.
Returns:
--------
param: dict
A dictionary that will be used as a configuration for the
algorithm.
"""
param = {
'disable_default_eval_metric': 1,
'seed': seed,
'nthread': nr_threads,
'task_id': task_id,
'output_directory': output_directory,
}
if nr_classes != 2:
param.update(
{
'objective': 'multi:softmax',
'num_class': nr_classes + 1,
}
)
else:
param.update(
{
'objective': 'binary:logistic',
}
)
return param
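# --- Illustrative sketch (added for clarity, not part of the original file) ---
# A hedged example of how the two static helpers of XGBoostWorker above fit
# together: build the search space, sample one configuration and merge it with
# the fixed parameters, loosely mirroring what compute() does before calling
# xgb.train. Note that 'rate_drop' only appears in a sample when the
# conditional parent 'booster' happens to be 'dart'. The function name is
# hypothetical and nothing here touches any data.
def _example_xgboost_configuration(seed: int = 11) -> dict:
    space = XGBoostWorker.get_default_configspace(seed=seed)
    sampled = space.sample_configuration().get_dictionary()
    fixed = XGBoostWorker.get_parameters(nr_classes=2, seed=seed)
    merged = dict(fixed)
    merged.update(sampled)
    return merged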
class TabNetWorker(Worker):
def __init__(
self,
*args,
param: dict,
**kwargs,
):
super().__init__(*args, **kwargs)
self.param = deepcopy(param)
self.task_id = self.param['task_id']
del self.param['task_id']
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(self.param['seed'])
os.environ['OMP_NUM_THREADS'] = '1'
def compute(self, config: dict, budget: float, **kwargs) -> Dict:
"""What should be computed for one TabNet worker.
The function takes a configuration and a budget, it
then uses the tabnet algorithm to generate a loss
and other information.
Parameters:
-----------
config: dict
dictionary containing the sampled configurations by the optimizer
budget: float
amount of time/epochs/etc. the model can use to train
Returns:
--------
dict:
With the following mandatory arguments:
'loss' (scalar)
'info' (dict)
"""
# Always activate imputation for TabNet.
# No encoding needed, TabNet makes its own embeddings.
loader = Loader(
task_id=self.task_id,
seed=self.param['seed'],
apply_one_hot_encoding=False,
apply_imputation=True,
)
splits = loader.get_splits()
X_train = splits['X_train']
X_val = splits['X_val']
X_test = splits['X_test']
y_train = splits['y_train']
y_val = splits['y_val']
y_test = splits['y_test']
categorical_information = loader.categorical_information
assert categorical_information is not None
_ = categorical_information['categorical_ind']
categorical_columns = categorical_information['categorical_columns']
categorical_dimensions = categorical_information['categorical_dimensions']
# Default value if early stopping is not activated
best_iteration = None
clf = TabNetClassifier(
n_a=config['na'],
n_d=config['na'],
n_steps=config['nsteps'],
gamma=config['gamma'],
lambda_sparse=config['lambda_sparse'],
momentum=config['mb'],
cat_idxs=categorical_columns,
cat_dims=categorical_dimensions,
seed=self.param['seed'],
optimizer_params={
'lr': config['learning_rate'],
},
scheduler_params={
'step_size': config['decay_iterations'],
'gamma': config['decay_rate'],
},
scheduler_fn=torch.optim.lr_scheduler.StepLR,
)
batch_size = config['batch_size']
if batch_size == 32768:
vbatch_size = config['vbatch_size1']
elif batch_size == 16384:
vbatch_size = config['vbatch_size2']
elif batch_size == 8192:
vbatch_size = config['vbatch_size3']
elif batch_size == 4096:
vbatch_size = config['vbatch_size4']
elif batch_size == 2048:
vbatch_size = config['vbatch_size5']
elif batch_size == 1024:
vbatch_size = config['vbatch_size6']
elif batch_size == 512:
vbatch_size = config['vbatch_size7']
elif batch_size == 256:
vbatch_size = config['vbatch_size8']
else:
raise ValueError('Illegal batch size given')
early_stopping_activated = True if 'early_stopping_rounds' in config else False
clf.fit(
X_train=X_train,
y_train=y_train,
batch_size=batch_size,
virtual_batch_size=vbatch_size,
eval_set=[(X_val, y_val)],
eval_name=['Validation'],
eval_metric=['balanced_accuracy'],
max_epochs=200,
patience=config['early_stopping_rounds'] if early_stopping_activated else 0,
)
if early_stopping_activated:
best_iteration = clf.best_epoch
y_train_preds = clf.predict(X_train)
y_val_preds = clf.predict(X_val)
y_test_preds = clf.predict(X_test)
train_performance = balanced_accuracy_score(y_train, y_train_preds)
val_performance = balanced_accuracy_score(y_val, y_val_preds)
test_performance = balanced_accuracy_score(y_test, y_test_preds)
if val_performance is None or val_performance is np.inf:
val_error_rate = 1
else:
val_error_rate = 1 - val_performance
res = {
'train_accuracy': float(train_performance),
'val_accuracy': float(val_performance),
'test_accuracy': float(test_performance),
'best_round': best_iteration,
}
return ({
'loss': float(val_error_rate), # this is a mandatory field to run hyperband
'info': res # can be used for any user-defined information - also mandatory
})
def refit(self, config: dict) -> Dict:
"""Runs refit on the best configuration.
The function refits on the best configuration. It then
proceeds to train and test the network, this time combining
the train and validation set together for training. In the
future, a budget could also be added as an argument to this
method.
Parameters:
-----------
config: dict
dictionary containing the sampled configurations by the optimizer
Returns:
--------
res: dict
Dictionary with the train and test accuracy.
"""
# early stopping was activated in this experiment
if 'max_epochs' in config:
max_epochs = config['max_epochs']
else:
max_epochs = 200
# Always activate imputation for TabNet.
# No encoding needed, TabNet makes its own embeddings
loader = Loader(
task_id=self.task_id,
val_fraction=0,
seed=self.param['seed'],
apply_one_hot_encoding=False,
apply_imputation=True,
)
splits = loader.get_splits()
categorical_information = loader.categorical_information
assert categorical_information is not None
_ = categorical_information['categorical_ind']
categorical_columns = categorical_information['categorical_columns']
categorical_dimensions = categorical_information['categorical_dimensions']
X_train = splits['X_train']
X_test = splits['X_test']
y_train = splits['y_train']
y_test = splits['y_test']
clf = TabNetClassifier(
n_a=config['na'],
n_d=config['na'],
n_steps=config['nsteps'],
gamma=config['gamma'],
lambda_sparse=config['lambda_sparse'],
momentum=config['mb'],
cat_idxs=categorical_columns,
cat_dims=categorical_dimensions,
seed=self.param['seed'],
optimizer_params={
'lr': config['learning_rate'],
},
scheduler_params={
'step_size': config['decay_iterations'],
'gamma': config['decay_rate'],
},
scheduler_fn=torch.optim.lr_scheduler.StepLR,
)
batch_size = config['batch_size']
if batch_size == 32768:
vbatch_size = config['vbatch_size1']
elif batch_size == 16384:
vbatch_size = config['vbatch_size2']
elif batch_size == 8192:
vbatch_size = config['vbatch_size3']
elif batch_size == 4096:
vbatch_size = config['vbatch_size4']
elif batch_size == 2048:
vbatch_size = config['vbatch_size5']
elif batch_size == 1024:
vbatch_size = config['vbatch_size6']
elif batch_size == 512:
vbatch_size = config['vbatch_size7']
elif batch_size == 256:
vbatch_size = config['vbatch_size8']
else:
raise ValueError('Illegal batch size given')
clf.fit(
X_train=X_train, y_train=y_train,
batch_size=batch_size,
virtual_batch_size=vbatch_size,
eval_metric=['balanced_accuracy'],
max_epochs=max_epochs,
patience=0,
)
y_train_preds = clf.predict(X_train)
y_test_preds = clf.predict(X_test)
train_performance = balanced_accuracy_score(y_train, y_train_preds)
test_performance = balanced_accuracy_score(y_test, y_test_preds)
if test_performance is None or test_performance is np.inf:
test_performance = 0
res = {
'train_accuracy': float(train_performance),
'test_accuracy': float(test_performance),
}
return res
@staticmethod
def get_default_configspace(
seed: int = 11,
) -> cs.ConfigurationSpace:
"""Get the hyperparameter search space.
The function provides the configuration space that is
used to generate the algorithm specific hyperparameter
search space.
Parameters:
-----------
seed: int
The seed used to build the configuration space.
Returns:
--------
config_space: cs.ConfigurationSpace
Configuration space for TabNet.
"""
config_space = cs.ConfigurationSpace(seed=seed)
# learning rate
config_space.add_hyperparameter(
cs.CategoricalHyperparameter(
'na',
choices=[8, 16, 24, 32, 64, 128],
)
)
config_space.add_hyperparameter(
cs.CategoricalHyperparameter(
'learning_rate',
choices=[0.005, 0.01, 0.02, 0.025],
)
)
config_space.add_hyperparameter(
cs.CategoricalHyperparameter(
'gamma',
choices=[1.0, 1.2, 1.5, 2.0],
)
)
config_space.add_hyperparameter(
cs.CategoricalHyperparameter(
'nsteps',
choices=[3, 4, 5, 6, 7, 8, 9, 10],
)
)
config_space.add_hyperparameter(
cs.CategoricalHyperparameter(
'lambda_sparse',
choices=[0, 0.000001, 0.0001, 0.001, 0.01, 0.1],
)
)
batch_size = cs.CategoricalHyperparameter(
'batch_size',
choices=[256, 512, 1024, 2048, 4096, 8192, 16384, 32768],
)
vbatch_size1 = cs.CategoricalHyperparameter(
'vbatch_size1',
choices=[256, 512, 1024, 2048, 4096],
)
vbatch_size2 = cs.CategoricalHyperparameter(
'vbatch_size2',
choices=[256, 512, 1024, 2048, 4096],
)
vbatch_size3 = cs.CategoricalHyperparameter(
'vbatch_size3',
choices=[256, 512, 1024, 2048, 4096],
)
vbatch_size4 = cs.CategoricalHyperparameter(
'vbatch_size4',
choices=[256, 512, 1024, 2048],
)
vbatch_size5 = cs.CategoricalHyperparameter(
'vbatch_size5',
choices=[256, 512, 1024],
)
vbatch_size6 = cs.CategoricalHyperparameter(
'vbatch_size6',
choices=[256, 512],
)
vbatch_size7 = cs.Constant(
'vbatch_size7',
256
)
vbatch_size8 = cs.Constant(
'vbatch_size8',
256
)
config_space.add_hyperparameter(
batch_size
)
config_space.add_hyperparameters(
[
vbatch_size1,
vbatch_size2,
vbatch_size3,
vbatch_size4,
vbatch_size5,
vbatch_size6,
vbatch_size7,
vbatch_size8,
]
)
config_space.add_hyperparameter(
cs.CategoricalHyperparameter(
'decay_rate',
choices=[0.4, 0.8, 0.9, 0.95],
)
)
config_space.add_hyperparameter(
cs.CategoricalHyperparameter(
'decay_iterations',
choices=[500, 2000, 8000, 10000, 20000],
)
)
config_space.add_hyperparameter(
cs.UniformIntegerHyperparameter(
'early_stopping_rounds',
lower=1,
upper=20,
)
)
config_space.add_hyperparameter(
cs.CategoricalHyperparameter(
'mb',
choices=[0.6, 0.7, 0.8, 0.9, 0.95, 0.98],
)
)
config_space.add_condition(
cs.EqualsCondition(
vbatch_size1,
batch_size,
32768,
)
)
config_space.add_condition(
cs.EqualsCondition(
vbatch_size2,
batch_size,
16384,
)
)
config_space.add_condition(
cs.EqualsCondition(
vbatch_size3,
batch_size,
8192,
)
)
config_space.add_condition(
cs.EqualsCondition(
vbatch_size4,
batch_size,
4096,
)
)
config_space.add_condition(
cs.EqualsCondition(
vbatch_size5,
batch_size,
2048,
)
)
config_space.add_condition(
cs.EqualsCondition(
vbatch_size6,
batch_size,
1024,
)
)
config_space.add_condition(
cs.EqualsCondition(
vbatch_size7,
batch_size,
512,
)
)
config_space.add_condition(
cs.EqualsCondition(
vbatch_size8,
batch_size,
256,
)
)
return config_space
@staticmethod
def get_parameters(
seed: int = 11,
task_id: int = 233088,
) -> Dict[str, Union[int, str]]:
"""Get the parameters of the method.
Get a dictionary based on the arguments given to the
function, which will be used as the initial configuration
for the algorithm.
Parameters:
-----------
seed: int
The seed that will be used for the model.
task_id: int
The id of the task that will be used for the experiment.
Returns:
--------
param: dict
A dictionary that will be used as a configuration for the
algorithm.
"""
param = {
'task_id': task_id,
'seed': seed,
}
return param
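# --- Illustrative sketch (added for clarity, not part of the original file) ---
# The if/elif chains in TabNetWorker.compute and TabNetWorker.refit map each
# sampled 'batch_size' to its own conditional virtual-batch-size
# hyperparameter. This hypothetical helper shows the same lookup in table
# form; it assumes a configuration sampled from the space defined above.
def _resolve_virtual_batch_size(config: dict) -> int:
    vbatch_parameter_by_batch_size = {
        32768: 'vbatch_size1',
        16384: 'vbatch_size2',
        8192: 'vbatch_size3',
        4096: 'vbatch_size4',
        2048: 'vbatch_size5',
        1024: 'vbatch_size6',
        512: 'vbatch_size7',
        256: 'vbatch_size8',
    }
    batch_size = config['batch_size']
    if batch_size not in vbatch_parameter_by_batch_size:
        raise ValueError('Illegal batch size given')
    return config[vbatch_parameter_by_batch_size[batch_size]]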
class CatBoostWorker(Worker):
def __init__(
self,
*args,
param: dict,
**kwargs,
):
super().__init__(*args, **kwargs)
self.param = deepcopy(param)
self.task_id = self.param['task_id']
self.seed = self.param['seed']
self.output_directory = self.param['output_directory']
del self.param['task_id']
del self.param['seed']
del self.param['output_directory']
def compute(self, config: dict, budget: float, **kwargs) -> Dict:
"""What should be computed for one CatBoost worker.
The function takes a configuration and a budget, it
then uses the CatBoost algorithm to generate a loss
and other information.
Parameters:
-----------
config: dict
dictionary containing the sampled configurations by the optimizer
budget: float
amount of time/epochs/etc. the model can use to train
Returns:
--------
dict:
With the following mandatory arguments:
'loss' (scalar)
'info' (dict)
"""
# budget at the moment is not used because we do
# not make use of multi-fidelity
# Always activate imputation for CatBoost.
# No encoding needed, CatBoost deals with it
# natively.
loader = Loader(
task_id=self.task_id,
seed=self.seed,
apply_one_hot_encoding=False,
apply_imputation=True,
)
splits = loader.get_splits()
X_train = splits['X_train']
X_val = splits['X_val']
X_test = splits['X_test']
y_train = splits['y_train']
y_val = splits['y_val']
y_test = splits['y_test']
categorical_information = loader.categorical_information
assert categorical_information is not None
categorical_feature_indices = loader.categorical_information['categorical_columns']
# Default value if early stopping is not activated
best_iteration = None
params = {
'iterations': config['iterations'],
'learning_rate': config['learning_rate'],
'random_strength': config['random_strength'],
'one_hot_max_size': config['one_hot_max_size'],
'random_seed': self.seed,
'l2_leaf_reg': config['l2_leaf_reg'],
'bagging_temperature': config['bagging_temperature'],
'leaf_estimation_iterations': config['leaf_estimation_iterations'],
}
model = CatBoostClassifier(
**params,
)
model.fit(
X_train,
y_train,
cat_features=categorical_feature_indices,
eval_set=(X_val, y_val),
plot=False,
)
y_train_preds = model.predict(X_train)
y_val_preds = model.predict(X_val)
y_test_preds = model.predict(X_test)
train_performance = balanced_accuracy_score(y_train, y_train_preds)
val_performance = balanced_accuracy_score(y_val, y_val_preds)
test_performance = balanced_accuracy_score(y_test, y_test_preds)
if val_performance is None or val_performance is np.inf:
val_error_rate = 1
else:
val_error_rate = 1 - val_performance
res = {
'train_accuracy': float(train_performance),
'val_accuracy': float(val_performance),
'test_accuracy': float(test_performance),
'best_round': best_iteration,
}
return ({
'loss': float(val_error_rate), # this is a mandatory field to run hyperband
'info': res # can be used for any user-defined information - also mandatory
})
def refit(self, config: dict) -> Dict:
"""Runs refit on the best configuration.
The function refits on the best configuration. It then
proceeds to train and test the gradient-boosted model, this time
combining the train and validation set together for training. In
the future, a budget could also be added as an argument to this
method.
Parameters:
-----------
config: dict
dictionary containing the sampled configurations by the optimizer
Returns:
--------
res: dict
Dictionary with the train and test accuracy.
"""
# Always activate imputation for CatBoost.
loader = Loader(
task_id=self.task_id,
val_fraction=0,
seed=self.seed,
apply_one_hot_encoding=False,
apply_imputation=True,
)
splits = loader.get_splits()
X_train = splits['X_train']
X_test = splits['X_test']
y_train = splits['y_train']
y_test = splits['y_test']
categorical_information = loader.categorical_information
assert categorical_information is not None
categorical_feature_indices = loader.categorical_information['categorical_columns']
params = {
'iterations': config['iterations'],
'learning_rate': config['learning_rate'],
'random_strength': config['random_strength'],
'one_hot_max_size': config['one_hot_max_size'],
'random_seed': self.seed,
'l2_leaf_reg': config['l2_leaf_reg'],
'bagging_temperature': config['bagging_temperature'],
'leaf_estimation_iterations': config['leaf_estimation_iterations'],
}
model = CatBoostClassifier(
**params,
)
model.fit(
X_train,
y_train,
cat_features=categorical_feature_indices,
plot=False,
)
model.save_model(
os.path.join(
self.output_directory,
'catboost_refit_model.dump',
)
)
y_train_preds = model.predict(X_train)
y_test_preds = model.predict(X_test)
train_performance = balanced_accuracy_score(y_train, y_train_preds)
test_performance = balanced_accuracy_score(y_test, y_test_preds)
if test_performance is None or test_performance is np.inf:
test_performance = 0
res = {
'train_accuracy': float(train_performance),
'test_accuracy': float(test_performance),
}
return res
@staticmethod
def get_default_configspace(
seed: int = 11,
) -> cs.ConfigurationSpace:
"""Get the hyperparameter search space.
The function provides the configuration space that is
used to generate the algorithm specific hyperparameter
search space.
Parameters:
-----------
seed: int
The seed used to build the configuration space.
Returns:
--------
config_space: cs.ConfigurationSpace
Configuration space for CatBoost.
"""
config_space = cs.ConfigurationSpace(seed=seed)
config_space.add_hyperparameter(
cs.UniformIntegerHyperparameter(
'iterations',
lower=1,
upper=1000,
)
)
config_space.add_hyperparameter(
cs.UniformFloatHyperparameter(
'learning_rate',
lower=1e-7,
upper=1,
log=True,
)
)
config_space.add_hyperparameter(
cs.UniformIntegerHyperparameter(
'random_strength',
lower=1,
upper=20,
)
)
config_space.add_hyperparameter(
cs.UniformIntegerHyperparameter(
'one_hot_max_size',
lower=0,
upper=25,
)
)
config_space.add_hyperparameter(
cs.UniformFloatHyperparameter(
'l2_leaf_reg',
lower=1,
upper=10,
log=True,
)
)
config_space.add_hyperparameter(
cs.UniformFloatHyperparameter(
'bagging_temperature',
lower=0,
upper=1,
)
)
config_space.add_hyperparameter(
cs.UniformIntegerHyperparameter(
'leaf_estimation_iterations',
lower=1,
upper=10,
)
)
return config_space
@staticmethod
def get_parameters(
seed: int = 11,
nr_classes: int = 2,
task_id: int = 233088,
output_directory: str = 'path_to_output',
) -> Dict[str, Union[int, str]]:
"""Get the parameters of the method.
Get a dictionary based on the arguments given to the
function, which will be used as the initial configuration
for the algorithm.
Parameters:
-----------
seed: int
The seed that will be used for the model.
nr_classes: int
The number of classes in the dataset, which in turn
will be used to determine the loss.
task_id: int
The id of the task that will be used for the experiment.
output_directory: str
The path where the output results will be stored.
Returns:
--------
param: dict
A dictionary that will be used as a configuration for the
algorithm.
"""
param = {
'task_id': task_id,
'seed': seed,
'output_directory': output_directory,
}
if nr_classes != 2:
param.update(
{
'loss_function': 'MultiClass',
'eval_metric': metrics.Accuracy(),
}
)
else:
param.update(
{
'loss_function': 'Logloss',
'eval_metric': metrics.BalancedAccuracy(),
}
)
return param
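# --- Illustrative sketch (added for clarity, not part of the original file) ---
# XGBoostWorker, TabNetWorker and CatBoostWorker all expose the same
# compute/refit/get_default_configspace/get_parameters interface, so an
# experiment script could pick one by name. The dictionary and function name
# below are hypothetical.
def _worker_class_by_name(name: str):
    workers = {
        'xgboost': XGBoostWorker,
        'tabnet': TabNetWorker,
        'catboost': CatBoostWorker,
    }
    if name not in workers:
        raise ValueError(f'Unknown worker name: {name}')
    return workers[name]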
|
[] |
[] |
[
"OMP_NUM_THREADS"
] |
[]
|
["OMP_NUM_THREADS"]
|
python
| 1 | 0 | |
handlers/doctor.go
|
package handlers
import (
"os"
"github.com/apex/log"
"github.com/metrue/fx/constants"
"github.com/metrue/fx/context"
"github.com/metrue/fx/doctor"
)
// Doctor command handle
func Doctor() HandleFunc {
return func(ctx *context.Context) error {
host := os.Getenv("DOCKER_REMOTE_HOST_ADDR")
user := os.Getenv("DOCKER_REMOTE_HOST_USER")
password := os.Getenv("DOCKER_REMOTE_HOST_PASSWORD")
if host == "" {
host = "localhost"
}
if err := doctor.New(host, user, password).Start(); err != nil {
log.Warnf("machine %s is in dirty state: %v", host, err)
} else {
log.Infof("machine %s is in healthy state: %s", host, constants.CheckedSymbol)
}
return nil
}
}
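// hostOrDefault is a minimal sketch added for illustration (the helper is
// hypothetical and not part of fx): it isolates the environment handling used
// by Doctor above, falling back to "localhost" when DOCKER_REMOTE_HOST_ADDR
// is unset.
func hostOrDefault() string {
	if host := os.Getenv("DOCKER_REMOTE_HOST_ADDR"); host != "" {
		return host
	}
	return "localhost"
}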
|
[
"\"DOCKER_REMOTE_HOST_ADDR\"",
"\"DOCKER_REMOTE_HOST_USER\"",
"\"DOCKER_REMOTE_HOST_PASSWORD\""
] |
[] |
[
"DOCKER_REMOTE_HOST_USER",
"DOCKER_REMOTE_HOST_ADDR",
"DOCKER_REMOTE_HOST_PASSWORD"
] |
[]
|
["DOCKER_REMOTE_HOST_USER", "DOCKER_REMOTE_HOST_ADDR", "DOCKER_REMOTE_HOST_PASSWORD"]
|
go
| 3 | 0 | |
scripts/west_commands/build.py
|
# Copyright (c) 2018 Foundries.io
#
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import pathlib
import shlex
import sys
from west import log
from west.configuration import config
from zcmake import DEFAULT_CMAKE_GENERATOR, run_cmake, run_build, CMakeCache
from build_helpers import is_zephyr_build, find_build_dir, \
FIND_BUILD_DIR_DESCRIPTION
from zephyr_ext_common import Forceable
_ARG_SEPARATOR = '--'
BUILD_USAGE = '''\
west build [-h] [-b BOARD] [-d BUILD_DIR]
[-t TARGET] [-p {auto, always, never}] [-c] [--cmake-only]
[-n] [-o BUILD_OPT] [-f]
[source_dir] -- [cmake_opt [cmake_opt ...]]
'''
BUILD_DESCRIPTION = f'''\
Convenience wrapper for building Zephyr applications.
{FIND_BUILD_DIR_DESCRIPTION}
positional arguments:
source_dir application source directory
cmake_opt extra options to pass to cmake; implies -c
(these must come after "--" as shown above)
'''
PRISTINE_DESCRIPTION = """\
A "pristine" build directory is empty. The -p option controls
whether the build directory is made pristine before the build
is done. A bare '--pristine' with no value is the same as
--pristine=always. Setting --pristine=auto uses heuristics to
guess if a pristine build may be necessary."""
def _banner(msg):
log.inf('-- west build: ' + msg, colorize=True)
def config_get(option, fallback):
return config.get('build', option, fallback=fallback)
def config_getboolean(option, fallback):
return config.getboolean('build', option, fallback=fallback)
class AlwaysIfMissing(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values or 'always')
class Build(Forceable):
def __init__(self):
super(Build, self).__init__(
'build',
# Keep this in sync with the string in west-commands.yml.
'compile a Zephyr application',
BUILD_DESCRIPTION,
accepts_unknown_args=True)
self.source_dir = None
'''Source directory for the build, or None on error.'''
self.build_dir = None
'''Final build directory used to run the build, or None on error.'''
self.created_build_dir = False
'''True if the build directory was created; False otherwise.'''
self.run_cmake = False
'''True if CMake was run; False otherwise.
Note: this only describes CMake runs done by this command. The
build system generated by CMake may also update itself due to
internal logic.'''
self.cmake_cache = None
'''Final parsed CMake cache for the build, or None on error.'''
def do_add_parser(self, parser_adder):
parser = parser_adder.add_parser(
self.name,
help=self.help,
formatter_class=argparse.RawDescriptionHelpFormatter,
description=self.description,
usage=BUILD_USAGE)
# Remember to update scripts/west-completion.bash if you add or remove
# flags
parser.add_argument('-b', '--board', help='board to build for')
# Hidden option for backwards compatibility
parser.add_argument('-s', '--source-dir', help=argparse.SUPPRESS)
parser.add_argument('-d', '--build-dir',
help='build directory to create or use')
self.add_force_arg(parser)
group = parser.add_argument_group('cmake and build tool')
group.add_argument('-c', '--cmake', action='store_true',
help='force a cmake run')
group.add_argument('--cmake-only', action='store_true',
help="just run cmake; don't build (implies -c)")
group.add_argument('-t', '--target',
help='''run this build system target (try "-t usage"
or "-t help")''')
group.add_argument('-o', '--build-opt', default=[], action='append',
help='''options to pass to the build tool
(make or ninja); may be given more than once''')
group.add_argument('-n', '--just-print', '--dry-run', '--recon',
dest='dry_run', action='store_true',
help="just print build commands; don't run them")
group = parser.add_argument_group('pristine builds',
PRISTINE_DESCRIPTION)
group.add_argument('-p', '--pristine', choices=['auto', 'always',
'never'], action=AlwaysIfMissing, nargs='?',
help='pristine build folder setting')
return parser
def do_run(self, args, remainder):
self.args = args # Avoid having to pass them around
self.config_board = config_get('board', None)
log.dbg('args: {} remainder: {}'.format(args, remainder),
level=log.VERBOSE_EXTREME)
# Store legacy -s option locally
source_dir = self.args.source_dir
self._parse_remainder(remainder)
if source_dir:
if self.args.source_dir:
log.die("source directory specified twice:({} and {})".format(
source_dir, self.args.source_dir))
self.args.source_dir = source_dir
log.dbg('source_dir: {} cmake_opts: {}'.format(self.args.source_dir,
self.args.cmake_opts),
level=log.VERBOSE_EXTREME)
self._sanity_precheck()
self._setup_build_dir()
if args.pristine is not None:
pristine = args.pristine
else:
# Load the pristine={auto, always, never} configuration value
pristine = config_get('pristine', 'auto')
if pristine not in ['auto', 'always', 'never']:
log.wrn(
'treating unknown build.pristine value "{}" as "never"'.
format(pristine))
pristine = 'never'
self.auto_pristine = (pristine == 'auto')
log.dbg('pristine: {} auto_pristine: {}'.format(pristine,
self.auto_pristine),
level=log.VERBOSE_VERY)
if is_zephyr_build(self.build_dir):
if pristine == 'always':
self._run_pristine()
self.run_cmake = True
else:
self._update_cache()
if (self.args.cmake or self.args.cmake_opts or
self.args.cmake_only):
self.run_cmake = True
else:
self.run_cmake = True
self.source_dir = self._find_source_dir()
self._sanity_check()
board, origin = self._find_board()
self._run_cmake(board, origin, self.args.cmake_opts)
if args.cmake_only:
return
self._sanity_check()
self._update_cache()
self._run_build(args.target)
def _find_board(self):
board, origin = None, None
if self.cmake_cache:
board, origin = (self.cmake_cache.get('CACHED_BOARD'),
'CMakeCache.txt')
elif self.args.board:
board, origin = self.args.board, 'command line'
elif 'BOARD' in os.environ:
board, origin = os.environ['BOARD'], 'env'
elif self.config_board is not None:
board, origin = self.config_board, 'configfile'
return board, origin
def _parse_remainder(self, remainder):
self.args.source_dir = None
self.args.cmake_opts = None
try:
# Only one source_dir is allowed, as the first positional arg
if remainder[0] != _ARG_SEPARATOR:
self.args.source_dir = remainder[0]
remainder = remainder[1:]
# Only the first argument separator is consumed, the rest are
# passed on to CMake
if remainder[0] == _ARG_SEPARATOR:
remainder = remainder[1:]
if remainder:
self.args.cmake_opts = remainder
except IndexError:
return
def _sanity_precheck(self):
app = self.args.source_dir
if app:
self.check_force(
os.path.isdir(app),
'source directory {} does not exist'.format(app))
self.check_force(
'CMakeLists.txt' in os.listdir(app),
"{} doesn't contain a CMakeLists.txt".format(app))
def _update_cache(self):
try:
self.cmake_cache = CMakeCache.from_build_dir(self.build_dir)
except FileNotFoundError:
pass
def _setup_build_dir(self):
# Initialize build_dir and created_build_dir attributes.
# If we created the build directory, we must run CMake.
log.dbg('setting up build directory', level=log.VERBOSE_EXTREME)
# The CMake Cache has not been loaded yet, so this is safe
board, _ = self._find_board()
source_dir = self._find_source_dir()
app = os.path.split(source_dir)[1]
build_dir = find_build_dir(self.args.build_dir, board=board,
source_dir=source_dir, app=app)
if not build_dir:
log.die('Unable to determine a default build folder. Check '
'your build.dir-fmt configuration option')
if os.path.exists(build_dir):
if not os.path.isdir(build_dir):
log.die('build directory {} exists and is not a directory'.
format(build_dir))
else:
os.makedirs(build_dir, exist_ok=False)
self.created_build_dir = True
self.run_cmake = True
self.build_dir = build_dir
def _find_source_dir(self):
# Initialize source_dir attribute, either from command line argument,
# implicitly from the build directory's CMake cache, or using the
# default (current working directory).
log.dbg('setting up source directory', level=log.VERBOSE_EXTREME)
if self.args.source_dir:
source_dir = self.args.source_dir
elif self.cmake_cache:
source_dir = self.cmake_cache.get('CMAKE_HOME_DIRECTORY')
if not source_dir:
# This really ought to be there. The build directory
# must be corrupted somehow. Let's see what we can do.
log.die('build directory', self.build_dir,
'CMake cache has no CMAKE_HOME_DIRECTORY;',
'please give a source_dir')
else:
source_dir = os.getcwd()
return os.path.abspath(source_dir)
def _sanity_check_source_dir(self):
if self.source_dir == self.build_dir:
# There's no forcing this.
log.die('source and build directory {} cannot be the same; '
'use --build-dir {} to specify a build directory'.
format(self.source_dir, self.build_dir))
srcrel = os.path.relpath(self.source_dir)
self.check_force(
not is_zephyr_build(self.source_dir),
'it looks like {srcrel} is a build directory: '
'did you mean --build-dir {srcrel} instead?'.
format(srcrel=srcrel))
self.check_force(
'CMakeLists.txt' in os.listdir(self.source_dir),
'source directory "{srcrel}" does not contain '
'a CMakeLists.txt; is this really what you '
'want to build? (Use -s SOURCE_DIR to specify '
'the application source directory)'.
format(srcrel=srcrel))
def _sanity_check(self):
# Sanity check the build configuration.
# Side effect: may update cmake_cache attribute.
log.dbg('sanity checking the build', level=log.VERBOSE_EXTREME)
self._sanity_check_source_dir()
if not self.cmake_cache:
return # That's all we can check without a cache.
if "CMAKE_PROJECT_NAME" not in self.cmake_cache:
# This happens sometimes when a build system is not
# completely generated due to an error during the
# CMake configuration phase.
self.run_cmake = True
cached_app = self.cmake_cache.get('APPLICATION_SOURCE_DIR')
log.dbg('APPLICATION_SOURCE_DIR:', cached_app,
level=log.VERBOSE_EXTREME)
source_abs = (os.path.abspath(self.args.source_dir)
if self.args.source_dir else None)
cached_abs = os.path.abspath(cached_app) if cached_app else None
log.dbg('pristine:', self.auto_pristine, level=log.VERBOSE_EXTREME)
# If the build directory specifies a source app, make sure it's
# consistent with --source-dir.
apps_mismatched = (source_abs and cached_abs and
pathlib.PurePath(source_abs) != pathlib.PurePath(cached_abs))
self.check_force(
not apps_mismatched or self.auto_pristine,
'Build directory "{}" is for application "{}", but source '
'directory "{}" was specified; please clean it, use --pristine, '
'or use --build-dir to set another build directory'.
format(self.build_dir, cached_abs, source_abs))
if apps_mismatched:
self.run_cmake = True # If they insist, we need to re-run cmake.
# If CACHED_BOARD is not defined, we need some other way to
# find the board.
cached_board = self.cmake_cache.get('CACHED_BOARD')
log.dbg('CACHED_BOARD:', cached_board, level=log.VERBOSE_EXTREME)
# If apps_mismatched and self.auto_pristine are true, we will
# run pristine on the build, invalidating the cached
# board. In that case, we need some way of getting the board.
self.check_force((cached_board and
not (apps_mismatched and self.auto_pristine))
or self.args.board or self.config_board or
os.environ.get('BOARD'),
'Cached board not defined, please provide it '
'(provide --board, set default with '
'"west config build.board <BOARD>", or set '
'BOARD in the environment)')
# Check consistency between cached board and --board.
boards_mismatched = (self.args.board and cached_board and
self.args.board != cached_board)
self.check_force(
not boards_mismatched or self.auto_pristine,
'Build directory {} targets board {}, but board {} was specified. '
'(Clean the directory, use --pristine, or use --build-dir to '
'specify a different one.)'.
format(self.build_dir, cached_board, self.args.board))
if self.auto_pristine and (apps_mismatched or boards_mismatched):
self._run_pristine()
self.cmake_cache = None
log.dbg('run_cmake:', True, level=log.VERBOSE_EXTREME)
self.run_cmake = True
# Tricky corner-case: The user has not specified a build folder but
# there was one in the CMake cache. Since this is going to be
# invalidated, reset to CWD and re-run the basic tests.
if ((boards_mismatched and not apps_mismatched) and
(not source_abs and cached_abs)):
self.source_dir = self._find_source_dir()
self._sanity_check_source_dir()
def _run_cmake(self, board, origin, cmake_opts):
if board is None and config_getboolean('board_warn', True):
log.wrn('This looks like a fresh build and BOARD is unknown;',
"so it probably won't work. To fix, use",
'--board=<your-board>.')
log.inf('Note: to silence the above message, run',
"'west config build.board_warn false'")
if not self.run_cmake:
return
_banner('generating a build system')
if board is not None and origin != 'CMakeCache.txt':
cmake_opts = ['-DBOARD={}'.format(board)]
else:
cmake_opts = []
if self.args.cmake_opts:
cmake_opts.extend(self.args.cmake_opts)
user_args = config_get('cmake-args', None)
if user_args:
cmake_opts.extend(shlex.split(user_args))
# Invoke CMake from the current working directory using the
# -S and -B options (officially introduced in CMake 3.13.0).
# This is important because users expect invocations like this
# to Just Work:
#
# west build -- -DOVERLAY_CONFIG=relative-path.conf
final_cmake_args = ['-DWEST_PYTHON={}'.format(sys.executable),
'-B{}'.format(self.build_dir),
'-S{}'.format(self.source_dir),
'-G{}'.format(config_get('generator',
DEFAULT_CMAKE_GENERATOR))]
if cmake_opts:
final_cmake_args.extend(cmake_opts)
run_cmake(final_cmake_args, dry_run=self.args.dry_run)
def _run_pristine(self):
_banner('making build dir {} pristine'.format(self.build_dir))
if not is_zephyr_build(self.build_dir):
log.die('Refusing to run pristine on a folder that is not a '
'Zephyr build system')
cache = CMakeCache.from_build_dir(self.build_dir)
cmake_args = ['-P', cache['ZEPHYR_BASE'] + '/cmake/pristine.cmake']
run_cmake(cmake_args, cwd=self.build_dir, dry_run=self.args.dry_run)
def _run_build(self, target):
if target:
_banner('running target {}'.format(target))
elif self.run_cmake:
_banner('building application')
extra_args = ['--target', target] if target else []
if self.args.build_opt:
extra_args.append('--')
extra_args.extend(self.args.build_opt)
if self.args.verbose:
self._append_verbose_args(extra_args,
not bool(self.args.build_opt))
run_build(self.build_dir, extra_args=extra_args,
dry_run=self.args.dry_run)
def _append_verbose_args(self, extra_args, add_dashes):
# These hacks are only needed for CMake versions earlier than
# 3.14. When Zephyr's minimum version is at least that, we can
# drop this nonsense and just run "cmake --build BUILD -v".
self._update_cache()
if not self.cmake_cache:
return
generator = self.cmake_cache.get('CMAKE_GENERATOR')
if not generator:
return
# Substring matching is for things like "Eclipse CDT4 - Ninja".
if 'Ninja' in generator:
if add_dashes:
extra_args.append('--')
extra_args.append('-v')
elif generator == 'Unix Makefiles':
if add_dashes:
extra_args.append('--')
extra_args.append('VERBOSE=1')
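# --- Illustrative sketch (added for clarity, not part of west) ---
# BUILD_USAGE above relies on a '--' separator: everything before it may be a
# single application source directory, everything after it is handed straight
# to CMake. This hypothetical helper mirrors Build._parse_remainder for that
# convention.
def _split_source_and_cmake_args(remainder):
    source_dir, cmake_opts = None, None
    if remainder and remainder[0] != _ARG_SEPARATOR:
        source_dir, remainder = remainder[0], remainder[1:]
    if remainder and remainder[0] == _ARG_SEPARATOR and remainder[1:]:
        cmake_opts = remainder[1:]
    return source_dir, cmake_opts
# Example: _split_source_and_cmake_args(['app', '--', '-DOVERLAY_CONFIG=x.conf'])
# returns ('app', ['-DOVERLAY_CONFIG=x.conf']).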
|
[] |
[] |
[
"BOARD"
] |
[]
|
["BOARD"]
|
python
| 1 | 0 | |
api/_examples/gcp_pub_sub-alert-channel/main.go
|
package main
import (
"fmt"
"log"
"os"
"github.com/lacework/go-sdk/api"
)
func main() {
lacework, err := api.NewClient(os.Getenv("LW_ACCOUNT"), api.WithApiV2(),
api.WithApiKeys(os.Getenv("LW_API_KEY"), os.Getenv("LW_API_SECRET")))
if err != nil {
log.Fatal(err)
}
myGcpPubSubChannel := api.NewAlertChannel("gcp-pub-sub-alert-from-golang",
api.GcpPubSubAlertChannelType,
api.GcpPubSubDataV2{
ProjectID: "my-sample-project-191923",
TopicID: "mytopic",
Credentials: api.GcpPubSubCredentials{
ClientID: "client_id",
ClientEmail: "[email protected]",
PrivateKey: "priv_key",
PrivateKeyID: "p_key_id",
},
},
)
response, err := lacework.V2.AlertChannels.Create(myGcpPubSubChannel)
if err != nil {
log.Fatal(err)
}
// Output: Gcp Pub Sub alert channel created: THE-INTEGRATION-GUID
fmt.Printf("Gcp Pub Sub alert channel created: %s", response.Data.IntgGuid)
}
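// requireEnv is a hypothetical helper added only as a sketch: api.NewClient
// above silently receives empty strings when LW_ACCOUNT, LW_API_KEY or
// LW_API_SECRET are unset, so a real program might prefer to fail fast.
func requireEnv(keys ...string) error {
	for _, key := range keys {
		if value, ok := os.LookupEnv(key); !ok || value == "" {
			return fmt.Errorf("missing required environment variable %s", key)
		}
	}
	return nil
}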
|
[
"\"LW_ACCOUNT\"",
"\"LW_API_KEY\"",
"\"LW_API_SECRET\""
] |
[] |
[
"LW_API_SECRET",
"LW_ACCOUNT",
"LW_API_KEY"
] |
[]
|
["LW_API_SECRET", "LW_ACCOUNT", "LW_API_KEY"]
|
go
| 3 | 0 | |
main_test.go
|
package gorm_test
import (
"database/sql"
"database/sql/driver"
"fmt"
"strconv"
_ "github.com/denisenkom/go-mssqldb"
testdb "github.com/erikstmartin/go-testdb"
_ "github.com/go-sql-driver/mysql"
"github.com/jinzhu/gorm"
"github.com/jinzhu/now"
_ "github.com/lib/pq"
_ "github.com/mattn/go-sqlite3"
"os"
"testing"
"time"
)
var (
DB gorm.DB
t1, t2, t3, t4, t5 time.Time
)
func init() {
var err error
if DB, err = OpenTestConnection(); err != nil {
panic(fmt.Sprintf("No error should happen when connecting to test database, but got err=%+v", err))
}
// DB.SetLogger(Logger{log.New(os.Stdout, "\r\n", 0)})
// DB.SetLogger(log.New(os.Stdout, "\r\n", 0))
// DB.LogMode(true)
DB.LogMode(false)
DB.DB().SetMaxIdleConns(10)
runMigration()
}
func OpenTestConnection() (db gorm.DB, err error) {
switch os.Getenv("GORM_DIALECT") {
case "mysql":
// CREATE USER 'gorm'@'localhost' IDENTIFIED BY 'gorm';
// CREATE DATABASE gorm;
// GRANT ALL ON gorm.* TO 'gorm'@'localhost';
fmt.Println("testing mysql...")
db, err = gorm.Open("mysql", "gorm:gorm@/gorm?charset=utf8&parseTime=True")
case "postgres":
fmt.Println("testing postgres...")
db, err = gorm.Open("postgres", "user=gorm DB.name=gorm sslmode=disable")
case "foundation":
fmt.Println("testing foundation...")
db, err = gorm.Open("foundation", "dbname=gorm port=15432 sslmode=disable")
case "mssql":
fmt.Println("testing mssql...")
db, err = gorm.Open("mssql", "server=SERVER_HERE;database=rogue;user id=USER_HERE;password=PW_HERE;port=1433")
default:
fmt.Println("testing sqlite3...")
db, err = gorm.Open("sqlite3", "/tmp/gorm.db")
}
return
}
func TestStringPrimaryKey(t *testing.T) {
type UUIDStruct struct {
ID string `gorm:"primary_key"`
Name string
}
DB.AutoMigrate(&UUIDStruct{})
data := UUIDStruct{ID: "uuid", Name: "hello"}
if err := DB.Save(&data).Error; err != nil || data.ID != "uuid" {
t.Errorf("string primary key should not be populated")
}
}
func TestExceptionsWithInvalidSql(t *testing.T) {
var columns []string
if DB.Where("sdsd.zaaa = ?", "sd;;;aa").Pluck("aaa", &columns).Error == nil {
t.Errorf("Should got error with invalid SQL")
}
if DB.Model(&User{}).Where("sdsd.zaaa = ?", "sd;;;aa").Pluck("aaa", &columns).Error == nil {
t.Errorf("Should got error with invalid SQL")
}
if DB.Where("sdsd.zaaa = ?", "sd;;;aa").Find(&User{}).Error == nil {
t.Errorf("Should got error with invalid SQL")
}
var count1, count2 int64
DB.Model(&User{}).Count(&count1)
if count1 <= 0 {
t.Errorf("Should find some users")
}
if DB.Where("name = ?", "jinzhu; delete * from users").First(&User{}).Error == nil {
t.Errorf("Should got error with invalid SQL")
}
DB.Model(&User{}).Count(&count2)
if count1 != count2 {
t.Errorf("No user should not be deleted by invalid SQL")
}
}
func TestSetTable(t *testing.T) {
DB.Create(getPreparedUser("pluck_user1", "pluck_user"))
DB.Create(getPreparedUser("pluck_user2", "pluck_user"))
DB.Create(getPreparedUser("pluck_user3", "pluck_user"))
if err := DB.Table("users").Where("role = ?", "pluck_user").Pluck("age", &[]int{}).Error; err != nil {
t.Errorf("No errors should happen if set table for pluck", err.Error())
}
var users []User
if DB.Table("users").Find(&[]User{}).Error != nil {
t.Errorf("No errors should happen if set table for find")
}
if DB.Table("invalid_table").Find(&users).Error == nil {
t.Errorf("Should got error when table is set to an invalid table")
}
DB.Exec("drop table deleted_users;")
if DB.Table("deleted_users").CreateTable(&User{}).Error != nil {
t.Errorf("Create table with specified table")
}
DB.Table("deleted_users").Save(&User{Name: "DeletedUser"})
var deletedUsers []User
DB.Table("deleted_users").Find(&deletedUsers)
if len(deletedUsers) != 1 {
t.Errorf("Query from specified table")
}
DB.Save(getPreparedUser("normal_user", "reset_table"))
DB.Table("deleted_users").Save(getPreparedUser("deleted_user", "reset_table"))
var user1, user2, user3 User
DB.Where("role = ?", "reset_table").First(&user1).Table("deleted_users").First(&user2).Table("").First(&user3)
if (user1.Name != "normal_user") || (user2.Name != "deleted_user") || (user3.Name != "normal_user") {
t.Errorf("unset specified table with blank string")
}
}
type Order struct {
}
type Cart struct {
}
func (c Cart) TableName() string {
return "shopping_cart"
}
func TestHasTable(t *testing.T) {
type Foo struct {
Id int
Stuff string
}
DB.DropTable(&Foo{})
if ok := DB.HasTable(&Foo{}); ok {
t.Errorf("Table should not exist, but does")
}
if err := DB.CreateTable(&Foo{}).Error; err != nil {
t.Errorf("Table should be created")
}
if ok := DB.HasTable(&Foo{}); !ok {
t.Errorf("Table should exist, but HasTable informs it does not")
}
}
func TestTableName(t *testing.T) {
DB := DB.Model("")
if DB.NewScope(Order{}).TableName() != "orders" {
t.Errorf("Order's table name should be orders")
}
if DB.NewScope(&Order{}).TableName() != "orders" {
t.Errorf("&Order's table name should be orders")
}
if DB.NewScope([]Order{}).TableName() != "orders" {
t.Errorf("[]Order's table name should be orders")
}
if DB.NewScope(&[]Order{}).TableName() != "orders" {
t.Errorf("&[]Order's table name should be orders")
}
DB.SingularTable(true)
if DB.NewScope(Order{}).TableName() != "order" {
t.Errorf("Order's singular table name should be order")
}
if DB.NewScope(&Order{}).TableName() != "order" {
t.Errorf("&Order's singular table name should be order")
}
if DB.NewScope([]Order{}).TableName() != "order" {
t.Errorf("[]Order's singular table name should be order")
}
if DB.NewScope(&[]Order{}).TableName() != "order" {
t.Errorf("&[]Order's singular table name should be order")
}
if DB.NewScope(&Cart{}).TableName() != "shopping_cart" {
t.Errorf("&Cart's singular table name should be shopping_cart")
}
if DB.NewScope(Cart{}).TableName() != "shopping_cart" {
t.Errorf("Cart's singular table name should be shopping_cart")
}
if DB.NewScope(&[]Cart{}).TableName() != "shopping_cart" {
t.Errorf("&[]Cart's singular table name should be shopping_cart")
}
if DB.NewScope([]Cart{}).TableName() != "shopping_cart" {
t.Errorf("[]Cart's singular table name should be shopping_cart")
}
DB.SingularTable(false)
}
func TestSqlNullValue(t *testing.T) {
DB.DropTable(&NullValue{})
DB.AutoMigrate(&NullValue{})
if err := DB.Save(&NullValue{Name: sql.NullString{String: "hello", Valid: true},
Age: sql.NullInt64{Int64: 18, Valid: true},
Male: sql.NullBool{Bool: true, Valid: true},
Height: sql.NullFloat64{Float64: 100.11, Valid: true},
AddedAt: NullTime{Time: time.Now(), Valid: true},
}).Error; err != nil {
t.Errorf("Not error should raise when test null value")
}
var nv NullValue
DB.First(&nv, "name = ?", "hello")
if nv.Name.String != "hello" || nv.Age.Int64 != 18 || nv.Male.Bool != true || nv.Height.Float64 != 100.11 || nv.AddedAt.Valid != true {
t.Errorf("Should be able to fetch null value")
}
if err := DB.Save(&NullValue{Name: sql.NullString{String: "hello-2", Valid: true},
Age: sql.NullInt64{Int64: 18, Valid: false},
Male: sql.NullBool{Bool: true, Valid: true},
Height: sql.NullFloat64{Float64: 100.11, Valid: true},
AddedAt: NullTime{Time: time.Now(), Valid: false},
}).Error; err != nil {
t.Errorf("Not error should raise when test null value")
}
var nv2 NullValue
DB.First(&nv2, "name = ?", "hello-2")
if nv2.Name.String != "hello-2" || nv2.Age.Int64 != 0 || nv2.Male.Bool != true || nv2.Height.Float64 != 100.11 || nv2.AddedAt.Valid != false {
t.Errorf("Should be able to fetch null value")
}
if err := DB.Save(&NullValue{Name: sql.NullString{String: "hello-3", Valid: false},
Age: sql.NullInt64{Int64: 18, Valid: false},
Male: sql.NullBool{Bool: true, Valid: true},
Height: sql.NullFloat64{Float64: 100.11, Valid: true},
AddedAt: NullTime{Time: time.Now(), Valid: false},
}).Error; err == nil {
t.Errorf("Can't save because of name can't be null")
}
}
func TestTransaction(t *testing.T) {
tx := DB.Begin()
u := User{Name: "transcation"}
if err := tx.Save(&u).Error; err != nil {
t.Errorf("No error should raise")
}
if err := tx.First(&User{}, "name = ?", "transcation").Error; err != nil {
t.Errorf("Should find saved record")
}
if sqlTx, ok := tx.CommonDB().(*sql.Tx); !ok || sqlTx == nil {
t.Errorf("Should return the underlying sql.Tx")
}
tx.Rollback()
if err := tx.First(&User{}, "name = ?", "transcation").Error; err == nil {
t.Errorf("Should not find record after rollback")
}
tx2 := DB.Begin()
u2 := User{Name: "transcation-2"}
if err := tx2.Save(&u2).Error; err != nil {
t.Errorf("No error should raise")
}
if err := tx2.First(&User{}, "name = ?", "transcation-2").Error; err != nil {
t.Errorf("Should find saved record")
}
tx2.Commit()
if err := DB.First(&User{}, "name = ?", "transcation-2").Error; err != nil {
t.Errorf("Should be able to find committed record")
}
}
func TestRow(t *testing.T) {
user1 := User{Name: "RowUser1", Age: 1, Birthday: now.MustParse("2000-1-1")}
user2 := User{Name: "RowUser2", Age: 10, Birthday: now.MustParse("2010-1-1")}
user3 := User{Name: "RowUser3", Age: 20, Birthday: now.MustParse("2020-1-1")}
DB.Save(&user1).Save(&user2).Save(&user3)
row := DB.Table("users").Where("name = ?", user2.Name).Select("age").Row()
var age int64
row.Scan(&age)
if age != 10 {
t.Errorf("Scan with Row")
}
}
func TestRows(t *testing.T) {
user1 := User{Name: "RowsUser1", Age: 1, Birthday: now.MustParse("2000-1-1")}
user2 := User{Name: "RowsUser2", Age: 10, Birthday: now.MustParse("2010-1-1")}
user3 := User{Name: "RowsUser3", Age: 20, Birthday: now.MustParse("2020-1-1")}
DB.Save(&user1).Save(&user2).Save(&user3)
rows, err := DB.Table("users").Where("name = ? or name = ?", user2.Name, user3.Name).Select("name, age").Rows()
if err != nil {
t.Errorf("Not error should happen, but got")
}
count := 0
for rows.Next() {
var name string
var age int64
rows.Scan(&name, &age)
count++
}
if count != 2 {
t.Errorf("Should found two records with name 3")
}
}
func TestScan(t *testing.T) {
user1 := User{Name: "ScanUser1", Age: 1, Birthday: now.MustParse("2000-1-1")}
user2 := User{Name: "ScanUser2", Age: 10, Birthday: now.MustParse("2010-1-1")}
user3 := User{Name: "ScanUser3", Age: 20, Birthday: now.MustParse("2020-1-1")}
DB.Save(&user1).Save(&user2).Save(&user3)
type result struct {
Name string
Age int
}
var res result
DB.Table("users").Select("name, age").Where("name = ?", user3.Name).Scan(&res)
if res.Name != user3.Name {
t.Errorf("Scan into struct should work")
}
var doubleAgeRes result
DB.Table("users").Select("age + age as age").Where("name = ?", user3.Name).Scan(&doubleAgeRes)
if doubleAgeRes.Age != res.Age*2 {
t.Errorf("Scan double age as age")
}
var ress []result
DB.Table("users").Select("name, age").Where("name in (?)", []string{user2.Name, user3.Name}).Scan(&ress)
if len(ress) != 2 || ress[0].Name != user2.Name || ress[1].Name != user3.Name {
t.Errorf("Scan into struct map")
}
}
func TestRaw(t *testing.T) {
user1 := User{Name: "ExecRawSqlUser1", Age: 1, Birthday: now.MustParse("2000-1-1")}
user2 := User{Name: "ExecRawSqlUser2", Age: 10, Birthday: now.MustParse("2010-1-1")}
user3 := User{Name: "ExecRawSqlUser3", Age: 20, Birthday: now.MustParse("2020-1-1")}
DB.Save(&user1).Save(&user2).Save(&user3)
type result struct {
Name string
Email string
}
var ress []result
DB.Raw("SELECT name, age FROM users WHERE name = ? or name = ?", user2.Name, user3.Name).Scan(&ress)
if len(ress) != 2 || ress[0].Name != user2.Name || ress[1].Name != user3.Name {
t.Errorf("Raw with scan")
}
rows, _ := DB.Raw("select name, age from users where name = ?", user3.Name).Rows()
count := 0
for rows.Next() {
count++
}
if count != 1 {
t.Errorf("Raw with Rows should find one record with name 3")
}
DB.Exec("update users set name=? where name in (?)", "jinzhu", []string{user1.Name, user2.Name, user3.Name})
if DB.Where("name in (?)", []string{user1.Name, user2.Name, user3.Name}).First(&User{}).Error != gorm.RecordNotFound {
t.Error("Raw sql to update records")
}
}
func TestGroup(t *testing.T) {
rows, err := DB.Select("name").Table("users").Group("name").Rows()
if err == nil {
defer rows.Close()
for rows.Next() {
var name string
rows.Scan(&name)
}
} else {
t.Errorf("Should not raise any error")
}
}
func TestJoins(t *testing.T) {
var user = User{
Name: "joins",
Emails: []Email{{Email: "[email protected]"}, {Email: "[email protected]"}},
}
DB.Save(&user)
var result User
DB.Joins("left join emails on emails.user_id = users.id").Where("name = ?", "joins").First(&result)
if result.Name != "joins" || result.Id != user.Id {
t.Errorf("Should find all two emails with Join")
}
}
func TestJoinsWithSelect(t *testing.T) {
type result struct {
Name string
Email string
}
user := User{
Name: "joins_with_select",
Emails: []Email{{Email: "[email protected]"}, {Email: "[email protected]"}},
}
DB.Save(&user)
var results []result
DB.Table("users").Select("name, email").Joins("left join emails on emails.user_id = users.id").Where("name = ?", "joins_with_select").Scan(&results)
if len(results) != 2 || results[0].Email != "[email protected]" || results[1].Email != "[email protected]" {
t.Errorf("Should find all two emails with Join select")
}
}
func TestHaving(t *testing.T) {
rows, err := DB.Select("name, count(*) as total").Table("users").Group("name").Having("name IN (?)", []string{"2", "3"}).Rows()
if err == nil {
defer rows.Close()
for rows.Next() {
var name string
var total int64
rows.Scan(&name, &total)
if name == "2" && total != 1 {
t.Errorf("Should have one user having name 2")
}
if name == "3" && total != 2 {
t.Errorf("Should have two users having name 3")
}
}
} else {
t.Errorf("Should not raise any error")
}
}
func DialectHasTzSupport() bool {
// NB: mssql and FoundationDB do not support time zones.
if dialect := os.Getenv("GORM_DIALECT"); dialect == "mssql" || dialect == "foundation" {
return false
}
return true
}
func TestTimeWithZone(t *testing.T) {
var format = "2006-01-02 15:04:05 -0700"
var times []time.Time
GMT8, _ := time.LoadLocation("Asia/Shanghai")
times = append(times, time.Date(2013, 02, 19, 1, 51, 49, 123456789, GMT8))
times = append(times, time.Date(2013, 02, 18, 17, 51, 49, 123456789, time.UTC))
for index, vtime := range times {
name := "time_with_zone_" + strconv.Itoa(index)
user := User{Name: name, Birthday: vtime}
if !DialectHasTzSupport() {
// If our driver dialect doesn't support TZ's, just use UTC for everything here.
user.Birthday = vtime.UTC()
}
DB.Save(&user)
expectedBirthday := "2013-02-18 17:51:49 +0000"
foundBirthday := user.Birthday.UTC().Format(format)
if foundBirthday != expectedBirthday {
t.Errorf("User's birthday should not be changed after save for name=%s, expected bday=%+v but actual value=%+v", name, expectedBirthday, foundBirthday)
}
var findUser, findUser2, findUser3 User
DB.First(&findUser, "name = ?", name)
foundBirthday = findUser.Birthday.UTC().Format(format)
if foundBirthday != expectedBirthday {
t.Errorf("User's birthday should not be changed after find for name=%s, expected bday=%+v but actual value=%+v or %+v", name, expectedBirthday, foundBirthday)
}
if DB.Where("id = ? AND birthday >= ?", findUser.Id, user.Birthday.Add(-time.Minute)).First(&findUser2).RecordNotFound() {
t.Errorf("User should be found")
}
if !DB.Where("id = ? AND birthday >= ?", findUser.Id, user.Birthday.Add(time.Minute)).First(&findUser3).RecordNotFound() {
t.Errorf("User should not be found")
}
}
}
func TestHstore(t *testing.T) {
type Details struct {
Id int64
Bulk gorm.Hstore
}
if dialect := os.Getenv("GORM_DIALECT"); dialect != "postgres" {
t.Skip()
}
if err := DB.Exec("CREATE EXTENSION IF NOT EXISTS hstore").Error; err != nil {
fmt.Println("\033[31mHINT: Must be superuser to create hstore extension (ALTER USER gorm WITH SUPERUSER;)\033[0m")
panic(fmt.Sprintf("No error should happen when create hstore extension, but got %+v", err))
}
DB.Exec("drop table details")
if err := DB.CreateTable(&Details{}).Error; err != nil {
panic(fmt.Sprintf("No error should happen when create table, but got %+v", err))
}
bankAccountId, phoneNumber, opinion := "123456", "14151321232", "sharkbait"
bulk := map[string]*string{
"bankAccountId": &bankAccountId,
"phoneNumber": &phoneNumber,
"opinion": &opinion,
}
d := Details{Bulk: bulk}
DB.Save(&d)
var d2 Details
if err := DB.First(&d2).Error; err != nil {
t.Errorf("Got error when tried to fetch details: %+v", err)
}
for k := range bulk {
if r, ok := d2.Bulk[k]; ok {
if res, _ := bulk[k]; *res != *r {
t.Errorf("Details should be equal")
}
} else {
t.Errorf("Details should be existed")
}
}
}
func TestSetAndGet(t *testing.T) {
if value, ok := DB.Set("hello", "world").Get("hello"); !ok {
t.Errorf("Should be able to get setting after set")
} else {
if value.(string) != "world" {
t.Errorf("Setted value should not be changed")
}
}
if _, ok := DB.Get("non_existing"); ok {
t.Errorf("Get non existing key should return error")
}
}
func TestCompatibilityMode(t *testing.T) {
DB, _ := gorm.Open("testdb", "")
testdb.SetQueryFunc(func(query string) (driver.Rows, error) {
columns := []string{"id", "name", "age"}
result := `
1,Tim,20
2,Joe,25
3,Bob,30
`
return testdb.RowsFromCSVString(columns, result), nil
})
var users []User
DB.Find(&users)
if (users[0].Name != "Tim") || len(users) != 3 {
t.Errorf("Unexcepted result returned")
}
}
func TestOpenExistingDB(t *testing.T) {
DB.Save(&User{Name: "jnfeinstein"})
dialect := os.Getenv("GORM_DIALECT")
db, err := gorm.Open(dialect, DB.DB())
if err != nil {
t.Errorf("Should have wrapped the existing DB connection")
}
var user User
if db.Where("name = ?", "jnfeinstein").First(&user).Error == gorm.RecordNotFound {
t.Errorf("Should have found existing record")
}
}
func BenchmarkGorm(b *testing.B) {
b.N = 2000
for x := 0; x < b.N; x++ {
e := strconv.Itoa(x) + "[email protected]"
email := BigEmail{Email: e, UserAgent: "pc", RegisteredAt: time.Now()}
// Insert
DB.Save(&email)
// Query
DB.First(&BigEmail{}, "email = ?", e)
// Update
DB.Model(&email).UpdateColumn("email", "new-"+e)
// Delete
DB.Delete(&email)
}
}
func BenchmarkRawSql(b *testing.B) {
DB, _ := sql.Open("postgres", "user=gorm DB.ame=gorm sslmode=disable")
DB.SetMaxIdleConns(10)
insertSql := "INSERT INTO emails (user_id,email,user_agent,registered_at,created_at,updated_at) VALUES ($1,$2,$3,$4,$5,$6) RETURNING id"
querySql := "SELECT * FROM emails WHERE email = $1 ORDER BY id LIMIT 1"
updateSql := "UPDATE emails SET email = $1, updated_at = $2 WHERE id = $3"
deleteSql := "DELETE FROM orders WHERE id = $1"
b.N = 2000
for x := 0; x < b.N; x++ {
var id int64
e := strconv.Itoa(x) + "[email protected]"
email := BigEmail{Email: e, UserAgent: "pc", RegisteredAt: time.Now()}
// Insert
DB.QueryRow(insertSql, email.UserId, email.Email, email.UserAgent, email.RegisteredAt, time.Now(), time.Now()).Scan(&id)
// Query
rows, _ := DB.Query(querySql, email.Email)
rows.Close()
// Update
DB.Exec(updateSql, "new-"+e, time.Now(), id)
// Delete
DB.Exec(deleteSql, id)
}
}
|
[
"\"GORM_DIALECT\"",
"\"GORM_DIALECT\"",
"\"GORM_DIALECT\"",
"\"GORM_DIALECT\""
] |
[] |
[
"GORM_DIALECT"
] |
[]
|
["GORM_DIALECT"]
|
go
| 1 | 0 | |
cmd/e2e_benchmark/main.go
|
package main
import (
"context"
"fmt"
"io"
"os"
"strings"
"text/tabwriter"
"time"
"github.com/containerd/console"
clientset "github.com/docker/compose-on-kubernetes/api/client/clientset/typed/compose/v1beta2"
"github.com/docker/compose-on-kubernetes/api/constants"
"github.com/docker/compose-on-kubernetes/install"
e2ewait "github.com/docker/compose-on-kubernetes/internal/e2e/wait"
"github.com/morikuni/aec"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"golang.org/x/sync/errgroup"
appstypes "k8s.io/api/apps/v1beta2"
coretypes "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
k8sclientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
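// getPhaseTimings returns, for each completed phase of this worker, the time spent in that phase,
// measured from the end of the previous phase (or from start for the first phase).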
func (s *workerState) getPhaseTimings(start time.Time) map[string]time.Duration {
last := start
res := make(map[string]time.Duration)
for _, p := range s.PreviousPhases {
res[p.Name] = p.DoneTime.Sub(last)
last = p.DoneTime
}
return res
}
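// computePhaseTimingAverages averages each phase duration across all worker states,
// following the phase order of the first worker.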
func computePhaseTimingAverages(start time.Time, states []*workerState) []timedPhase {
if len(states) == 0 {
return nil
}
timings := make([]map[string]time.Duration, len(states))
for ix, s := range states {
timings[ix] = s.getPhaseTimings(start)
}
var result []timedPhase
for _, phase := range states[0].PreviousPhases {
count := 0
var total time.Duration
for _, t := range timings {
if v, ok := t[phase.Name]; ok {
count++
total += v
}
}
result = append(result, timedPhase{
duration: time.Duration(int64(total) / int64(count)),
name: phase.Name,
})
}
return result
}
type statusPrinter struct {
out io.Writer
previousLineCount int
}
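// print renders a tab-aligned status table for every worker; when attached to a console it first
// erases the lines written by the previous call so the table updates in place.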
func (r *statusPrinter) print(states []*workerState, start time.Time, withConsole bool) {
if withConsole {
b := aec.EmptyBuilder
for ix := 0; ix < r.previousLineCount; ix++ {
b = b.Up(1).EraseLine(aec.EraseModes.All)
}
fmt.Fprintf(r.out, b.ANSI.Apply(""))
}
tw := tabwriter.NewWriter(r.out, 5, 1, 4, ' ', 0)
defer tw.Flush()
count := 0
// headers
fmt.Fprint(tw, " ")
maxPrevious := 0
for _, s := range states {
s.Lock()
fmt.Fprintf(tw, "\t%s", strings.ToUpper(s.ID))
if l := len(s.PreviousPhases); l > maxPrevious {
maxPrevious = l
}
s.Unlock()
}
fmt.Fprint(tw, "\n")
count++
for ix := 0; ix < len(states)+1; ix++ {
fmt.Fprint(tw, "---\t")
}
fmt.Fprint(tw, "\n")
count++
// previous steps
for ix := 0; ix < maxPrevious; ix++ {
if ix == 0 {
fmt.Fprint(tw, "PREVIOUS STEPS")
} else {
fmt.Fprint(tw, " ")
}
for _, s := range states {
s.Lock()
fmt.Fprint(tw, "\t")
if len(s.PreviousPhases) > ix {
baseDate := start
if ix > 0 {
baseDate = s.PreviousPhases[ix-1].DoneTime
}
duration := s.PreviousPhases[ix].DoneTime.Sub(baseDate)
fmt.Fprintf(tw, "%s: %v", s.PreviousPhases[ix].Name, duration)
} else {
fmt.Fprint(tw, " ")
}
s.Unlock()
}
fmt.Fprint(tw, "\n")
count++
}
for ix := 0; ix < len(states)+1; ix++ {
fmt.Fprint(tw, "---\t")
}
fmt.Fprint(tw, "\n")
count++
// current step
fmt.Fprint(tw, "CURRENT STEP")
for _, s := range states {
s.Lock()
fmt.Fprintf(tw, "\t%s", s.CurrentPhase)
s.Unlock()
}
fmt.Fprint(tw, "\n")
count++
tw.Write([]byte(" "))
for _, s := range states {
s.Lock()
fmt.Fprintf(tw, "\t%s", s.CurrentMessage)
s.Unlock()
}
fmt.Fprint(tw, "\n")
count++
r.previousLineCount = count
}
func main() {
opts := &options{}
cmd := &cobra.Command{
Use: "e2e_benchmark [options]",
RunE: func(_ *cobra.Command, _ []string) error {
return run(opts)
},
}
cmd.Flags().StringVar(&opts.kubeconfig, "kubeconfig", "", "kubeconfig path")
cmd.Flags().IntVar(&opts.workerCount, "worker-count", 5, "number of benchmark workers")
cmd.Flags().IntVar(&opts.totalStacks, "total-stacks", 200, "number of stacks created/removed per worker")
cmd.Flags().StringVarP(&opts.format, "format", "f", "auto", "output format: auto|json|interactive|report")
cmd.Flags().StringVar(&opts.collectLogsNamespace, "logs-namespace", "", "namespace to collect Compose on Kubernetes logs from")
cmd.Flags().DurationVar(&opts.maxDuration, "max-duration", 0, "maximum duration of the benchmark (fails if exceeded)")
if err := cmd.Execute(); err != nil {
panic(err)
}
}
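// run installs the stacks API if needed, launches the benchmark workers and reports their progress
// until they all finish or the optional max duration expires.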
func run(opts *options) error {
ctx := context.Background()
if opts.maxDuration > 0 {
var cancel func()
ctx, cancel = context.WithTimeout(ctx, opts.maxDuration)
defer cancel()
}
var (
out io.Writer
err error
)
if out, opts.format, err = configureOutput(opts.format); err != nil {
return err
}
restCfg, err := configureRest(opts.kubeconfig)
if err != nil {
return err
}
if opts.collectLogsNamespace != "" {
defer collectLogsToStderr(restCfg, opts.collectLogsNamespace)
}
if err := ensureInstalled(restCfg); err != nil {
return err
}
start := time.Now()
eg, _ := errgroup.WithContext(ctx)
var states []*workerState
stacksPerWorker := opts.totalStacks / opts.workerCount
for workerIX := 0; workerIX < opts.workerCount; workerIX++ {
workerID := fmt.Sprintf("bench-worker-%d", workerIX)
state := &workerState{
ID: workerID,
}
states = append(states, state)
stacksForThisWorker := stacksPerWorker
if workerIX < (opts.totalStacks % opts.workerCount) {
stacksForThisWorker++
}
eg.Go(func() error {
return benchmarkRun(restCfg, workerID, stacksForThisWorker, func(u stateUpdater) {
state.Lock()
defer state.Unlock()
u(state)
})
})
}
finishedC := make(chan error)
go func() {
defer close(finishedC)
finishedC <- eg.Wait()
}()
return reportBenchStatus(ctx, out, finishedC, start, opts.format, states)
}
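// configureOutput resolves the requested output format; "auto" becomes "interactive" when stdout is
// a console and falls back to "report" otherwise.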
func configureOutput(format string) (io.Writer, string, error) {
switch format {
case "interactive", "auto":
c, err := console.ConsoleFromFile(os.Stdout)
if err != nil {
if format == "auto" {
return os.Stdout, "report", nil
}
return nil, "", errors.Wrapf(err, "unable to set interactive console")
}
return c, "interactive", nil
case "json", "report":
return os.Stdout, format, nil
}
return nil, "", errors.Errorf("unexpected format %s. must be auto, json, interactive or report", format)
}
func configureRest(kubeconfig string) (*rest.Config, error) {
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
loadingRules.ExplicitPath = kubeconfig
cmdConfig, err := loadingRules.Load()
if err != nil {
return nil, err
}
clientCfg := clientcmd.NewDefaultClientConfig(*cmdConfig, &clientcmd.ConfigOverrides{})
return clientCfg.ClientConfig()
}
func collectLogsToStderr(cfg *rest.Config, ns string) {
client, err := k8sclientset.NewForConfig(cfg)
if err != nil {
panic(err)
}
pods, err := client.CoreV1().Pods(ns).List(metav1.ListOptions{})
if err != nil {
panic(err)
}
for _, pod := range pods.Items {
for _, cont := range pod.Status.ContainerStatuses {
fmt.Fprintf(os.Stderr, "\nCurrent logs for %s/%s\n", pod.Name, cont.Name)
data, err := client.CoreV1().Pods(ns).GetLogs(pod.Name, &coretypes.PodLogOptions{Container: cont.Name}).Stream()
if err != nil {
panic(err)
}
io.Copy(os.Stderr, data)
}
}
}
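// ensureInstalled checks that the stacks API is reachable and, if it is not, installs Compose on
// Kubernetes into the "benchmark" namespace using the image tag from the TAG environment variable.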
func ensureInstalled(config *rest.Config) error {
stackclient, err := clientset.NewForConfig(config)
if err != nil {
return err
}
if _, err := stackclient.Stacks(metav1.NamespaceAll).List(metav1.ListOptions{}); err == nil {
// installed
return nil
}
tag := os.Getenv("TAG")
if tag == "" {
return errors.New("stacks API is not installed and TAG env var is not set. Cannot install")
}
k8sclient, err := k8sclientset.NewForConfig(config)
if err != nil {
return err
}
if _, err := k8sclient.CoreV1().Namespaces().Get("benchmark", metav1.GetOptions{}); err != nil {
if kerrors.IsNotFound(err) {
if _, err := k8sclient.CoreV1().Namespaces().Create(&coretypes.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "benchmark",
},
}); err != nil {
return err
}
} else {
return err
}
}
if err := install.Do(context.Background(), config,
install.WithUnsafe(install.UnsafeOptions{
OptionsCommon: install.OptionsCommon{
Namespace: "benchmark",
Tag: tag,
ReconciliationInterval: constants.DefaultFullSyncInterval,
}}),
install.WithObjectFilter(func(o runtime.Object) (bool, error) {
switch v := o.(type) {
case *appstypes.Deployment:
// change from pull always to pull never (image is already loaded, and not yet on hub)
// only apply to 1st container in POD (2nd container for API is etcd, and we might need to pull it)
v.Spec.Template.Spec.Containers[0].ImagePullPolicy = coretypes.PullNever
}
return true, nil
}),
); err != nil {
return err
}
if err = install.WaitNPods(config, "benchmark", 2, 2*time.Minute); err != nil {
return err
}
return e2ewait.For(300, func() (bool, error) {
_, err := stackclient.Stacks("default").List(metav1.ListOptions{})
return err == nil, err
})
}
|
[
"\"TAG\""
] |
[] |
[
"TAG"
] |
[]
|
["TAG"]
|
go
| 1 | 0 | |
subnet/kube/kube.go
|
// Copyright 2016 flannel authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kube
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net"
"os"
"strconv"
"time"
"github.com/flannel-io/flannel/pkg/ip"
"github.com/flannel-io/flannel/subnet"
"golang.org/x/net/context"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
listers "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
log "k8s.io/klog"
)
var (
ErrUnimplemented = errors.New("unimplemented")
)
const (
resyncPeriod = 5 * time.Minute
nodeControllerSyncTimeout = 10 * time.Minute
)
type kubeSubnetManager struct {
enableIPv4 bool
enableIPv6 bool
annotations annotations
client clientset.Interface
nodeName string
nodeStore listers.NodeLister
nodeController cache.Controller
subnetConf *subnet.Config
events chan subnet.Event
setNodeNetworkUnavailable bool
}
func NewSubnetManager(ctx context.Context, apiUrl, kubeconfig, prefix, netConfPath string, setNodeNetworkUnavailable bool) (subnet.Manager, error) {
var cfg *rest.Config
var err error
// Try to build the Kubernetes config from a master URL or a kubeconfig filepath. If neither masterUrl
// nor kubeconfigPath is passed in, we fall back to the in-cluster config. If the in-cluster config
// fails, we fall back to the default config.
cfg, err = clientcmd.BuildConfigFromFlags(apiUrl, kubeconfig)
if err != nil {
return nil, fmt.Errorf("fail to create kubernetes config: %v", err)
}
c, err := clientset.NewForConfig(cfg)
if err != nil {
return nil, fmt.Errorf("unable to initialize client: %v", err)
}
// The kube subnet mgr needs to know the k8s node name that it's running on so it can annotate it.
// If we're running as a pod then the POD_NAME and POD_NAMESPACE will be populated and can be used to find the node
// name. Otherwise, the environment variable NODE_NAME can be passed in.
nodeName := os.Getenv("NODE_NAME")
if nodeName == "" {
podName := os.Getenv("POD_NAME")
podNamespace := os.Getenv("POD_NAMESPACE")
if podName == "" || podNamespace == "" {
return nil, fmt.Errorf("env variables POD_NAME and POD_NAMESPACE must be set")
}
pod, err := c.CoreV1().Pods(podNamespace).Get(ctx, podName, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("error retrieving pod spec for '%s/%s': %v", podNamespace, podName, err)
}
nodeName = pod.Spec.NodeName
if nodeName == "" {
return nil, fmt.Errorf("node name not present in pod spec '%s/%s'", podNamespace, podName)
}
}
netConf, err := ioutil.ReadFile(netConfPath)
if err != nil {
return nil, fmt.Errorf("failed to read net conf: %v", err)
}
sc, err := subnet.ParseConfig(string(netConf))
if err != nil {
return nil, fmt.Errorf("error parsing subnet config: %s", err)
}
sm, err := newKubeSubnetManager(ctx, c, sc, nodeName, prefix)
if err != nil {
return nil, fmt.Errorf("error creating network manager: %s", err)
}
sm.setNodeNetworkUnavailable = setNodeNetworkUnavailable
go sm.Run(context.Background())
log.Infof("Waiting %s for node controller to sync", nodeControllerSyncTimeout)
err = wait.Poll(time.Second, nodeControllerSyncTimeout, func() (bool, error) {
return sm.nodeController.HasSynced(), nil
})
if err != nil {
return nil, fmt.Errorf("error waiting for nodeController to sync state: %v", err)
}
log.Infof("Node controller sync successful")
return sm, nil
}
func newKubeSubnetManager(ctx context.Context, c clientset.Interface, sc *subnet.Config, nodeName, prefix string) (*kubeSubnetManager, error) {
var err error
var ksm kubeSubnetManager
ksm.annotations, err = newAnnotations(prefix)
if err != nil {
return nil, err
}
ksm.enableIPv4 = sc.EnableIPv4
ksm.enableIPv6 = sc.EnableIPv6
ksm.client = c
ksm.nodeName = nodeName
ksm.subnetConf = sc
scale := 5000
scaleStr := os.Getenv("EVENT_QUEUE_DEPTH")
if scaleStr != "" {
n, err := strconv.Atoi(scaleStr)
if err != nil {
return nil, fmt.Errorf("env EVENT_QUEUE_DEPTH=%s format error: %v", scaleStr, err)
}
if n > 0 {
scale = n
}
}
ksm.events = make(chan subnet.Event, scale)
indexer, controller := cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return ksm.client.CoreV1().Nodes().List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return ksm.client.CoreV1().Nodes().Watch(ctx, options)
},
},
&v1.Node{},
resyncPeriod,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
ksm.handleAddLeaseEvent(subnet.EventAdded, obj)
},
UpdateFunc: ksm.handleUpdateLeaseEvent,
DeleteFunc: func(obj interface{}) {
_, isNode := obj.(*v1.Node)
// We can get DeletedFinalStateUnknown instead of *api.Node here and we need to handle that correctly.
if !isNode {
deletedState, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
log.Infof("Error received unexpected object: %v", obj)
return
}
node, ok := deletedState.Obj.(*v1.Node)
if !ok {
log.Infof("Error deletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj)
return
}
obj = node
}
ksm.handleAddLeaseEvent(subnet.EventRemoved, obj)
},
},
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
ksm.nodeController = controller
ksm.nodeStore = listers.NewNodeLister(indexer)
return &ksm, nil
}
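// handleAddLeaseEvent turns a node add or delete event into a subnet event, ignoring nodes that do
// not carry the flannel kube-managed annotation.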
func (ksm *kubeSubnetManager) handleAddLeaseEvent(et subnet.EventType, obj interface{}) {
n := obj.(*v1.Node)
if s, ok := n.Annotations[ksm.annotations.SubnetKubeManaged]; !ok || s != "true" {
return
}
l, err := ksm.nodeToLease(*n)
if err != nil {
log.Infof("Error turning node %q to lease: %v", n.ObjectMeta.Name, err)
return
}
ksm.events <- subnet.Event{Type: et, Lease: l}
}
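// handleUpdateLeaseEvent emits a lease event only when the flannel backend annotations actually
// changed between the old and new node objects.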
func (ksm *kubeSubnetManager) handleUpdateLeaseEvent(oldObj, newObj interface{}) {
o := oldObj.(*v1.Node)
n := newObj.(*v1.Node)
if s, ok := n.Annotations[ksm.annotations.SubnetKubeManaged]; !ok || s != "true" {
return
}
var changed = true
if ksm.enableIPv4 && o.Annotations[ksm.annotations.BackendData] == n.Annotations[ksm.annotations.BackendData] &&
o.Annotations[ksm.annotations.BackendType] == n.Annotations[ksm.annotations.BackendType] &&
o.Annotations[ksm.annotations.BackendPublicIP] == n.Annotations[ksm.annotations.BackendPublicIP] {
changed = false
}
if ksm.enableIPv6 && o.Annotations[ksm.annotations.BackendV6Data] == n.Annotations[ksm.annotations.BackendV6Data] &&
o.Annotations[ksm.annotations.BackendType] == n.Annotations[ksm.annotations.BackendType] &&
o.Annotations[ksm.annotations.BackendPublicIPv6] == n.Annotations[ksm.annotations.BackendPublicIPv6] {
changed = false
}
if !changed {
return // No change to lease
}
l, err := ksm.nodeToLease(*n)
if err != nil {
log.Infof("Error turning node %q to lease: %v", n.ObjectMeta.Name, err)
return
}
ksm.events <- subnet.Event{Type: subnet.EventAdded, Lease: l}
}
func (ksm *kubeSubnetManager) GetNetworkConfig(ctx context.Context) (*subnet.Config, error) {
return ksm.subnetConf, nil
}
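// AcquireLease patches the node with this instance's backend annotations (when they changed) and
// builds the lease from the node's PodCIDR and PodCIDRs.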
func (ksm *kubeSubnetManager) AcquireLease(ctx context.Context, attrs *subnet.LeaseAttrs) (*subnet.Lease, error) {
cachedNode, err := ksm.nodeStore.Get(ksm.nodeName)
if err != nil {
return nil, err
}
n := cachedNode.DeepCopy()
if n.Spec.PodCIDR == "" {
return nil, fmt.Errorf("node %q pod cidr not assigned", ksm.nodeName)
}
var bd, v6Bd []byte
bd, err = attrs.BackendData.MarshalJSON()
if err != nil {
return nil, err
}
v6Bd, err = attrs.BackendV6Data.MarshalJSON()
if err != nil {
return nil, err
}
var cidr, ipv6Cidr *net.IPNet
_, cidr, err = net.ParseCIDR(n.Spec.PodCIDR)
if err != nil {
return nil, err
}
for _, podCidr := range n.Spec.PodCIDRs {
_, parseCidr, err := net.ParseCIDR(podCidr)
if err != nil {
return nil, err
}
if len(parseCidr.IP) == net.IPv6len {
ipv6Cidr = parseCidr
break
}
}
if (n.Annotations[ksm.annotations.BackendData] != string(bd) ||
n.Annotations[ksm.annotations.BackendType] != attrs.BackendType ||
n.Annotations[ksm.annotations.BackendPublicIP] != attrs.PublicIP.String() ||
n.Annotations[ksm.annotations.SubnetKubeManaged] != "true" ||
(n.Annotations[ksm.annotations.BackendPublicIPOverwrite] != "" && n.Annotations[ksm.annotations.BackendPublicIPOverwrite] != attrs.PublicIP.String())) ||
(attrs.PublicIPv6 != nil &&
(n.Annotations[ksm.annotations.BackendV6Data] != string(v6Bd) ||
n.Annotations[ksm.annotations.BackendType] != attrs.BackendType ||
n.Annotations[ksm.annotations.BackendPublicIPv6] != attrs.PublicIPv6.String() ||
n.Annotations[ksm.annotations.SubnetKubeManaged] != "true" ||
(n.Annotations[ksm.annotations.BackendPublicIPv6Overwrite] != "" && n.Annotations[ksm.annotations.BackendPublicIPv6Overwrite] != attrs.PublicIPv6.String()))) {
n.Annotations[ksm.annotations.BackendType] = attrs.BackendType
//TODO - only vxlan and host-gw backends support dual stack now.
if (attrs.BackendType == "vxlan" && string(bd) != "null") || (attrs.BackendType == "wireguard" && string(bd) != "null") || attrs.BackendType != "vxlan" {
n.Annotations[ksm.annotations.BackendData] = string(bd)
if n.Annotations[ksm.annotations.BackendPublicIPOverwrite] != "" {
if n.Annotations[ksm.annotations.BackendPublicIP] != n.Annotations[ksm.annotations.BackendPublicIPOverwrite] {
log.Infof("Overriding public ip with '%s' from node annotation '%s'",
n.Annotations[ksm.annotations.BackendPublicIPOverwrite],
ksm.annotations.BackendPublicIPOverwrite)
n.Annotations[ksm.annotations.BackendPublicIP] = n.Annotations[ksm.annotations.BackendPublicIPOverwrite]
}
} else {
n.Annotations[ksm.annotations.BackendPublicIP] = attrs.PublicIP.String()
}
}
if (attrs.BackendType == "vxlan" && string(v6Bd) != "null") || (attrs.BackendType == "wireguard" && string(v6Bd) != "null" && attrs.PublicIPv6 != nil) || (attrs.BackendType == "host-gw" && attrs.PublicIPv6 != nil) {
n.Annotations[ksm.annotations.BackendV6Data] = string(v6Bd)
if n.Annotations[ksm.annotations.BackendPublicIPv6Overwrite] != "" {
if n.Annotations[ksm.annotations.BackendPublicIPv6] != n.Annotations[ksm.annotations.BackendPublicIPv6Overwrite] {
log.Infof("Overriding public ipv6 with '%s' from node annotation '%s'",
n.Annotations[ksm.annotations.BackendPublicIPv6Overwrite],
ksm.annotations.BackendPublicIPv6Overwrite)
n.Annotations[ksm.annotations.BackendPublicIPv6] = n.Annotations[ksm.annotations.BackendPublicIPv6Overwrite]
}
} else {
n.Annotations[ksm.annotations.BackendPublicIPv6] = attrs.PublicIPv6.String()
}
}
n.Annotations[ksm.annotations.SubnetKubeManaged] = "true"
oldData, err := json.Marshal(cachedNode)
if err != nil {
return nil, err
}
newData, err := json.Marshal(n)
if err != nil {
return nil, err
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
if err != nil {
return nil, fmt.Errorf("failed to create patch for node %q: %v", ksm.nodeName, err)
}
_, err = ksm.client.CoreV1().Nodes().Patch(ctx, ksm.nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
if err != nil {
return nil, err
}
}
if ksm.setNodeNetworkUnavailable {
log.Infoln("Setting NodeNetworkUnavailable")
err = ksm.setNodeNetworkUnavailableFalse(ctx)
if err != nil {
log.Errorf("Unable to set NodeNetworkUnavailable to False for %q: %v", ksm.nodeName, err)
}
} else {
log.Infoln("Skip setting NodeNetworkUnavailable")
}
lease := &subnet.Lease{
Attrs: *attrs,
Expiration: time.Now().Add(24 * time.Hour),
}
if cidr != nil && ksm.enableIPv4 {
lease.Subnet = ip.FromIPNet(cidr)
}
if ipv6Cidr != nil {
lease.IPv6Subnet = ip.FromIP6Net(ipv6Cidr)
}
//TODO - only vxlan, host-gw and wireguard backends support dual stack now.
if attrs.BackendType != "vxlan" && attrs.BackendType != "host-gw" && attrs.BackendType != "wireguard" {
lease.EnableIPv4 = true
lease.EnableIPv6 = false
}
return lease, nil
}
func (ksm *kubeSubnetManager) WatchLeases(ctx context.Context, cursor interface{}) (subnet.LeaseWatchResult, error) {
select {
case event := <-ksm.events:
return subnet.LeaseWatchResult{
Events: []subnet.Event{event},
}, nil
case <-ctx.Done():
return subnet.LeaseWatchResult{}, context.Canceled
}
}
func (ksm *kubeSubnetManager) Run(ctx context.Context) {
log.Infof("Starting kube subnet manager")
ksm.nodeController.Run(ctx.Done())
}
func (ksm *kubeSubnetManager) nodeToLease(n v1.Node) (l subnet.Lease, err error) {
if ksm.enableIPv4 {
l.Attrs.PublicIP, err = ip.ParseIP4(n.Annotations[ksm.annotations.BackendPublicIP])
if err != nil {
return l, err
}
l.Attrs.BackendData = json.RawMessage(n.Annotations[ksm.annotations.BackendData])
_, cidr, err := net.ParseCIDR(n.Spec.PodCIDR)
if err != nil {
return l, err
}
l.Subnet = ip.FromIPNet(cidr)
l.EnableIPv4 = ksm.enableIPv4
}
if ksm.enableIPv6 {
l.Attrs.PublicIPv6, err = ip.ParseIP6(n.Annotations[ksm.annotations.BackendPublicIPv6])
if err != nil {
return l, err
}
l.Attrs.BackendV6Data = json.RawMessage(n.Annotations[ksm.annotations.BackendV6Data])
ipv6Cidr := new(net.IPNet)
log.Infof("Creating the node lease for IPv6. This is the n.Spec.PodCIDRs: %v", n.Spec.PodCIDRs)
for _, podCidr := range n.Spec.PodCIDRs {
_, parseCidr, err := net.ParseCIDR(podCidr)
if err != nil {
return l, err
}
if len(parseCidr.IP) == net.IPv6len {
ipv6Cidr = parseCidr
break
}
}
l.IPv6Subnet = ip.FromIP6Net(ipv6Cidr)
l.EnableIPv6 = ksm.enableIPv6
}
l.Attrs.BackendType = n.Annotations[ksm.annotations.BackendType]
return l, nil
}
// RenewLease: unimplemented
func (ksm *kubeSubnetManager) RenewLease(ctx context.Context, lease *subnet.Lease) error {
return ErrUnimplemented
}
func (ksm *kubeSubnetManager) WatchLease(ctx context.Context, sn ip.IP4Net, sn6 ip.IP6Net, cursor interface{}) (subnet.LeaseWatchResult, error) {
return subnet.LeaseWatchResult{}, ErrUnimplemented
}
func (ksm *kubeSubnetManager) Name() string {
return fmt.Sprintf("Kubernetes Subnet Manager - %s", ksm.nodeName)
}
// Set Kubernetes NodeNetworkUnavailable to false when starting
// https://kubernetes.io/docs/concepts/architecture/nodes/#condition
func (ksm *kubeSubnetManager) setNodeNetworkUnavailableFalse(ctx context.Context) error {
condition := v1.NodeCondition{
Type: v1.NodeNetworkUnavailable,
Status: v1.ConditionFalse,
Reason: "FlannelIsUp",
Message: "Flannel is running on this node",
LastTransitionTime: metav1.Now(),
LastHeartbeatTime: metav1.Now(),
}
raw, err := json.Marshal(&[]v1.NodeCondition{condition})
if err != nil {
return err
}
patch := []byte(fmt.Sprintf(`{"status":{"conditions":%s}}`, raw))
_, err = ksm.client.CoreV1().Nodes().PatchStatus(ctx, ksm.nodeName, patch)
return err
}
|
[
"\"NODE_NAME\"",
"\"POD_NAME\"",
"\"POD_NAMESPACE\"",
"\"EVENT_QUEUE_DEPTH\""
] |
[] |
[
"POD_NAMESPACE",
"NODE_NAME",
"POD_NAME",
"EVENT_QUEUE_DEPTH"
] |
[]
|
["POD_NAMESPACE", "NODE_NAME", "POD_NAME", "EVENT_QUEUE_DEPTH"]
|
go
| 4 | 0 | |
pkg/updatecheck/updatecheck_test.go
|
package updatecheck
import (
"fmt"
"path/filepath"
"testing"
"time"
"os"
"github.com/drud/ddev/pkg/testcommon"
"github.com/drud/ddev/pkg/version"
asrt "github.com/stretchr/testify/assert"
)
const testOrg = "drud"
const testRepo = "ddev"
// TestUpdateNeeded tests that IsUpdateNeeded correctly reports whether an update check is due.
func TestUpdateNeeded(t *testing.T) {
assert := asrt.New(t)
tmpdir := testcommon.CreateTmpDir("TestUpdateNeeded")
updateFile := filepath.Join(tmpdir, ".update")
// Ensure updates are required when the update file doesn't exist yet.
updateRequired, err := IsUpdateNeeded(updateFile, 60*time.Second)
assert.True(updateRequired, "Update is required when the update file does not exist")
assert.NoError(err)
// Ensure updates are not required when the update duration is impossibly far in the future.
updateRequired, err = IsUpdateNeeded(updateFile, 9999999*time.Second)
assert.False(updateRequired, "Update is not required when the update interval has not been met")
assert.NoError(err)
time.Sleep(2 * time.Second)
// Ensure updates are required for a duration lower than the sleep.
updateRequired, err = IsUpdateNeeded(updateFile, 1*time.Second)
assert.True(updateRequired, "Update is required after the update interval has passed")
assert.NoError(err)
testcommon.CleanupDir(tmpdir)
}
// TestIsReleaseVersion tests isReleaseVersion to ensure it correctly picks up on release builds vs dev builds
func TestIsReleaseVersion(t *testing.T) {
assert := asrt.New(t)
var versionTests = []struct {
in string
out bool
}{
{"0.1.0", true},
{"v0.1.0", true},
{"v19.99.99", true},
{"v1.17.0-alpha1", true},
{"v1.18.0-alpha4-43-gb5ff9108-dirty", false},
{"v1.18.0-alpha4-43-gb5ff9108", false},
{"19.99.99-8us8dfgh7-dirty", false},
{"v0.3-7-g3ca5586-dirty", false},
}
for _, tt := range versionTests {
result := isReleaseVersion(tt.in)
assert.Equal(result, tt.out, fmt.Sprintf("Got output which was not expected from isReleaseVersion. Input: %s Output: %t Expected: %t", tt.in, result, tt.out))
}
}
// TestAvailableUpdates tests AvailableUpdates to ensure it correctly reports when a newer release is available
func TestAvailableUpdates(t *testing.T) {
assert := asrt.New(t)
if os.Getenv("GOTEST_SHORT") != "" {
t.Skip("Skipping TestAvailableUpdates because GOTEST_SHORT env var is set")
}
var versionTests = []struct {
in string
out bool
}{
{"0.0.0", true},
{"v0.1.1", true},
{version.DdevVersion, false},
{"v999999.999999.999999", false},
}
for _, tt := range versionTests {
updateNeeded, _, updateURL, err := AvailableUpdates(testOrg, testRepo, tt.in)
if err != nil {
t.Skipf("AvailableUpdates() failed, err=%v", err)
}
assert.Equal(updateNeeded, tt.out, fmt.Sprintf("Unexpected output from AvailableUpdates. Input: %s Output: %t Expected: %t Org: %s Repo: %s", tt.in, updateNeeded, tt.out, testOrg, testRepo))
if updateNeeded {
assert.Contains(updateURL, "https://")
}
}
}
|
[
"\"GOTEST_SHORT\""
] |
[] |
[
"GOTEST_SHORT"
] |
[]
|
["GOTEST_SHORT"]
|
go
| 1 | 0 | |
cmd/start/start.go
|
package start
import (
"context"
"fmt"
"github.com/hashicorp/yamux"
"github.com/kfsoftware/hlf-cc-dev/gql/models"
"github.com/kfsoftware/hlf-cc-dev/log"
"github.com/kfsoftware/getout/pkg/tunnel"
"github.com/lithammer/shortuuid/v3"
"github.com/pkg/errors"
"github.com/shurcooL/graphql"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"io/ioutil"
"k8s.io/client-go/util/homedir"
"net"
"os"
"path/filepath"
"strings"
)
type Paths struct {
base string
tmp string
}
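// mustGetHLFCCPaths returns the directories used to store generated chaincode material, defaulting
// to ~/.hlf-cc and honoring the HLF_CC_ROOT environment variable as an override.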
func mustGetHLFCCPaths() Paths {
base := filepath.Join(homedir.HomeDir(), ".hlf-cc")
if fromEnv := os.Getenv("HLF_CC_ROOT"); fromEnv != "" {
base = fromEnv
log.Infof("using environment override HLF_CC_ROOT=%s", fromEnv)
}
base, err := filepath.Abs(base)
if err != nil {
panic(errors.Wrap(err, "cannot get absolute path"))
}
return Paths{base: base, tmp: os.TempDir()}
}
func (p Paths) CertsDir(chaincode string) string {
return filepath.Join(p.base, "certs", chaincode)
}
const (
startDesc = ``
startExample = ``
)
type startCmd struct {
tenant int
chaincode string
localChaincodeAddress string
tunnelAddress string
apiUrl string
pdcFile string
accessToken string
metaInf string
chaincodeAddress string
chaincodeAddressSubdomain string
}
func (c startCmd) validate() error {
if c.tenant == 0 {
return errors.New("--tenant is required")
}
if c.chaincodeAddress == "" && c.chaincodeAddressSubdomain == "" {
return errors.New("either --chaincode or --chaincodeAddressSubdomain are required")
}
if c.chaincode == "" {
return errors.New("--chaincode is required")
}
if c.tunnelAddress == "" {
return errors.New("--tunnelAddress is required")
}
if c.localChaincodeAddress == "" {
return errors.New("--localChaincodeAddress is required")
}
if c.apiUrl == "" {
return errors.New("--apiUrl is required")
}
if c.metaInf != "" {
if _, err := os.Stat(c.metaInf); os.IsNotExist(err) {
return err
}
}
return nil
}
func ensureDirs(paths ...string) error {
for _, p := range paths {
log.Infof("Ensure creating dir: %q", p)
if err := os.MkdirAll(p, 0755); err != nil {
return errors.Wrapf(err, "failed to ensure create directory %q", p)
}
}
return nil
}
func (c startCmd) run() error {
var err error
p := mustGetHLFCCPaths()
err = ensureDirs(
p.CertsDir(c.chaincode),
)
if err != nil {
return err
}
gqlClient := graphql.NewClient(c.apiUrl, nil)
ctx := context.Background()
chaincodeAddress := c.chaincodeAddress
if c.chaincodeAddressSubdomain != "" {
chaincodeAddressPrefix := strings.ToLower(shortuuid.New())
chaincodeAddress = fmt.Sprintf("%s.%s", chaincodeAddressPrefix, c.chaincodeAddressSubdomain)
}
pdcContents := ""
if c.pdcFile != "" {
pdcContentsBytes, err := ioutil.ReadFile(c.pdcFile)
if err != nil {
return err
}
pdcContents = string(pdcContentsBytes)
}
input := models.DeployChaincodeInput{
Name: c.chaincode,
TenantID: c.tenant,
ChaincodeAddress: chaincodeAddress,
Pdc: pdcContents,
}
var m struct {
DeployChaincode struct {
ChaincodeName string `graphql:"chaincodeName"`
ChannelName string `graphql:"channelName"`
PackageID string `graphql:"packageID"`
Version string `graphql:"version"`
Sequence int `graphql:"sequence"`
PrivateKey string `graphql:"privateKey"`
Certificate string `graphql:"certificate"`
RootCertificate string `graphql:"rootCertificate"`
} `graphql:"deployChaincode(input: $input)"`
}
vars := map[string]interface{}{
"input": input,
}
err = gqlClient.Mutate(ctx, &m, vars)
if err != nil {
return err
}
chaincodeKeyPath := filepath.Join(p.CertsDir(c.chaincode), "chaincode.key")
err = ioutil.WriteFile(chaincodeKeyPath, []byte(m.DeployChaincode.PrivateKey), 0777)
if err != nil {
return err
}
chaincodeCertPath := filepath.Join(p.CertsDir(c.chaincode), "chaincode.pem")
err = ioutil.WriteFile(chaincodeCertPath, []byte(m.DeployChaincode.Certificate), 0777)
if err != nil {
return err
}
caCertPath := filepath.Join(p.CertsDir(c.chaincode), "ca.pem")
err = ioutil.WriteFile(caCertPath, []byte(m.DeployChaincode.RootCertificate), 0777)
if err != nil {
return err
}
dotEnvFile := fmt.Sprintf(`
export CORE_CHAINCODE_ID_NAME=%s
export CORE_CHAINCODE_ADDRESS=%s
export CORE_CHAINCODE_KEY_FILE=%s
export CORE_CHAINCODE_CERT_FILE=%s
export CORE_CHAINCODE_CA_FILE=%s
`, m.DeployChaincode.PackageID, c.localChaincodeAddress, chaincodeKeyPath, chaincodeCertPath, caCertPath)
dotEnvPath := filepath.Join(p.CertsDir(c.chaincode), ".env")
err = ioutil.WriteFile(dotEnvPath, []byte(dotEnvFile), 0777)
if err != nil {
return err
}
sni, _, err := net.SplitHostPort(chaincodeAddress)
if err != nil {
return err
}
log.Infof("Channel: %s Chaincode: %s", m.DeployChaincode.ChaincodeName, m.DeployChaincode.ChannelName)
log.Infof("starting tunnel from %s to %s", c.localChaincodeAddress, chaincodeAddress)
err = startTunnel(
c.tunnelAddress,
c.localChaincodeAddress,
sni,
)
if err != nil {
return err
}
return err
}
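// startTunnel dials the tunnel server, opens a yamux session and forwards traffic matching the given
// TLS SNI to the local chaincode address.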
func startTunnel(tunnelAddr string, localAddress string, sni string) error {
conn, err := net.Dial("tcp", tunnelAddr)
if err != nil {
panic(err)
}
session, err := yamux.Client(conn, nil)
if err != nil {
panic(err)
}
tunnelCli := tunnel.NewTunnelClient(
session,
localAddress,
)
err = tunnelCli.StartTlsTunnel(sni)
if err != nil {
return err
}
err = tunnelCli.Start()
if err != nil {
return err
}
return nil
}
func NewStartCmd() *cobra.Command {
c := &startCmd{}
cmd := &cobra.Command{
Use: "start",
Short: "Start development for chaincode",
Long: startDesc,
Example: startExample,
RunE: func(cmd *cobra.Command, args []string) error {
var err error
viper.AutomaticEnv()
err = viper.BindEnv("")
if err != nil {
return nil
}
viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
if err := c.validate(); err != nil {
return err
}
return c.run()
},
}
f := cmd.Flags()
f.StringVar(&c.chaincodeAddress, "chaincodeAddress", "", "chaincode address to be accessed by the peer(needs to be publicly accessible)")
f.StringVar(&c.chaincodeAddressSubdomain, "chaincodeAddressSubdomain", "", "subdomain to be used for chaincode address, in this case, the address is generated automatically <guid>.<chaincodeAddressSubdomain>")
f.IntVar(&c.tenant, "tenant", 0, "tenant id")
f.StringVar(&c.chaincode, "chaincode", "", "chaincode name within the channel")
f.StringVar(&c.localChaincodeAddress, "localChaincode", "", "address of the local chaincode server, example: localhost:9999")
f.StringVar(&c.apiUrl, "apiUrl", "", "apiUrl to interact with the peers")
f.StringVar(&c.pdcFile, "pdc", "", "pdc file json, see examples/pdc.json")
f.StringVar(&c.tunnelAddress, "tunnelAddress", "", "address of the local chaincode server, example: localhost:9999")
f.StringVar(&c.accessToken, "accessToken", "", "access token")
f.StringVar(&c.metaInf, "metaInf", "", "metadata")
return cmd
}
|
[
"\"HLF_CC_ROOT\""
] |
[] |
[
"HLF_CC_ROOT"
] |
[]
|
["HLF_CC_ROOT"]
|
go
| 1 | 0 | |
client/listers/azurerm/v1alpha1/recoveryservicesprotectionpolicyvm.go
|
/*
Copyright The Kubeform Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
import (
v1alpha1 "kubeform.dev/kubeform/apis/azurerm/v1alpha1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// RecoveryServicesProtectionPolicyVmLister helps list RecoveryServicesProtectionPolicyVms.
type RecoveryServicesProtectionPolicyVmLister interface {
// List lists all RecoveryServicesProtectionPolicyVms in the indexer.
List(selector labels.Selector) (ret []*v1alpha1.RecoveryServicesProtectionPolicyVm, err error)
// RecoveryServicesProtectionPolicyVms returns an object that can list and get RecoveryServicesProtectionPolicyVms.
RecoveryServicesProtectionPolicyVms(namespace string) RecoveryServicesProtectionPolicyVmNamespaceLister
RecoveryServicesProtectionPolicyVmListerExpansion
}
// recoveryServicesProtectionPolicyVmLister implements the RecoveryServicesProtectionPolicyVmLister interface.
type recoveryServicesProtectionPolicyVmLister struct {
indexer cache.Indexer
}
// NewRecoveryServicesProtectionPolicyVmLister returns a new RecoveryServicesProtectionPolicyVmLister.
func NewRecoveryServicesProtectionPolicyVmLister(indexer cache.Indexer) RecoveryServicesProtectionPolicyVmLister {
return &recoveryServicesProtectionPolicyVmLister{indexer: indexer}
}
// List lists all RecoveryServicesProtectionPolicyVms in the indexer.
func (s *recoveryServicesProtectionPolicyVmLister) List(selector labels.Selector) (ret []*v1alpha1.RecoveryServicesProtectionPolicyVm, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha1.RecoveryServicesProtectionPolicyVm))
})
return ret, err
}
// RecoveryServicesProtectionPolicyVms returns an object that can list and get RecoveryServicesProtectionPolicyVms.
func (s *recoveryServicesProtectionPolicyVmLister) RecoveryServicesProtectionPolicyVms(namespace string) RecoveryServicesProtectionPolicyVmNamespaceLister {
return recoveryServicesProtectionPolicyVmNamespaceLister{indexer: s.indexer, namespace: namespace}
}
// RecoveryServicesProtectionPolicyVmNamespaceLister helps list and get RecoveryServicesProtectionPolicyVms.
type RecoveryServicesProtectionPolicyVmNamespaceLister interface {
// List lists all RecoveryServicesProtectionPolicyVms in the indexer for a given namespace.
List(selector labels.Selector) (ret []*v1alpha1.RecoveryServicesProtectionPolicyVm, err error)
// Get retrieves the RecoveryServicesProtectionPolicyVm from the indexer for a given namespace and name.
Get(name string) (*v1alpha1.RecoveryServicesProtectionPolicyVm, error)
RecoveryServicesProtectionPolicyVmNamespaceListerExpansion
}
// recoveryServicesProtectionPolicyVmNamespaceLister implements the RecoveryServicesProtectionPolicyVmNamespaceLister
// interface.
type recoveryServicesProtectionPolicyVmNamespaceLister struct {
indexer cache.Indexer
namespace string
}
// List lists all RecoveryServicesProtectionPolicyVms in the indexer for a given namespace.
func (s recoveryServicesProtectionPolicyVmNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.RecoveryServicesProtectionPolicyVm, err error) {
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha1.RecoveryServicesProtectionPolicyVm))
})
return ret, err
}
// Get retrieves the RecoveryServicesProtectionPolicyVm from the indexer for a given namespace and name.
func (s recoveryServicesProtectionPolicyVmNamespaceLister) Get(name string) (*v1alpha1.RecoveryServicesProtectionPolicyVm, error) {
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1alpha1.Resource("recoveryservicesprotectionpolicyvm"), name)
}
return obj.(*v1alpha1.RecoveryServicesProtectionPolicyVm), nil
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
labs_challenge/wsgi.py
|
"""
WSGI config for labs_challenge project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'labs_challenge.settings.development')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
h2o-py/tests/pyunit_utils/utilsPY.py
|
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import range
from past.builtins import basestring
import sys, os
import numpy as np
import operator
try: # works with python 2.7 not 3
from StringIO import StringIO
except: # works with python 3
from io import StringIO
sys.path.insert(1, "../../")
import h2o
import imp
import random
import re
import subprocess
from subprocess import STDOUT,PIPE
from h2o.utils.shared_utils import temp_ctr
from h2o.model.binomial import H2OBinomialModel
from h2o.model.clustering import H2OClusteringModel
from h2o.model.multinomial import H2OMultinomialModel
from h2o.model.ordinal import H2OOrdinalModel
from h2o.model.regression import H2ORegressionModel
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.estimators.deeplearning import H2ODeepLearningEstimator
from h2o.estimators.random_forest import H2ORandomForestEstimator
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.estimators.kmeans import H2OKMeansEstimator
from h2o.estimators.naive_bayes import H2ONaiveBayesEstimator
from h2o.transforms.decomposition import H2OPCA
from decimal import *
import urllib.request, urllib.error, urllib.parse
import numpy as np
import shutil
import string
import copy
import json
import math
from random import shuffle
import scipy.special
from h2o.utils.typechecks import assert_is_type
def check_models(model1, model2, use_cross_validation=False, op='e'):
"""
Check that the given models are equivalent.
:param model1:
:param model2:
:param use_cross_validation: boolean. if True, use validation metrics to determine model equality. Otherwise, use
training metrics.
:param op: comparison operator to use. 'e':==, 'g':>, 'ge':>=
:return: None. Throw meaningful error messages if the check fails
"""
# 1. Check model types
model1_type = model1.__class__.__name__
model2_type = model2.__class__.__name__
assert model1_type == model2_type, "The model types differ. The first model is of type {0} and the second " \
"model is of type {1}.".format(model1_type, model2_type)
# 2. Check model metrics
if isinstance(model1,H2OBinomialModel): # 2a. Binomial
# F1
f1_1 = model1.F1(xval=use_cross_validation)
f1_2 = model2.F1(xval=use_cross_validation)
if op == 'e': assert f1_1[0][1] == f1_2[0][1], "The first model has an F1 of {0} and the second model has an F1 of " \
"{1}. Expected the first to be == to the second.".format(f1_1[0][1], f1_2[0][1])
elif op == 'g': assert f1_1[0][1] > f1_2[0][1], "The first model has an F1 of {0} and the second model has an F1 of " \
"{1}. Expected the first to be > than the second.".format(f1_1[0][1], f1_2[0][1])
elif op == 'ge': assert f1_1[0][1] >= f1_2[0][1], "The first model has an F1 of {0} and the second model has an F1 of " \
"{1}. Expected the first to be >= than the second.".format(f1_1[0][1], f1_2[0][1])
elif isinstance(model1,H2ORegressionModel): # 2b. Regression
# MSE
mse1 = model1.mse(xval=use_cross_validation)
mse2 = model2.mse(xval=use_cross_validation)
if op == 'e': assert mse1 == mse2, "The first model has an MSE of {0} and the second model has an MSE of " \
"{1}. Expected the first to be == to the second.".format(mse1, mse2)
elif op == 'g': assert mse1 > mse2, "The first model has an MSE of {0} and the second model has an MSE of " \
"{1}. Expected the first to be > than the second.".format(mse1, mse2)
elif op == 'ge': assert mse1 >= mse2, "The first model has an MSE of {0} and the second model has an MSE of " \
"{1}. Expected the first to be >= than the second.".format(mse1, mse2)
elif isinstance(model1,H2OMultinomialModel) or isinstance(model1,H2OOrdinalModel): # 2c. Multinomial
# hit-ratio
pass
elif isinstance(model1,H2OClusteringModel): # 2d. Clustering
# totss
totss1 = model1.totss(xval=use_cross_validation)
totss2 = model2.totss(xval=use_cross_validation)
if op == 'e': assert totss1 == totss2, "The first model has an TOTSS of {0} and the second model has an " \
"TOTSS of {1}. Expected the first to be == to the second.".format(totss1,
totss2)
elif op == 'g': assert totss1 > totss2, "The first model has an TOTSS of {0} and the second model has an " \
"TOTSS of {1}. Expected the first to be > than the second.".format(totss1,
totss2)
elif op == 'ge': assert totss1 >= totss2, "The first model has an TOTSS of {0} and the second model has an " \
"TOTSS of {1}. Expected the first to be >= than the second." \
"".format(totss1, totss2)
def check_dims_values(python_obj, h2o_frame, rows, cols, dim_only=False):
"""
Check that the dimensions and values of the python object and H2OFrame are equivalent. Assumes that the python
object conforms to the rules specified in the h2o frame documentation.
:param python_obj: a (nested) list, tuple, dictionary, numpy.ndarray, or pandas.DataFrame
:param h2o_frame: an H2OFrame
:param rows: number of rows
:param cols: number of columns
:param dim_only: check the dimensions only
:return: None
"""
h2o_rows, h2o_cols = h2o_frame.dim
assert h2o_rows == rows and h2o_cols == cols, "failed dim check! h2o_rows:{0} rows:{1} h2o_cols:{2} cols:{3}" \
"".format(h2o_rows, rows, h2o_cols, cols)
if not dim_only:
if isinstance(python_obj, (list, tuple)):
for c in range(cols):
for r in range(rows):
pval = python_obj[r]
if isinstance(pval, (list, tuple)): pval = pval[c]
hval = h2o_frame[r, c]
assert pval == hval or abs(pval - hval) < 1e-10, \
"expected H2OFrame to have the same values as the python object for row {0} " \
"and column {1}, but h2o got {2} and python got {3}.".format(r, c, hval, pval)
elif isinstance(python_obj, dict):
for r in range(rows):
for k in list(python_obj.keys()):
pval = python_obj[k][r] if hasattr(python_obj[k],'__iter__') else python_obj[k]
hval = h2o_frame[r,k]
assert pval == hval, "expected H2OFrame to have the same values as the python object for row {0} " \
"and column {1}, but h2o got {2} and python got {3}.".format(r, k, hval, pval)
def np_comparison_check(h2o_data, np_data, num_elements):
"""
Check values achieved by h2o against values achieved by numpy
:param h2o_data: an H2OFrame or H2OVec
:param np_data: a numpy array
:param num_elements: number of elements to compare
:return: None
"""
# Check for numpy
try:
imp.find_module('numpy')
except ImportError:
assert False, "failed comparison check because unable to import numpy"
import numpy as np
rows, cols = h2o_data.dim
for i in range(num_elements):
r = random.randint(0,rows-1)
c = random.randint(0,cols-1)
h2o_val = h2o_data[r,c]
np_val = np_data[r,c] if len(np_data.shape) > 1 else np_data[r]
if isinstance(np_val, np.bool_): np_val = bool(np_val) # numpy haz special bool type :(
assert np.absolute(h2o_val - np_val) < 1e-5, \
"failed comparison check! h2o computed {0} and numpy computed {1}".format(h2o_val, np_val)
# Perform h2o predict and mojo predict; returns frames with the h2o predictions and the mojo predictions.
def mojo_predict(model,tmpdir, mojoname):
"""
Perform h2o predict and mojo predict. Frames containing the h2o predictions and the mojo predictions are returned.
It is assumed that the input data set is saved as in.csv in tmpdir directory.
:param model: h2o model where you want to use to perform prediction
:param tmpdir: directory where your mojo zip files are stored
:param mojoname: name of your mojo zip file.
:return: the h2o prediction frame and the mojo prediction frame
"""
newTest = h2o.import_file(os.path.join(tmpdir, 'in.csv'), header=1) # Make sure h2o and mojo use same in.csv
predict_h2o = model.predict(newTest)
# load mojo and have it do predict
outFileName = os.path.join(tmpdir, 'out_mojo.csv')
mojoZip = os.path.join(tmpdir, mojoname) + ".zip"
genJarDir = str.split(str(tmpdir),'/')
genJarDir = '/'.join(genJarDir[0:genJarDir.index('h2o-py')]) # locate directory of genmodel.jar
java_cmd = ["java", "-ea", "-cp", os.path.join(genJarDir, "h2o-assemblies/genmodel/build/libs/genmodel.jar"),
"-Xmx12g", "-XX:MaxPermSize=2g", "-XX:ReservedCodeCacheSize=256m", "hex.genmodel.tools.PredictCsv",
"--input", os.path.join(tmpdir, 'in.csv'), "--output",
outFileName, "--mojo", mojoZip, "--decimal"]
p = subprocess.Popen(java_cmd, stdout=PIPE, stderr=STDOUT)
o, e = p.communicate()
pred_mojo = h2o.import_file(os.path.join(tmpdir, 'out_mojo.csv'), header=1) # load mojo prediction into a frame and compare
# os.remove(mojoZip)
return predict_h2o, pred_mojo
# perform pojo predict. Frame containing pojo predict is returned.
def pojo_predict(model, tmpdir, pojoname):
h2o.download_pojo(model, path=tmpdir)
h2o_genmodel_jar = os.path.join(tmpdir, "h2o-genmodel.jar")
java_file = os.path.join(tmpdir, pojoname + ".java")
in_csv = (os.path.join(tmpdir, 'in.csv')) # import the test dataset
print("Compiling Java Pojo")
javac_cmd = ["javac", "-cp", h2o_genmodel_jar, "-J-Xmx12g", java_file]
subprocess.check_call(javac_cmd)
out_pojo_csv = os.path.join(tmpdir, "out_pojo.csv")
cp_sep = ";" if sys.platform == "win32" else ":"
java_cmd = ["java", "-ea", "-cp", h2o_genmodel_jar + cp_sep + tmpdir, "-Xmx12g",
"-XX:ReservedCodeCacheSize=256m", "hex.genmodel.tools.PredictCsv",
"--pojo", pojoname, "--input", in_csv, "--output", out_pojo_csv, "--decimal"]
p = subprocess.Popen(java_cmd, stdout=PIPE, stderr=STDOUT)
o, e = p.communicate()
print("Java output: {0}".format(o))
assert os.path.exists(out_pojo_csv), "Expected file {0} to exist, but it does not.".format(out_pojo_csv)
predict_pojo = h2o.import_file(out_pojo_csv, header=1)
return predict_pojo
def javapredict(algo, equality, train, test, x, y, compile_only=False, separator=",", setInvNumNA=False,**kwargs):
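"""
Train an H2O model for the given algo, download and compile its POJO and, unless compile_only is set,
compare the H2O predictions on the test frame against the predictions produced by the PredictCsv driver.
Raises an assertion error when the predictions differ beyond the allowed tolerance.
"""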
print("Creating model in H2O")
if algo == "gbm": model = H2OGradientBoostingEstimator(**kwargs)
elif algo == "random_forest": model = H2ORandomForestEstimator(**kwargs)
elif algo == "deeplearning": model = H2ODeepLearningEstimator(**kwargs)
elif algo == "glm": model = H2OGeneralizedLinearEstimator(**kwargs)
elif algo == "naive_bayes": model = H2ONaiveBayesEstimator(**kwargs)
elif algo == "kmeans": model = H2OKMeansEstimator(**kwargs)
elif algo == "pca": model = H2OPCA(**kwargs)
else: raise ValueError
if algo == "kmeans" or algo == "pca": model.train(x=x, training_frame=train)
else: model.train(x=x, y=y, training_frame=train)
print(model)
# HACK: munge model._id so that it conforms to Java class name. For example, change K-means to K_means.
# TODO: clients should extract Java class name from header.
regex = re.compile("[+\\-* !@#$%^&()={}\\[\\]|;:'\"<>,.?/]")
pojoname = regex.sub("_", model._id)
print("Downloading Java prediction model code from H2O")
tmpdir = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "results", pojoname))
os.makedirs(tmpdir)
h2o.download_pojo(model, path=tmpdir)
h2o_genmodel_jar = os.path.join(tmpdir, "h2o-genmodel.jar")
assert os.path.exists(h2o_genmodel_jar), "Expected file {0} to exist, but it does not.".format(h2o_genmodel_jar)
print("h2o-genmodel.jar saved in {0}".format(h2o_genmodel_jar))
java_file = os.path.join(tmpdir, pojoname + ".java")
assert os.path.exists(java_file), "Expected file {0} to exist, but it does not.".format(java_file)
print("java code saved in {0}".format(java_file))
print("Compiling Java Pojo")
javac_cmd = ["javac", "-cp", h2o_genmodel_jar, "-J-Xmx12g", "-J-XX:MaxPermSize=256m", java_file]
subprocess.check_call(javac_cmd)
if not compile_only:
print("Predicting in H2O")
predictions = model.predict(test)
predictions.summary()
predictions.head()
out_h2o_csv = os.path.join(tmpdir, "out_h2o.csv")
h2o.download_csv(predictions, out_h2o_csv)
assert os.path.exists(out_h2o_csv), "Expected file {0} to exist, but it does not.".format(out_h2o_csv)
print("H2O Predictions saved in {0}".format(out_h2o_csv))
print("Setting up for Java POJO")
in_csv = os.path.join(tmpdir, "in.csv")
h2o.download_csv(test[x], in_csv)
# hack: the PredictCsv driver can't handle quoted strings, so remove them
f = open(in_csv, "r+")
csv = f.read()
csv = re.sub('\"', "", csv)
csv = re.sub(",", separator, csv) # replace with arbitrary separator for input dataset
f.seek(0)
f.write(csv)
f.truncate()
f.close()
assert os.path.exists(in_csv), "Expected file {0} to exist, but it does not.".format(in_csv)
print("Input CSV to PredictCsv saved in {0}".format(in_csv))
print("Running PredictCsv Java Program")
out_pojo_csv = os.path.join(tmpdir, "out_pojo.csv")
cp_sep = ";" if sys.platform == "win32" else ":"
java_cmd = ["java", "-ea", "-cp", h2o_genmodel_jar + cp_sep + tmpdir, "-Xmx12g", "-XX:MaxPermSize=2g",
"-XX:ReservedCodeCacheSize=256m", "hex.genmodel.tools.PredictCsv",
"--pojo", pojoname, "--input", in_csv, "--output", out_pojo_csv, "--separator", separator]
if setInvNumNA:
java_cmd.append("--setConvertInvalidNum")
p = subprocess.Popen(java_cmd, stdout=PIPE, stderr=STDOUT)
o, e = p.communicate()
print("Java output: {0}".format(o))
assert os.path.exists(out_pojo_csv), "Expected file {0} to exist, but it does not.".format(out_pojo_csv)
predictions2 = h2o.upload_file(path=out_pojo_csv)
print("Pojo predictions saved in {0}".format(out_pojo_csv))
print("Comparing predictions between H2O and Java POJO")
# Dimensions
hr, hc = predictions.dim
pr, pc = predictions2.dim
assert hr == pr, "Expected the same number of rows, but got {0} and {1}".format(hr, pr)
assert hc == pc, "Expected the same number of cols, but got {0} and {1}".format(hc, pc)
# Value
for r in range(hr):
hp = predictions[r, 0]
if equality == "numeric":
pp = float.fromhex(predictions2[r, 0])
assert abs(hp - pp) < 1e-4, \
"Expected predictions to be the same (within 1e-4) for row %d, but got %r and %r" % (r, hp, pp)
elif equality == "class":
pp = predictions2[r, 0]
assert hp == pp, "Expected predictions to be the same for row %d, but got %r and %r" % (r, hp, pp)
else:
raise ValueError
def javamunge(assembly, pojoname, test, compile_only=False):
"""
Here's how to use:
assembly is an already fit H2OAssembly;
The test set should be used to compare the output here and the output of the POJO.
"""
print("Downloading munging POJO code from H2O")
tmpdir = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "results", pojoname))
os.makedirs(tmpdir)
assembly.to_pojo(pojoname, path=tmpdir, get_jar=True)
h2o_genmodel_jar = os.path.join(tmpdir, "h2o-genmodel.jar")
assert os.path.exists(h2o_genmodel_jar), "Expected file {0} to exist, but it does not.".format(h2o_genmodel_jar)
print("h2o-genmodel.jar saved in {0}".format(h2o_genmodel_jar))
java_file = os.path.join(tmpdir, pojoname + ".java")
assert os.path.exists(java_file), "Expected file {0} to exist, but it does not.".format(java_file)
print("java code saved in {0}".format(java_file))
print("Compiling Java Pojo")
javac_cmd = ["javac", "-cp", h2o_genmodel_jar, "-J-Xmx12g", "-J-XX:MaxPermSize=256m", java_file]
subprocess.check_call(javac_cmd)
if not compile_only:
print("Setting up for Java POJO")
in_csv = os.path.join(tmpdir, "in.csv")
h2o.download_csv(test, in_csv)
assert os.path.exists(in_csv), "Expected file {0} to exist, but it does not.".format(in_csv)
print("Input CSV to mungedCSV saved in {0}".format(in_csv))
print("Predicting in H2O")
munged = assembly.fit(test)
munged.head()
out_h2o_csv = os.path.join(tmpdir, "out_h2o.csv")
h2o.download_csv(munged, out_h2o_csv)
assert os.path.exists(out_h2o_csv), "Expected file {0} to exist, but it does not.".format(out_h2o_csv)
print("Munged frame saved in {0}".format(out_h2o_csv))
print("Running PredictCsv Java Program")
out_pojo_csv = os.path.join(tmpdir, "out_pojo.csv")
cp_sep = ";" if sys.platform == "win32" else ":"
java_cmd = ["java", "-ea", "-cp", h2o_genmodel_jar + cp_sep + tmpdir, "-Xmx12g", "-XX:MaxPermSize=2g",
"-XX:ReservedCodeCacheSize=256m", "hex.genmodel.tools.MungeCsv", "--header", "--munger", pojoname,
"--input", in_csv, "--output", out_pojo_csv]
print("JAVA COMMAND: " + " ".join(java_cmd))
p = subprocess.Popen(java_cmd, stdout=PIPE, stderr=STDOUT)
o, e = p.communicate()
print("Java output: {0}".format(o))
assert os.path.exists(out_pojo_csv), "Expected file {0} to exist, but it does not.".format(out_pojo_csv)
munged2 = h2o.upload_file(path=out_pojo_csv, col_types=test.types)
print("Pojo predictions saved in {0}".format(out_pojo_csv))
print("Comparing predictions between H2O and Java POJO")
# Dimensions
hr, hc = munged.dim
pr, pc = munged2.dim
assert hr == pr, "Expected the same number of rows, but got {0} and {1}".format(hr, pr)
assert hc == pc, "Expected the same number of cols, but got {0} and {1}".format(hc, pc)
# Value
import math
import numbers
munged.show()
munged2.show()
for r in range(hr):
for c in range(hc):
hp = munged[r,c]
pp = munged2[r,c]
if isinstance(hp, numbers.Number):
assert isinstance(pp, numbers.Number)
assert (math.fabs(hp-pp) < 1e-8) or (math.isnan(hp) and math.isnan(pp)), "Expected munged rows to be the same for row {0}, but got {1}, and {2}".format(r, hp, pp)
else:
assert hp==pp, "Expected munged rows to be the same for row {0}, but got {1}, and {2}".format(r, hp, pp)
def locate(path):
"""
Search for a relative path and turn it into an absolute path.
This is handy when hunting for data files to be passed into h2o and used by import file.
Note: This function is for unit testing purposes only.
Parameters
----------
path : str
Path to search for
:return: Absolute path if it is found. None otherwise.
"""
if (test_is_on_hadoop()):
# Jenkins jobs create symbolic links to smalldata and bigdata on the machine that starts the test. However,
# in an h2o multinode hadoop cluster scenario, the clustered machines don't know about the symbolic link.
# Consequently, `locate` needs to return the actual path to the data on the clustered machines. ALL jenkins
# machines store smalldata and bigdata in /home/0xdiag/. If ON.HADOOP is set by the run.py, the path arg MUST
# be an immediate subdirectory of /home/0xdiag/. Moreover, the only guaranteed subdirectories of /home/0xdiag/
# are smalldata and bigdata.
p = os.path.realpath(os.path.join("/home/0xdiag/", path))
if not os.path.exists(p): raise ValueError("File not found: " + path)
return p
else:
tmp_dir = os.path.realpath(os.getcwd())
possible_result = os.path.join(tmp_dir, path)
while (True):
if (os.path.exists(possible_result)):
return possible_result
next_tmp_dir = os.path.dirname(tmp_dir)
if (next_tmp_dir == tmp_dir):
raise ValueError("File not found: " + path)
tmp_dir = next_tmp_dir
possible_result = os.path.join(tmp_dir, path)
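# Hedged usage sketch: how a typical pyunit test might resolve a data file via locate(). The data
# path below is an illustrative assumption; any path under the repository data directories works
# the same way. Not executed on import; a running H2O cluster is assumed when called.
def _example_locate_usage():
    """Illustrative only."""
    iris = h2o.import_file(path=locate("smalldata/iris/iris_wheader.csv"))
    print(iris.dim)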
def hadoop_namenode_is_accessible():
url = "http://{0}:50070".format(hadoop_namenode())
try:
urllib.urlopen(url)
internal = True
    except Exception:
internal = False
return internal
def test_is_on_hadoop():
if hasattr(sys.modules["tests.pyunit_utils"], '__on_hadoop__'):
return sys.modules["tests.pyunit_utils"].__on_hadoop__
return False
def hadoop_namenode():
if os.getenv("NAME_NODE"):
return os.getenv("NAME_NODE").split(".")[0]
elif hasattr(sys.modules["tests.pyunit_utils"], '__hadoop_namenode__'):
return sys.modules["tests.pyunit_utils"].__hadoop_namenode__
return None
def pyunit_exec(test_name):
with open(test_name, "r") as t: pyunit = t.read()
pyunit_c = compile(pyunit, os.path.abspath(test_name), 'exec')
exec(pyunit_c, {})
def standalone_test(test):
h2o.init(strict_version_check=False)
h2o.remove_all()
h2o.log_and_echo("------------------------------------------------------------")
h2o.log_and_echo("")
h2o.log_and_echo("STARTING TEST")
h2o.log_and_echo("")
h2o.log_and_echo("------------------------------------------------------------")
test()
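# Hedged usage sketch (this snippet belongs in a pyunit test script, not in this module): the
# conventional entry point runs the test through standalone_test() when the script is invoked
# directly, and calls the test function itself when driven by the test runner. "my_test" is a
# placeholder name.
#
#     if __name__ == "__main__":
#         pyunit_utils.standalone_test(my_test)
#     else:
#         my_test()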
def make_random_grid_space(algo, ncols=None, nrows=None):
"""
Construct a dictionary of the form {gbm_parameter:list_of_values, ...}, which will eventually be passed to
H2OGridSearch to build a grid object. The gbm parameters, and their associated values, are randomly selected.
:param algo: a string {"gbm", "rf", "dl", "km", "glm"} representing the algo dimension of the grid space
:param ncols: Used for mtries selection or k (pca)
:param nrows: Used for k (pca)
:return: a dictionary of parameter_name:list_of_values
"""
grid_space = {}
if algo in ["gbm", "rf"]:
if random.randint(0,1): grid_space['ntrees'] = random.sample(list(range(1,6)),random.randint(2,3))
if random.randint(0,1): grid_space['max_depth'] = random.sample(list(range(1,6)),random.randint(2,3))
if random.randint(0,1): grid_space['min_rows'] = random.sample(list(range(1,11)),random.randint(2,3))
if random.randint(0,1): grid_space['nbins'] = random.sample(list(range(2,21)),random.randint(2,3))
if random.randint(0,1): grid_space['nbins_cats'] = random.sample(list(range(2,1025)),random.randint(2,3))
if algo == "gbm":
if random.randint(0,1): grid_space['learn_rate'] = [random.random() for _ in range(random.randint(2,3))]
grid_space['distribution'] = random.sample(['bernoulli', 'multinomial', 'gaussian', 'poisson', 'tweedie', 'gamma'], 1)
if algo == "rf":
if random.randint(0,1): grid_space['mtries'] = random.sample(list(range(1,ncols+1)),random.randint(2,3))
if random.randint(0,1): grid_space['sample_rate'] = [random.random() for r in range(random.randint(2,3))]
elif algo == "km":
grid_space['k'] = random.sample(list(range(1,10)),random.randint(2,3))
if random.randint(0,1): grid_space['max_iterations'] = random.sample(list(range(1,1000)),random.randint(2,3))
if random.randint(0,1): grid_space['standardize'] = [True, False]
if random.randint(0,1): grid_space['seed'] = random.sample(list(range(1,1000)),random.randint(2,3))
if random.randint(0,1): grid_space['init'] = random.sample(['Random','PlusPlus','Furthest'],random.randint(2,3))
elif algo == "glm":
if random.randint(0,1): grid_space['alpha'] = [random.random() for r in range(random.randint(2,3))]
grid_space['family'] = random.sample(['binomial','gaussian','poisson','tweedie','gamma'], 1)
        if grid_space['family'] == ["tweedie"]:
            if random.randint(0,1):
                grid_space['tweedie_variance_power'] = [round(random.random()+1,6) for r in range(random.randint(2,3))]
                grid_space['tweedie_link_power'] = [1 - p for p in grid_space['tweedie_variance_power']]
elif algo == "dl":
if random.randint(0,1): grid_space['activation'] = \
random.sample(["Rectifier", "Tanh", "TanhWithDropout", "RectifierWithDropout", "MaxoutWithDropout"],
random.randint(2,3))
if random.randint(0,1): grid_space['l2'] = [0.001*random.random() for r in range(random.randint(2,3))]
grid_space['distribution'] = random.sample(['bernoulli','multinomial','gaussian','poisson','tweedie','gamma'],1)
return grid_space
elif algo == "naiveBayes":
grid_space['laplace'] = 0
if random.randint(0,1): grid_space['laplace'] = [round(random.random() + r, 6) for r in random.sample(list(range(0,11)), random.randint(2,3))]
if random.randint(0,1): grid_space['min_sdev'] = [round(random.random(),6) for r in range(random.randint(2,3))]
if random.randint(0,1): grid_space['eps_sdev'] = [round(random.random(),6) for r in range(random.randint(2,3))]
elif algo == "pca":
if random.randint(0,1): grid_space['max_iterations'] = random.sample(list(range(1,1000)),random.randint(2,3))
if random.randint(0,1): grid_space['transform'] = random.sample(["NONE","STANDARDIZE","NORMALIZE","DEMEAN","DESCALE"], random.randint(2,3))
grid_space['k'] = random.sample(list(range(1,min(ncols,nrows))),random.randint(2,3))
else:
raise ValueError
return grid_space
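# Hedged usage sketch: feed a randomly generated grid space into H2OGridSearch. The prostate data
# set, the "CAPSULE" response column and the pinned 'bernoulli' distribution are illustrative
# assumptions; not executed on import, and a running H2O cluster is assumed when called.
def _example_random_grid_space_usage():
    """Illustrative only."""
    from h2o.grid.grid_search import H2OGridSearch
    from h2o.estimators.gbm import H2OGradientBoostingEstimator
    train = h2o.import_file(path=locate("smalldata/logreg/prostate.csv"))
    train["CAPSULE"] = train["CAPSULE"].asfactor()
    grid_space = make_random_grid_space(algo="gbm")
    grid_space['distribution'] = ['bernoulli']  # pin the distribution so it matches the binary response
    grid = H2OGridSearch(H2OGradientBoostingEstimator, hyper_params=grid_space)
    grid.train(x=list(range(2, train.ncol)), y="CAPSULE", training_frame=train)
    return grid, grid_space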
# Validate given models' parameters against expected values
def expect_model_param(models, attribute_name, expected_values):
print("param: {0}".format(attribute_name))
actual_values = list(set([m.params[attribute_name]['actual'] \
if type(m.params[attribute_name]['actual']) != list
else m.params[attribute_name]['actual'][0] for m in models.models]))
# possible for actual to be a list (GLM)
if type(expected_values) != list:
expected_values = [expected_values]
# limit precision. Rounding happens in some models like RF
actual_values = [x if isinstance(x,basestring) else round(float(x),5) for x in actual_values]
expected_values = [x if isinstance(x,basestring) else round(float(x),5) for x in expected_values]
print("actual values: {0}".format(actual_values))
print("expected values: {0}".format(expected_values))
actual_values_len = len(actual_values)
expected_values_len = len(expected_values)
assert actual_values_len == expected_values_len, "Expected values len: {0}. Actual values len: " \
"{1}".format(expected_values_len, actual_values_len)
actual_values = sorted(actual_values)
expected_values = sorted(expected_values)
for i in range(len(actual_values)):
if isinstance(actual_values[i], float):
            assert abs(actual_values[i]-expected_values[i]) < 1.1e-5, "Too large of a difference between actual and " \
"expected value. Actual value: {}. Expected value: {}"\
.format(actual_values[i], expected_values[i])
else:
assert actual_values[i] == expected_values[i], "Expected: {}. Actual: {}"\
.format(expected_values[i], actual_values[i])
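# Hedged usage sketch: after a grid search, check that the built models really used the requested
# hyper-parameter values. "grid" and "grid_space" are assumed to come from a sketch like the one
# above; 'ntrees' is only present when the random grid space happened to include it.
#
#     if 'ntrees' in grid_space:
#         expect_model_param(grid, 'ntrees', grid_space['ntrees'])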
def rest_ctr():
return h2o.connection().requests_count
def write_syn_floating_point_dataset_glm(csv_training_data_filename, csv_validation_data_filename,
csv_test_data_filename, csv_weight_name, row_count, col_count, data_type,
max_p_value, min_p_value, max_w_value, min_w_value, noise_std, family_type,
valid_row_count, test_row_count, class_number=2,
class_method=('probability', 'probability', 'probability'),
class_margin=[0.0, 0.0, 0.0]):
"""
Generate random data sets to test the GLM algo using the following steps:
1. randomly generate the intercept and weight vector;
2. generate a set of predictors X;
3. generate the corresponding response y using the formula: y = w^T x+b+e where T is transpose, e is a random
Gaussian noise added. For the Binomial family, the relationship between the response Y and predictor vector X
is assumed to be Prob(Y = 1|X) = exp(W^T * X + e)/(1+exp(W^T * X + e)). For the Multinomial family, the
relationship between the response Y (K possible classes) and predictor vector X is assumed to be
    Prob(Y = c|X) = exp(Wc^T * X + e) / (sum over k=0 to K-1 of exp(Wk^T * X + e)).
:param csv_training_data_filename: string representing full path filename to store training data set. Set to
null string if no training data set is to be generated.
:param csv_validation_data_filename: string representing full path filename to store validation data set. Set to
null string if no validation data set is to be generated.
:param csv_test_data_filename: string representing full path filename to store test data set. Set to null string if
no test data set is to be generated.
:param csv_weight_name: string representing full path filename to store intercept and weight used to generate
all data sets.
:param row_count: integer representing number of samples (predictor, response) in training data set
:param col_count: integer representing the number of predictors in the data set
:param data_type: integer representing the type of predictors or weights (1: integers, 2: real)
:param max_p_value: integer representing maximum predictor values
:param min_p_value: integer representing minimum predictor values
:param max_w_value: integer representing maximum intercept/weight values
:param min_w_value: integer representing minimum intercept/weight values
:param noise_std: Gaussian noise standard deviation used to generate noise e to add to response
:param family_type: string represents the various distribution families (gaussian, multinomial, binomial) supported
by our GLM algo
:param valid_row_count: integer representing number of samples (predictor, response) in validation data set
:param test_row_count: integer representing number of samples (predictor, response) in test data set
:param class_number: integer, optional, representing number of classes for binomial and multinomial
:param class_method: string tuple, optional, describing how we derive the final response from the class
probabilities generated for binomial and multinomial family_type for training/validation/test data set respectively.
If set to 'probability', response y is generated randomly according to the class probabilities calculated. If set
to 'threshold', response y is set to the class with the maximum class probability if the maximum class probability
exceeds the second highest class probability by the value set in margin. If the maximum class probability fails
to be greater by the margin than the second highest class probability, the data sample is discarded.
:param class_margin: float tuple, optional, denotes the threshold by how much the maximum class probability has to
exceed the second highest class probability in order for us to keep the data sample for
training/validation/test data set respectively. This field is only meaningful if class_method is set to
'threshold'
:return: None
"""
# generate bias b and weight as a column vector
weights = generate_weights_glm(csv_weight_name, col_count, data_type, min_w_value, max_w_value,
family_type=family_type, class_number=class_number)
# generate training data set
if len(csv_training_data_filename) > 0:
generate_training_set_glm(csv_training_data_filename, row_count, col_count, min_p_value, max_p_value, data_type,
family_type, noise_std, weights,
class_method=class_method[0], class_margin=class_margin[0], weightChange=True)
# generate validation data set
if len(csv_validation_data_filename) > 0:
generate_training_set_glm(csv_validation_data_filename, valid_row_count, col_count, min_p_value, max_p_value,
data_type, family_type, noise_std, weights,
class_method=class_method[1], class_margin=class_margin[1])
# generate test data set
if len(csv_test_data_filename) > 0:
generate_training_set_glm(csv_test_data_filename, test_row_count, col_count, min_p_value, max_p_value,
data_type, family_type, noise_std, weights,
class_method=class_method[2], class_margin=class_margin[2])
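# Hedged usage sketch: generate a small synthetic gaussian GLM data set (train/valid/test plus the
# weight file) in a temporary directory. All counts, value ranges and file names below are
# illustrative assumptions; not executed on import.
def _example_write_syn_glm_dataset():
    """Illustrative only."""
    import tempfile
    tmpdir = tempfile.mkdtemp()
    write_syn_floating_point_dataset_glm(
        csv_training_data_filename=os.path.join(tmpdir, "train.csv"),
        csv_validation_data_filename=os.path.join(tmpdir, "valid.csv"),
        csv_test_data_filename=os.path.join(tmpdir, "test.csv"),
        csv_weight_name=os.path.join(tmpdir, "weights.csv"),
        row_count=100, col_count=5, data_type=2,
        max_p_value=1, min_p_value=-1, max_w_value=1, min_w_value=-1,
        noise_std=0.1, family_type='gaussian',
        valid_row_count=50, test_row_count=50)
    return tmpdir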
def write_syn_mixed_dataset_glm(csv_training_data_filename, csv_training_data_filename_true_one_hot,
csv_validation_data_filename, csv_validation_filename_true_one_hot,
csv_test_data_filename, csv_test_filename_true_one_hot, csv_weight_filename, row_count,
col_count, max_p_value, min_p_value, max_w_value, min_w_value, noise_std, family_type,
valid_row_count, test_row_count, enum_col, enum_level_vec, class_number=2,
class_method=['probability', 'probability', 'probability'],
class_margin=[0.0, 0.0, 0.0]):
"""
This function differs from write_syn_floating_point_dataset_glm in one small point. The predictors in this case
contains categorical data as well as real data.
Generate random data sets to test the GLM algo using the following steps:
1. randomly generate the intercept and weight vector;
2. generate a set of predictors X;
3. generate the corresponding response y using the formula: y = w^T x+b+e where T is transpose, e is a random
Gaussian noise added. For the Binomial family, the relationship between the response Y and predictor vector X
is assumed to be Prob(Y = 1|X) = exp(W^T * X + e)/(1+exp(W^T * X + e)). For the Multinomial family, the
relationship between the response Y (K possible classes) and predictor vector X is assumed to be
    Prob(Y = c|X) = exp(Wc^T * X + e) / (sum over k=0 to K-1 of exp(Wk^T * X + e)).
:param csv_training_data_filename: string representing full path filename to store training data set. Set to null
string if no training data set is to be generated.
:param csv_training_data_filename_true_one_hot: string representing full path filename to store training data set
with true one-hot encoding. Set to null string if no training data set is to be generated.
:param csv_validation_data_filename: string representing full path filename to store validation data set. Set to
null string if no validation data set is to be generated.
:param csv_validation_filename_true_one_hot: string representing full path filename to store validation data set
with true one-hot. Set to null string if no validation data set is to be generated.
:param csv_test_data_filename: string representing full path filename to store test data set. Set to null
string if no test data set is to be generated.
:param csv_test_filename_true_one_hot: string representing full path filename to store test data set with true
one-hot encoding. Set to null string if no test data set is to be generated.
:param csv_weight_filename: string representing full path filename to store intercept and weight used to generate
all data sets.
:param row_count: integer representing number of samples (predictor, response) in training data set
:param col_count: integer representing the number of predictors in the data set
:param max_p_value: integer representing maximum predictor values
:param min_p_value: integer representing minimum predictor values
:param max_w_value: integer representing maximum intercept/weight values
:param min_w_value: integer representing minimum intercept/weight values
:param noise_std: Gaussian noise standard deviation used to generate noise e to add to response
:param family_type: string represents the various distribution families (gaussian, multinomial, binomial) supported
by our GLM algo
:param valid_row_count: integer representing number of samples (predictor, response) in validation data set
:param test_row_count: integer representing number of samples (predictor, response) in test data set
:param enum_col: integer representing actual number of categorical columns in data set
:param enum_level_vec: vector containing maximum integer value for each categorical column
    :param class_number: integer, optional, representing the number of classes for binomial and multinomial
:param class_method: string tuple, optional, describing how we derive the final response from the class
probabilities generated for binomial and multinomial family_type for training/validation/test data set respectively.
If set to 'probability', response y is generated randomly according to the class probabilities calculated. If set
to 'threshold', response y is set to the class with the maximum class probability if the maximum class probability
exceeds the second highest class probability by the value set in margin. If the maximum class probability fails
to be greater by margin than the second highest class probability, the data sample is discarded.
:param class_margin: float tuple, optional, denotes the threshold by how much the maximum class probability has to
exceed the second highest class probability by in order for us to keep the data sample for
training/validation/test data set respectively. This field is only meaningful if class_method is set to
'threshold'
:return: None
"""
# add column count of encoded categorical predictors, if maximum value for enum is 3, it has 4 levels.
# hence 4 bits are used to encode it with true one hot encoding. That is why we are adding 1 bit per
# categorical columns added to our predictors
new_col_count = col_count - enum_col + sum(enum_level_vec) + enum_level_vec.shape[0]
# generate the weights to be applied to the training/validation/test data sets
# this is for true one hot encoding. For reference+one hot encoding, will skip
# few extra weights
weights = generate_weights_glm(csv_weight_filename, new_col_count, 2, min_w_value, max_w_value,
family_type=family_type, class_number=class_number)
# generate training data set
if len(csv_training_data_filename) > 0:
generate_training_set_mixed_glm(csv_training_data_filename, csv_training_data_filename_true_one_hot, row_count,
col_count, min_p_value, max_p_value, family_type, noise_std, weights, enum_col,
enum_level_vec, class_number=class_number,
class_method=class_method[0], class_margin=class_margin[0], weightChange=True)
# generate validation data set
if len(csv_validation_data_filename) > 0:
generate_training_set_mixed_glm(csv_validation_data_filename, csv_validation_filename_true_one_hot,
valid_row_count, col_count, min_p_value, max_p_value, family_type, noise_std,
weights, enum_col, enum_level_vec, class_number=class_number,
class_method=class_method[1], class_margin=class_margin[1])
# generate test data set
if len(csv_test_data_filename) > 0:
generate_training_set_mixed_glm(csv_test_data_filename, csv_test_filename_true_one_hot, test_row_count,
col_count, min_p_value, max_p_value, family_type, noise_std, weights, enum_col,
enum_level_vec, class_number=class_number,
class_method=class_method[2], class_margin=class_margin[2])
def generate_weights_glm(csv_weight_filename, col_count, data_type, min_w_value, max_w_value, family_type='gaussian',
class_number=2):
"""
Generate random intercept and weight vectors (integer or real) for GLM algo and save
the values in a file specified by csv_weight_filename.
:param csv_weight_filename: string representing full path filename to store intercept and weight used to generate
all data set
:param col_count: integer representing the number of predictors in the data set
:param data_type: integer representing the type of predictors or weights (1: integers, 2: real)
:param max_w_value: integer representing maximum intercept/weight values
:param min_w_value: integer representing minimum intercept/weight values
    :param family_type: string, optional, represents the various distribution families (gaussian, multinomial, binomial)
supported by our GLM algo
    :param class_number: integer, optional, representing the number of classes for binomial and multinomial
:return: column vector of size 1+colCount representing intercept and weight or matrix of size
1+colCount by class_number
"""
# first generate random intercept and weight
if 'gaussian' in family_type.lower():
if data_type == 1: # generate random integer intercept/weight
weight = np.random.random_integers(min_w_value, max_w_value, [col_count+1, 1])
elif data_type == 2: # generate real intercept/weights
weight = np.random.uniform(min_w_value, max_w_value, [col_count+1, 1])
else:
assert False, "dataType must be 1 or 2 for now."
elif ('binomial' in family_type.lower()) or ('multinomial' in family_type.lower()
or ('ordinal' in family_type.lower())):
if 'binomial' in family_type.lower(): # for binomial, only need 1 set of weight
class_number -= 1
if class_number <= 0:
assert False, "class_number must be >= 2!"
if isinstance(col_count, np.ndarray):
temp_col_count = col_count[0]
else:
temp_col_count = col_count
if data_type == 1: # generate random integer intercept/weight
weight = np.random.random_integers(min_w_value, max_w_value, [temp_col_count+1, class_number])
elif data_type == 2: # generate real intercept/weights
weight = np.random.uniform(min_w_value, max_w_value, [temp_col_count+1, class_number])
else:
assert False, "dataType must be 1 or 2 for now."
# special treatment for ordinal weights
if 'ordinal' in family_type.lower():
num_pred = len(weight)
for index in range(class_number):
weight[0,index] = 0
for indP in range(1,num_pred):
weight[indP,index] = weight[indP,0] # make sure betas for all classes are the same
np.savetxt(csv_weight_filename, weight.transpose(), delimiter=",")
return weight
def generate_training_set_glm(csv_filename, row_count, col_count, min_p_value, max_p_value, data_type, family_type,
noise_std, weight, class_method='probability', class_margin=0.0, weightChange=False):
"""
Generate supervised data set given weights for the GLM algo. First randomly generate the predictors, then
call function generate_response_glm to generate the corresponding response y using the formula: y = w^T x+b+e
where T is transpose, e is a random Gaussian noise added. For the Binomial family, the relationship between
the response Y and predictor vector X is assumed to be Prob(Y = 1|X) = exp(W^T * X + e)/(1+exp(W^T * X + e)).
For the Multinomial family, the relationship between the response Y (K possible classes) and predictor vector
    X is assumed to be Prob(Y = c|X) = exp(Wc^T * X + e) / (sum over k=0 to K-1 of exp(Wk^T * X + e)). The predictors and
responses are saved in a file specified by csv_filename.
:param csv_filename: string representing full path filename to store supervised data set
:param row_count: integer representing the number of training samples in the data set
:param col_count: integer representing the number of predictors in the data set
:param max_p_value: integer representing maximum predictor values
:param min_p_value: integer representing minimum predictor values
:param data_type: integer representing the type of predictors or weights (1: integers, 2: real)
:param family_type: string represents the various distribution families (gaussian, multinomial, binomial) supported
by our GLM algo
:param noise_std: Gaussian noise standard deviation used to generate noise e to add to response
:param weight: vector representing w in our formula to generate the response.
:param class_method: string tuple, optional, describing how we derive the final response from the class
probabilities generated for binomial and multinomial family-type for training/validation/test data set respectively.
If set to 'probability', response y is generated randomly according to the class probabilities calculated. If set
to 'threshold', response y is set to the class with the maximum class probability if the maximum class probability
exceeds the second highest class probability by the value set in the margin. If the maximum class probability fails
to be greater by the margin than the second highest class probability, the data sample is discarded.
:param class_margin: float tuple, optional, denotes the threshold by how much the maximum class probability has to
exceed the second highest class probability in order for us to keep the data sample for
training/validation/test data set respectively. This field is only meaningful if class_method is set to
'threshold'
:return: None
"""
if data_type == 1: # generate random integers
x_mat = np.random.random_integers(min_p_value, max_p_value, [row_count, col_count])
elif data_type == 2: # generate random real numbers
x_mat = np.random.uniform(min_p_value, max_p_value, [row_count, col_count])
else:
assert False, "dataType must be 1 or 2 for now. "
# generate the response vector to the input predictors
response_y = generate_response_glm(weight, x_mat, noise_std, family_type,
class_method=class_method, class_margin=class_margin, weightChange=weightChange)
# for family_type = 'multinomial' or 'binomial', response_y can be -ve to indicate bad sample data.
# need to delete this data sample before proceeding
if ('multinomial' in family_type.lower()) or ('binomial' in family_type.lower()) or ('ordinal' in family_type.lower()):
if 'threshold' in class_method.lower():
if np.any(response_y < 0): # remove negative entries out of data set
(x_mat, response_y) = remove_negative_response(x_mat, response_y)
# write to file in csv format
np.savetxt(csv_filename, np.concatenate((x_mat, response_y), axis=1), delimiter=",")
def generate_clusters(cluster_center_list, cluster_pt_number_list, cluster_radius_list):
"""
This function is used to generate clusters of points around cluster_centers listed in
    cluster_center_list. The number of points to generate for each cluster is specified by cluster_pt_number_list,
    and the radius of each cluster (which can differ from cluster to cluster) is specified in cluster_radius_list.
:param cluster_center_list: list of coordinates of cluster centers
:param cluster_pt_number_list: number of points to generate for each cluster center
:param cluster_radius_list: list of size of each cluster
:return: list of sample points that belong to various clusters
"""
k = len(cluster_pt_number_list) # number of clusters to generate clusters for
if (not(k == len(cluster_center_list))) or (not(k == len(cluster_radius_list))):
assert False, "Length of list cluster_center_list, cluster_pt_number_list, cluster_radius_list must be the same!"
training_sets = []
for k_ind in range(k):
new_cluster_data = generate_one_cluster(cluster_center_list[k_ind], cluster_pt_number_list[k_ind],
cluster_radius_list[k_ind])
if k_ind > 0:
training_sets = np.concatenate((training_sets, new_cluster_data), axis=0)
else:
training_sets = new_cluster_data
# want to shuffle the data samples so that the clusters are all mixed up
    np.random.shuffle(training_sets)
return training_sets
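# Hedged usage sketch: build three 2-D clusters and stack them into a single sample matrix, e.g.
# as raw material for a k-means style test. Centers, counts and radii are illustrative assumptions.
def _example_generate_clusters():
    """Illustrative only."""
    centers = [[0.0, 0.0], [5.0, 5.0], [-5.0, 5.0]]
    counts = [20, 30, 25]
    radii = [1.0, 0.5, 2.0]
    return generate_clusters(centers, counts, radii)  # (75, 2) array of sample points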
def generate_one_cluster(cluster_center, cluster_number, cluster_size):
"""
    This function will generate a full cluster with cluster_number points centered on cluster_center
with maximum radius cluster_size
:param cluster_center: python list denoting coordinates of cluster center
:param cluster_number: integer denoting number of points to generate for this cluster
:param cluster_size: float denoting radius of cluster
:return: np matrix denoting a cluster
"""
pt_dists = np.random.uniform(0, cluster_size, [cluster_number, 1])
coord_pts = len(cluster_center) # dimension of each cluster point
one_cluster_data = np.zeros((cluster_number, coord_pts), dtype=np.float)
for p_ind in range(cluster_number):
coord_indices = list(range(coord_pts))
random.shuffle(coord_indices) # randomly determine which coordinate to generate
left_radius = pt_dists[p_ind]
for c_ind in range(coord_pts):
coord_index = coord_indices[c_ind]
one_cluster_data[p_ind, coord_index] = random.uniform(-1*left_radius+cluster_center[coord_index],
left_radius+cluster_center[coord_index])
left_radius = math.sqrt(pow(left_radius, 2)-pow((one_cluster_data[p_ind, coord_index]-
cluster_center[coord_index]), 2))
return one_cluster_data
def remove_negative_response(x_mat, response_y):
"""
Recall that when the user chooses to generate a data set for multinomial or binomial using the 'threshold' method,
response y is set to the class with the maximum class probability if the maximum class probability
exceeds the second highest class probability by the value set in margin. If the maximum class probability fails
to be greater by margin than the second highest class probability, the data sample is discarded. However, when we
generate the data set, we keep all samples. For data sample with maximum class probability that fails to be
greater by margin than the second highest class probability, the response is set to be -1. This function will
remove all data samples (predictors and responses) with response set to -1.
:param x_mat: predictor matrix containing all predictor values
:param response_y: response that can be negative if that data sample is to be removed
:return: tuple containing x_mat, response_y with negative data samples removed.
"""
y_response_negative = np.where(response_y < 0) # matrix of True or False
x_mat = np.delete(x_mat,y_response_negative[0].transpose(),axis=0) # remove predictor row with negative response
# remove rows with negative response
response_y = response_y[response_y >= 0]
return x_mat,response_y.transpose()
def generate_training_set_mixed_glm(csv_filename, csv_filename_true_one_hot, row_count, col_count, min_p_value,
max_p_value, family_type, noise_std, weight, enum_col, enum_level_vec,
class_number=2, class_method='probability', class_margin=0.0, weightChange=False):
"""
Generate supervised data set given weights for the GLM algo with mixed categorical and real value
predictors. First randomly generate the predictors, then call function generate_response_glm to generate the
corresponding response y using the formula: y = w^T x+b+e where T is transpose, e is a random Gaussian noise
added. For the Binomial family, the relationship between the response Y and predictor vector X is assumed to
be Prob(Y = 1|X) = exp(W^T * X + e)/(1+exp(W^T * X + e)). For the Multinomial family, the relationship between
the response Y (K possible classes) and predictor vector X is assumed to be
    Prob(Y = c|X) = exp(Wc^T * X + e) / (sum over k=0 to K-1 of exp(Wk^T * X + e)), where e is the random Gaussian noise added to the
response. The predictors and responses are saved in a file specified by csv_filename.
:param csv_filename: string representing full path filename to store supervised data set
:param csv_filename_true_one_hot: string representing full path filename to store data set with true one-hot
encoding.
:param row_count: integer representing the number of training samples in the data set
:param col_count: integer representing the number of predictors in the data set
:param max_p_value: integer representing maximum predictor values
:param min_p_value: integer representing minimum predictor values
:param family_type: string represents the various distribution families (gaussian, multinomial, binomial)
supported by our GLM algo
:param noise_std: Gaussian noise standard deviation used to generate noise e to add to response
:param weight: vector representing w in our formula to generate the response.
:param enum_col: integer representing actual number of categorical columns in data set
:param enum_level_vec: vector containing maximum integer value for each categorical column
    :param class_number: integer, optional, representing the number of classes for binomial and multinomial
:param class_method: string, optional, describing how we derive the final response from the class probabilities
generated for binomial and multinomial family_type. If set to 'probability', response y is generated randomly
according to the class probabilities calculated. If set to 'threshold', response y is set to the class with
the maximum class probability if the maximum class probability exceeds the second highest class probability by
the value set in margin. If the maximum class probability fails to be greater by margin than the second highest
class probability, the data sample is discarded.
:param class_margin: float, optional, denotes the threshold by how much the maximum class probability has to
exceed the second highest class probability by in order for us to keep the data set sample. This field is only
meaningful if class_method is set to 'threshold'
:return: None
"""
# generate the random training data sets
enum_dataset = np.zeros((row_count, enum_col), dtype=np.int) # generate the categorical predictors
# generate categorical data columns
for indc in range(enum_col):
enum_dataset[:, indc] = np.random.random_integers(0, enum_level_vec[indc], row_count)
# generate real data columns
x_mat = np.random.uniform(min_p_value, max_p_value, [row_count, col_count-enum_col])
x_mat = np.concatenate((enum_dataset, x_mat), axis=1) # concatenate categorical and real predictor columns
if len(csv_filename_true_one_hot) > 0:
generate_and_save_mixed_glm(csv_filename_true_one_hot, x_mat, enum_level_vec, enum_col, True, weight, noise_std,
family_type, class_method=class_method, class_margin=class_margin, weightChange=weightChange)
if len(csv_filename) > 0:
generate_and_save_mixed_glm(csv_filename, x_mat, enum_level_vec, enum_col, False, weight, noise_std,
family_type, class_method=class_method, class_margin=class_margin, weightChange=False)
def generate_and_save_mixed_glm(csv_filename, x_mat, enum_level_vec, enum_col, true_one_hot, weight, noise_std,
family_type, class_method='probability', class_margin=0.0, weightChange=False):
"""
Given the weights and input data matrix with mixed categorical and real value predictors, this function will
generate a supervised data set and save the input data and response in a csv format file specified by
csv_filename. It will first encode the enums without using one hot encoding with or without a reference
level first before generating a response Y.
:param csv_filename: string representing full path filename to store supervised data set with reference level
plus true one-hot encoding.
:param x_mat: predictor matrix with mixed columns (categorical/real values)
:param enum_level_vec: vector containing maximum integer value for each categorical column
:param enum_col: integer representing actual number of categorical columns in data set
:param true_one_hot: bool indicating whether we are using true one hot encoding or reference level plus
one hot encoding
:param weight: vector representing w in our formula to generate the response
:param noise_std: Gaussian noise standard deviation used to generate noise e to add to response
:param family_type: string represents the various distribution families (gaussian, multinomial, binomial) supported
by our GLM algo
:param class_method: string, optional, describing how we derive the final response from the class probabilities
generated for binomial and multinomial family_type. If set to 'probability', response y is generated randomly
according to the class probabilities calculated. If set to 'threshold', response y is set to the class with the
maximum class probability if the maximum class probability exceeds the second highest class probability by the
value set in the margin. If the maximum class probability fails to be greater by margin than the second highest
class probability, the data sample is discarded.
:param class_margin: float, optional, denotes the threshold by how much the maximum class probability has to exceed
the second highest class probability in order for us to keep the data sample. This field is only meaningful if
class_method is set to 'threshold'
:return: None
"""
# encode the enums
x_mat_encoded = encode_enum_dataset(x_mat, enum_level_vec, enum_col, true_one_hot, False)
# extract the correct weight dimension for the data set
if not true_one_hot:
(num_row, num_col) = x_mat_encoded.shape
weight = weight[0:num_col+1] # +1 to take care of the intercept term
# generate the corresponding response vector given the weight and encoded input predictors
response_y = generate_response_glm(weight, x_mat_encoded, noise_std, family_type,
class_method=class_method, class_margin=class_margin, weightChange=weightChange)
# for familyType = 'multinomial' or 'binomial', response_y can be -ve to indicate bad sample data.
# need to delete this before proceeding
if ('multinomial' in family_type.lower()) or ('binomial' in family_type.lower()):
if 'threshold' in class_method.lower():
(x_mat,response_y) = remove_negative_response(x_mat, response_y)
# write generated data set to file in csv format
np.savetxt(csv_filename, np.concatenate((x_mat, response_y), axis=1), delimiter=",")
def encode_enum_dataset(dataset, enum_level_vec, enum_col, true_one_hot, include_nans):
"""
Given 2-d numpy array of predictors with categorical and real columns, this function will
encode the enum columns with 1-hot encoding or with reference plus one hot encoding
:param dataset: 2-d numpy array of predictors with both categorical and real columns
:param enum_level_vec: vector containing maximum level for each categorical column
:param enum_col: number of categorical columns in the data set
:param true_one_hot: bool indicating if we are using true one hot encoding or with one reference level + one hot
encoding
:param include_nans: bool indicating if we have nans in categorical columns
:return: data set with categorical columns encoded with 1-hot encoding or 1-hot encoding plus reference
"""
(num_row, num_col) = dataset.shape
# split the data set into categorical and real parts
enum_arrays = dataset[:, 0:enum_col]
new_enum_arrays = []
# perform the encoding for each element of categorical part
for indc in range(enum_col):
enum_col_num = enum_level_vec[indc]+1
if not true_one_hot:
enum_col_num -= 1
if include_nans and np.any(enum_arrays[:, indc]):
enum_col_num += 1
new_temp_enum = np.zeros((num_row, enum_col_num[0]))
one_hot_matrix = one_hot_encoding(enum_col_num)
last_col_index = enum_col_num-1
# encode each enum using 1-hot encoding or plus reference value
for indr in range(num_row):
enum_val = enum_arrays[indr, indc]
            if true_one_hot:  # using true one-hot encoding
new_temp_enum[indr, :] = replace_with_encoded_bits(one_hot_matrix, enum_val, 0, last_col_index)
else:
if enum_val:
new_temp_enum[indr, :] = replace_with_encoded_bits(one_hot_matrix, enum_val, 1, last_col_index)
if indc == 0:
new_enum_arrays = new_temp_enum
else:
new_enum_arrays = np.concatenate((new_enum_arrays, new_temp_enum), axis=1)
return np.concatenate((new_enum_arrays, dataset[:, enum_col:num_col]), axis=1)
def replace_with_encoded_bits(one_hot_matrix, enum_val, add_value, last_col_index):
"""
Generate encoded bits for a categorical data value using one hot encoding.
:param one_hot_matrix: matrix representing the encoding of categorical data value to 1-hot encoding
:param enum_val: categorical data value, could be np.nan
:param add_value: set to 1 if a reference value is needed in addition to 1-hot encoding
:param last_col_index: index into encoding for np.nan if exists
:return: vector representing the encoded values for a enum value
"""
if np.isnan(enum_val): # if data value is np.nan
return one_hot_matrix[last_col_index]
else:
return one_hot_matrix[int(enum_val-add_value)]
def one_hot_encoding(enum_level):
"""
Generate the one_hot_encoding matrix given the number of enum_level.
:param enum_level: generate the actual one-hot encoding matrix
:return: numpy array for the enum_level specified. Note, enum_level <= 6
"""
if enum_level >= 2:
base_array = np.array([[0, 1], [1, 0]]) # for 2 enum levels
for enum_index in range(3, enum_level+1): # loop to build encoding for enum levels > 2
(num_row, num_col) = base_array.shape
col_zeros = np.asmatrix(np.zeros(num_row)).transpose() # column of zero matrix
base_array = np.concatenate((col_zeros, base_array), axis=1) # add column of zero
row_zeros = np.asmatrix(np.zeros(num_row+1)) # add row of zeros
row_zeros[0, 0] = 1 # set first element to 1
base_array = np.concatenate((base_array, row_zeros), axis=0)
return base_array
else:
assert False, "enum_level must be >= 2."
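# Hedged worked example: for enum_level=3 the construction above produces a 3x3 matrix whose rows
# are (in the order they are built):
#     [0, 0, 1]
#     [0, 1, 0]
#     [1, 0, 0]
# i.e. each categorical level gets its own one-hot bit pattern, and replace_with_encoded_bits()
# simply selects the row that corresponds to a given enum value.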
def generate_response_glm(weight, x_mat, noise_std, family_type, class_method='probability',
class_margin=0.0, weightChange=False, even_distribution=True):
"""
Generate response vector given weight matrix, predictors matrix for the GLM algo.
:param weight: vector representing w in our formula to generate the response
:param x_mat: random numpy matrix (2-D ndarray) containing the predictors
:param noise_std: Gaussian noise standard deviation used to generate noise e to add to response
:param family_type: string represents the various distribution families (Gaussian, multinomial, binomial)
supported by our GLM algo
:param class_method: string, optional, describing how we derive the final response from the class probabilities
generated for binomial and multinomial familyType. If set to 'probability', response y is generated randomly
according to the class probabilities calculated. If set to 'threshold', response y is set to the class with the
maximum class probability if the maximum class probability exceeds the second highest class probability by the
value set in the margin. If the maximum class probability fails to be greater by margin than the second highest
class probability, the data sample is discarded.
:param class_margin: float, optional, denotes the threshold by how much the maximum class probability has to exceed
the second highest class probability in order for us to keep the data set sample. This field is only meaningful if
class_method is set to 'threshold'
:return: vector representing the response
"""
(num_row, num_col) = x_mat.shape
temp_ones_col = np.asmatrix(np.ones(num_row)).transpose()
x_mat = np.concatenate((temp_ones_col, x_mat), axis=1)
response_y = x_mat * weight + noise_std * np.random.standard_normal([num_row, 1])
if 'ordinal' in family_type.lower():
(num_sample, num_class) = response_y.shape
lastClass = num_class - 1
if weightChange:
tresp = []
# generate the new y threshold
for indP in range(num_sample):
tresp.append(-response_y[indP,0])
tresp.sort()
num_per_class = int(len(tresp)/num_class)
if (even_distribution):
for indC in range(lastClass):
weight[0,indC] = tresp[(indC+1)*num_per_class]
else: # do not generate evenly distributed class, generate randomly distributed classes
splitInd = []
lowV = 0.1
highV = 1
v1 = 0
acc = 0
for indC in range(lastClass):
tempf = random.uniform(lowV, highV)
splitInd.append(v1+int(tempf*num_per_class))
v1 = splitInd[indC] # from last class
acc += 1-tempf
highV = 1+acc
for indC in range(lastClass): # put in threshold
weight[0,indC] = tresp[splitInd[indC]]
response_y = x_mat * weight + noise_std * np.random.standard_normal([num_row, 1])
discrete_y = np.zeros((num_sample, 1), dtype=np.int)
for indR in range(num_sample):
discrete_y[indR, 0] = lastClass
for indC in range(lastClass):
if (response_y[indR, indC] >= 0):
discrete_y[indR, 0] = indC
break
return discrete_y
# added more to form Multinomial response
if ('multinomial' in family_type.lower()) or ('binomial' in family_type.lower()):
temp_mat = np.exp(response_y) # matrix of n by K where K = 1 for binomials
if 'binomial' in family_type.lower():
ntemp_mat = temp_mat + 1
btemp_mat = temp_mat / ntemp_mat
temp_mat = np.concatenate((1-btemp_mat, btemp_mat), axis=1) # inflate temp_mat to 2 classes
response_y = derive_discrete_response(temp_mat, class_method, class_margin, family_type)
return response_y
def derive_discrete_response(prob_mat, class_method, class_margin, family_type='binomial'):
"""
This function is written to generate the final class response given the probabilities (Prob(y=k)). There are
two methods that we use and is specified by the class_method. If class_method is set to 'probability',
response y is generated randomly according to the class probabilities calculated. If set to 'threshold',
response y is set to the class with the maximum class probability if the maximum class probability exceeds the
second highest class probability by the value set in margin. If the maximum class probability fails to be
greater by margin than the second highest class probability, the data sample will be discarded later by
marking the final response as -1.
:param prob_mat: probability matrix specifying the probability that y=k where k is a class
:param class_method: string set to 'probability' or 'threshold'
:param class_margin: if class_method='threshold', class_margin is the margin used to determine if a response is to
be kept or discarded.
    :return: response vector representing the class of y, or -1 if a data sample is to be discarded.
"""
(num_sample, num_class) = prob_mat.shape
lastCat = num_class-1
if 'probability' in class_method.lower():
prob_mat = normalize_matrix(prob_mat)
discrete_y = np.zeros((num_sample, 1), dtype=np.int)
if 'probability' in class_method.lower():
if 'ordinal' not in family_type.lower():
prob_mat = np.cumsum(prob_mat, axis=1)
else: # for ordinal
for indR in list(range(num_sample)):
for indC in list(range(num_class)):
prob_mat[indR, indC] = prob_mat[indR,indC]/prob_mat[indR,lastCat]
random_v = np.random.uniform(0, 1, [num_sample, 1])
# choose the class that final response y belongs to according to the
# probability prob(y=k)
class_bool = random_v < prob_mat
for indR in range(num_sample):
for indC in range(num_class):
if class_bool[indR, indC]:
discrete_y[indR, 0] = indC
break
elif 'threshold' in class_method.lower():
discrete_y = np.argmax(prob_mat, axis=1)
temp_mat = np.diff(np.sort(prob_mat, axis=1), axis=1)
# check if max value exceeds second one by at least margin
mat_diff = temp_mat[:, num_class-2]
mat_bool = mat_diff < class_margin
discrete_y[mat_bool] = -1
else:
assert False, 'class_method should be set to "probability" or "threshold" only!'
return discrete_y
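# Hedged worked example of the 'threshold' method: rows whose top class probability does not beat
# the runner-up by at least class_margin are flagged with -1 (and can later be dropped by
# remove_negative_response()). The numbers below are illustrative.
#
#     probs = np.matrix([[0.70, 0.20, 0.10],
#                        [0.40, 0.35, 0.25]])
#     derive_discrete_response(probs, 'threshold', 0.2)
#     # row 0 -> class 0 (0.70 - 0.20 = 0.50 >= 0.2); row 1 -> -1 (0.40 - 0.35 = 0.05 < 0.2)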
def normalize_matrix(mat):
"""
This function will normalize a matrix across each row such that the row sum is 1.
:param mat: matrix containing prob(y=k)
:return: normalized matrix containing prob(y=k)
"""
(n, K) = mat.shape
kronmat = np.ones((1, K), dtype=float)
row_sum = np.sum(mat, axis=1)
row_sum_mat = np.kron(row_sum, kronmat)
return mat/row_sum_mat
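# Hedged worked example (the input is expected to be a 2-D np.matrix, as produced elsewhere in
# this module):
#
#     normalize_matrix(np.matrix([[1.0, 3.0],
#                                 [2.0, 2.0]]))
#     # -> [[0.25, 0.75],
#     #     [0.50, 0.50]]   each row now sums to 1 and can be read as class probabilities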
def move_files(dir_path, old_name, new_file, action='move'):
"""
Simple function to move or copy a data set (old_name) to a special directory (dir_path)
with new name (new_file) so that we will be able to re-run the tests if we
have found something wrong with the algorithm under test with the data set.
This is done to avoid losing the data set.
:param dir_path: string representing full directory path where a file is to be moved to
:param old_name: string representing file (filename with full directory path) to be moved to new directory.
    :param new_file: string representing the file name of the moved/copied file in the new directory
:param action: string, optional, represent the action 'move' or 'copy' file
:return: None
"""
new_name = os.path.join(dir_path, new_file) # generate new filename including directory path
if os.path.isfile(old_name): # only move/copy file old_name if it actually exists
if 'move' in action:
motion = 'mv '
elif 'copy' in action:
motion = 'cp '
else:
assert False, "Illegal action setting. It can only be 'move' or 'copy'!"
cmd = motion+old_name+' '+new_name # generate cmd line string to move the file
subprocess.call(cmd, shell=True)
def remove_files(filename):
"""
Simple function to remove data set saved in filename if the dynamic test is completed with no
error. Some data sets we use can be rather big. This is performed to save space.
:param filename: string representing the file to be removed. Full path is included.
:return: None
"""
cmd = 'rm ' + filename
subprocess.call(cmd, shell=True)
def random_col_duplication(num_cols, duplication_threshold, max_number, to_scale, max_scale_factor):
"""
This function will randomly determine for each column if it should be duplicated.
    If a column is to be duplicated, the number of duplications is also determined randomly. In addition, a
scaling factor will be randomly applied to each duplicated column if enabled.
:param num_cols: integer representing number of predictors used
:param duplication_threshold: threshold to determine if a column is to be duplicated. Set
this number to be low if you want to encourage column duplication and vice versa
:param max_number: maximum number of times a column is to be duplicated
:param to_scale: bool indicating if a duplicated column is to be scaled
:param max_scale_factor: real representing maximum scale value for repeated columns
:return: a tuple containing two vectors: col_return, col_scale_return.
col_return: vector indicating the column indices of the original data matrix that will be included
in the new data matrix with duplicated columns
col_scale_return: vector indicating for each new column in the new data matrix with duplicated columns,
what scale should be applied to that column.
"""
col_indices = list(range(num_cols)) # contains column indices of predictors in original data set
col_scales = [1]*num_cols # scaling factor for original data set, all ones.
for ind in range(num_cols): # determine for each column if to duplicate it
temp = random.uniform(0, 1) # generate random number from 0 to 1
if temp > duplication_threshold: # duplicate column if random number generated exceeds duplication_threshold
rep_num = random.randint(1, max_number) # randomly determine how many times to repeat a column
more_col_indices = [ind]*rep_num
col_indices.extend(more_col_indices)
temp_scale = []
for ind in range(rep_num):
if to_scale: # for each duplicated column, determine a scaling factor to multiply the column with
temp_scale.append(random.uniform(0, max_scale_factor))
else:
temp_scale.append(1)
col_scales.extend(temp_scale)
# randomly shuffle the predictor column orders and the corresponding scaling factors
new_col_indices = list(range(len(col_indices)))
random.shuffle(new_col_indices)
col_return = [col_indices[i] for i in new_col_indices]
col_scale_return = [col_scales[i] for i in new_col_indices]
return col_return, col_scale_return
def duplicate_scale_cols(col_indices, col_scale, old_filename, new_filename):
"""
This function actually performs the column duplication with scaling giving the column
indices and scaling factors for each column. It will first load the original data set
from old_filename. After performing column duplication and scaling, the new data set
will be written to file with new_filename.
:param col_indices: vector indicating the column indices of the original data matrix that will be included
in the new data matrix with duplicated columns
:param col_scale: vector indicating for each new column in the new data matrix with duplicated columns,
what scale should be applied to that column
:param old_filename: string representing full directory path and filename where data set is stored
:param new_filename: string representing full directory path and filename where new data set is to be stored
:return: None
"""
# pd_frame = pd.read_csv(old_filename, header=None) # read in original data set
#
# pd_frame_new = pd.DataFrame() # new empty data frame
#
# for ind in range(len(col_indices)): # for each column
# tempc = pd_frame.ix[:, col_indices[ind]]*col_scale[ind] # extract a column from old data frame and scale it
# pd_frame_new = pd.concat([pd_frame_new, tempc], axis=1) # add it to the new data frame
np_frame = np.asmatrix(np.genfromtxt(old_filename, delimiter=',', dtype=None))
(num_row, num_col) = np_frame.shape
np_frame_new = np.asmatrix(np.zeros((num_row, len(col_indices)), dtype=np.float))
for ind in range(len(col_indices)):
np_frame_new[:, ind] = np_frame[:, col_indices[ind]]*col_scale[ind]
# done changing the data frame. Save it in a new file
np.savetxt(new_filename, np_frame_new, delimiter=",")
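# Hedged usage sketch: chain random_col_duplication() and duplicate_scale_cols() to build a new csv
# with randomly repeated, randomly scaled predictor columns. The file names are placeholders and the
# input file is assumed to be purely numeric, comma separated.
def _example_duplicate_columns(old_csv, new_csv):
    """Illustrative only."""
    num_cols = np.asmatrix(np.genfromtxt(old_csv, delimiter=',', dtype=None)).shape[1]
    col_indices, col_scales = random_col_duplication(num_cols, duplication_threshold=0.5,
                                                     max_number=3, to_scale=True, max_scale_factor=2.0)
    duplicate_scale_cols(col_indices, col_scales, old_csv, new_csv)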
def insert_nan_in_data(old_filename, new_filename, missing_fraction):
"""
    Given the filename of a data set stored in old_filename, this function will randomly determine
    for each predictor value whether to replace it with nan, with probability missing_fraction. The
new data set will be stored in filename new_filename.
:param old_filename: string representing full directory path and filename where data set is stored
:param new_filename: string representing full directory path and filename where new data set with missing
values is to be stored
:param missing_fraction: real value representing the probability of replacing a predictor with nan.
:return: None
"""
# pd_frame = pd.read_csv(old_filename, header=None) # read in a dataset
np_frame = np.asmatrix(np.genfromtxt(old_filename, delimiter=',', dtype=None))
(row_count, col_count) = np_frame.shape
random_matrix = np.random.uniform(0, 1, [row_count, col_count-1])
for indr in range(row_count): # for each predictor value, determine if to replace value with nan
for indc in range(col_count-1):
if random_matrix[indr, indc] < missing_fraction:
np_frame[indr, indc] = np.nan
# save new data set with missing values to new file
np.savetxt(new_filename, np_frame, delimiter=",")
# pd_frame.to_csv(new_filename, sep=',', header=False, index=False, na_rep='nan')
def print_message_values(start_string, nump_array):
"""
This function prints the value of a nump_array with a string message in front of it.
:param start_string: string representing message to be printed
:param nump_array: array storing something
:return: None
"""
print(start_string)
print(nump_array)
def show_test_results(test_name, curr_test_val, new_test_val):
"""
This function prints the test execution results which can be passed or failed. A message will be printed on
screen to warn user of the test result.
:param test_name: string representing test name
:param curr_test_val: integer representing number of tests failed so far before the test specified in test_name
is executed
:param new_test_val: integer representing number of tests failed after the test specified in test_name is
executed
    :return: integer: 0 if test passed and 1 if test failed.
"""
failed_string = "Ooops, " + test_name + " failed. I am sorry..."
pass_string = "Yeah, " + test_name + " passed!"
if (curr_test_val < new_test_val): # this test has failed
print(failed_string)
return 1
else:
print(pass_string)
return 0
def assert_H2OTwoDimTable_equal(table1, table2, col_header_list, tolerance=1e-6, check_sign=False, check_all=True,
num_per_dim=10):
"""
This method compares two H2OTwoDimTables and verify that their difference is less than value set in tolerance. It
    is probably overkill, since I have assumed that the names in col_header_list may not be in the same order as
    the values in table.cell_values[ind][0]. In addition, I do not assume an order for the names in
    table.cell_values[ind][0] either, since there is no reason for such an order to exist.
To limit the test run time, we can test a randomly sampled of points instead of all points
:param table1: H2OTwoDimTable to be compared
:param table2: the other H2OTwoDimTable to be compared
    :param col_header_list: list of strings denoting the names over which the comparison is to be performed
:param tolerance: default to 1e-6
:param check_sign: bool, determine if the sign of values are important or not. For eigenvectors, they are not.
:param check_all: bool, determine if we need to compare every single element
    :param num_per_dim: integer, number of elements to sample per dimension (three dimensions are sampled here) when check_all is False
:return: None if comparison succeed and raise an error if comparison failed for whatever reason
"""
num_comparison = len(set(col_header_list))
size1 = len(table1.cell_values)
size2 = len(table2.cell_values)
worst_error = 0
assert size1==size2, "The two H2OTwoDimTables are of different size!"
    assert num_comparison<=size1, "H2OTwoDimTable does not have all the attributes specified in col_header_list."
flip_sign_vec = generate_sign_vec(table1, table2) if check_sign else [1]*len(table1.cell_values[0]) # correct for sign change for eigenvector comparisons
randRange1 = generate_for_indices(len(table1.cell_values), check_all, num_per_dim, 0)
randRange2 = generate_for_indices(len(table2.cell_values), check_all, num_per_dim, 0)
for ind in range(num_comparison):
col_name = col_header_list[ind]
next_name=False
for name_ind1 in randRange1:
if col_name!=str(table1.cell_values[name_ind1][0]):
continue
for name_ind2 in randRange2:
if not(col_name==str(table2.cell_values[name_ind2][0])):
continue
# now we have the col header names, do the actual comparison
if str(table1.cell_values[name_ind1][0])==str(table2.cell_values[name_ind2][0]):
randRange3 = generate_for_indices(min(len(table2.cell_values[name_ind2]), len(table1.cell_values[name_ind1])), check_all, num_per_dim,1)
for indC in randRange3:
val1 = table1.cell_values[name_ind1][indC]
val2 = table2.cell_values[name_ind2][indC]*flip_sign_vec[indC]
if isinstance(val1, float) and isinstance(val2, float):
compare_val_ratio = abs(val1-val2)/max(1, abs(val1), abs(val2))
if compare_val_ratio > tolerance:
print("Table entry difference is {0}".format(compare_val_ratio))
assert False, "Table entries are not equal within tolerance."
worst_error = max(worst_error, compare_val_ratio)
else:
assert False, "Tables contains non-numerical values. Comparison is for numericals only!"
next_name=True
break
else:
assert False, "Unknown metric names found in col_header_list."
if next_name: # ready to go to the next name in col_header_list
break
print("******* Congrats! Test passed. Maximum difference of your comparison is {0}".format(worst_error))
def generate_for_indices(list_size, check_all, num_per_dim, start_val):
    """
    Return all indices in range(start_val, list_size) when check_all is True; otherwise return a random sample
    of at most num_per_dim of those indices.
    """
    if check_all:
        return list(range(start_val, list_size))
    else:
        randomList = list(range(start_val, list_size))
        shuffle(randomList)
        return randomList[0:min(list_size, num_per_dim)]
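# A minimal usage sketch (hypothetical sizes): return every index when check_all is True, otherwise a random
# sample of at most num_per_dim indices.
# all_indices = generate_for_indices(100, True, 10, 0)    # -> [0, 1, ..., 99]
# sampled = generate_for_indices(100, False, 10, 0)       # -> 10 shuffled indices from range(0, 100)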
def generate_sign_vec(table1, table2):
sign_vec = [1]*len(table1.cell_values[0])
for indC in range(1, len(table2.cell_values[0])): # may need to look at other elements since some may be zero
for indR in range(0, len(table2.cell_values)):
if (abs(table1.cell_values[indR][indC]) > 0) and (abs(table2.cell_values[indR][indC]) > 0):
sign_vec[indC] = int(np.sign(table1.cell_values[indR][indC]) * np.sign(table2.cell_values[indR][indC]))
# if (np.sign(table1.cell_values[indR][indC])!=np.sign(table2.cell_values[indR][indC])):
# sign_vec[indC] = -1
# else:
# sign_vec[indC] = 1
break # found what we need. Goto next column
return sign_vec
def equal_two_arrays(array1, array2, eps, tolerance, throwError=True):
"""
This function will compare the values of two python tuples. First, if the values are below
eps which denotes the significance level that we care, no comparison is performed. Next,
    False is returned if the difference between any elements of the two arrays exceeds some tolerance.
    :param array1: numpy array containing some values of interest
    :param array2: numpy array containing some values of interest that we would like to compare it with array1
    :param eps: significance level that we care about in order to perform the comparison
    :param tolerance: threshold for which we allow the two array elements to be different by
    :param throwError: bool, if True an assertion error is raised on mismatch instead of returning False
    :return: True if elements in array1 and array2 are close and False otherwise
"""
size1 = len(array1)
if size1 == len(array2): # arrays must be the same size
# compare two arrays
for ind in range(size1):
if not ((array1[ind] < eps) and (array2[ind] < eps)):
# values to be compared are not too small, perform comparison
# look at differences between elements of array1 and array2
compare_val_h2o_Py = abs(array1[ind] - array2[ind])
if compare_val_h2o_Py > tolerance: # difference is too high, return false
if throwError:
assert False, "The two arrays are not equal in value."
else:
return False
return True # return True, elements of two arrays are close enough
else:
if throwError:
assert False, "The two arrays are of different size!"
else:
return False
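# A minimal usage sketch (hypothetical arrays; assumes numpy is imported as np at the top of this module):
# a = np.array([1.0, 2.0, 3.0])
# b = np.array([1.0, 2.0, 3.0 + 1e-12])
# equal_two_arrays(a, b, eps=1e-30, tolerance=1e-6, throwError=False)   # -> True, difference is within tolerance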
def equal_2D_tables(table1, table2, tolerance=1e-6):
"""
This function will compare the values of two python tuples. First, if the values are below
eps which denotes the significance level that we care, no comparison is performed. Next,
False is returned if the different between any elements of the two array exceeds some tolerance.
:param array1: numpy array containing some values of interest
:param array2: numpy array containing some values of interest that we would like to compare it with array1
:param eps: significance level that we care about in order to perform the comparison
:param tolerance: threshold for which we allow the two array elements to be different by
:return: True if elements in array1 and array2 are close and False otherwise
"""
size1 = len(table1)
if size1 == len(table2): # arrays must be the same size
# compare two arrays
for ind in range(size1):
if len(table1[ind]) == len(table2[ind]):
for ind2 in range(len(table1[ind])):
if type(table1[ind][ind2]) == float:
if abs(table1[ind][ind2]-table2[ind][ind2]) > tolerance:
return False
else:
assert False, "The two arrays are of different size!"
return True
else:
assert False, "The two arrays are of different size!"
def compare_two_arrays(array1, array2, eps, tolerance, comparison_string, array1_string, array2_string, error_string,
success_string, template_is_better, just_print=False):
"""
This function is written to print out the performance comparison results for various values that
we care about. It will return 1 if the values of the two arrays exceed threshold specified in tolerance.
The actual comparison is performed by calling function equal_two_array.
:param array1: numpy array containing some values of interest
:param array2: numpy array containing some values of interest that we would like to compare it with array1
:param eps: significance level that we care about in order to perform the comparison
:param tolerance: threshold for which we allow the two array elements to be different by
:param comparison_string: string stating what the comparison is about, e.g. "Comparing p-values ...."
:param array1_string: string stating what is the array1 attribute of interest, e.g. "H2O p-values: "
:param array2_string: string stating what is the array2 attribute of interest, e.g. "Theoretical p-values: "
:param error_string: string stating what you want to say if the difference between array1 and array2
exceeds tolerance, e.g "P-values are not equal!"
    :param success_string: string stating what you want to say if the difference between array1 and array2 does not
     exceed tolerance, e.g. "P-values are close enough!"
:param template_is_better: bool, True, will return 1 if difference among elements of array1 and array2 exceeds
tolerance. False, will always return 0 even if difference among elements of array1 and array2 exceeds tolerance.
In this case, the system under test actually performs better than the template.
:param just_print: bool if True will print attribute values without doing comparison. False will print
attribute values and perform comparison
:return: if template_is_better = True, return 0 if elements in array1 and array2 are close and 1 otherwise;
if template_is_better = False, will always return 0 since system under tests performs better than
template system.
"""
# display array1, array2 with proper description
print(comparison_string)
print(array1_string, array1)
print(array2_string, array2)
if just_print: # just print the two values and do no comparison
return 0
else: # may need to actually perform comparison
if template_is_better:
try:
assert equal_two_arrays(array1, array2, eps, tolerance), error_string
print(success_string)
sys.stdout.flush()
return 0
except:
sys.stdout.flush()
return 1
else:
print("Test result is actually better than comparison template!")
return 0
def make_Rsandbox_dir(base_dir, test_name, make_dir):
"""
This function will remove directory "Rsandbox/test_name" off directory base_dir and contents if it exists.
If make_dir is True, it will create a clean directory "Rsandbox/test_name" off directory base_dir.
:param base_dir: string contains directory path where we want to build our Rsandbox/test_name off from
:param test_name: string contains unit test name that the Rsandbox is created for
:param make_dir: bool, True: will create directory baseDir/Rsandbox/test_name, False: will not create
directory.
:return: syndatasets_dir: string containing the full path of the directory name specified by base_dir, test_name
"""
# create the Rsandbox directory path for the test.
syndatasets_dir = os.path.join(base_dir, "Rsandbox_" + test_name)
if os.path.exists(syndatasets_dir): # remove Rsandbox directory if it exists
shutil.rmtree(syndatasets_dir)
if make_dir: # create Rsandbox directory if make_dir is True
os.makedirs(syndatasets_dir)
return syndatasets_dir
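# A minimal usage sketch (hypothetical test name): build a clean scratch directory for a test and remove it
# again when the test is done.
# sandbox_dir = make_Rsandbox_dir(os.getcwd(), "my_glm_test", True)    # creates <cwd>/Rsandbox_my_glm_test
# ... write temporary files into sandbox_dir ...
# make_Rsandbox_dir(os.getcwd(), "my_glm_test", False)                 # removes the directory afterwards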
def get_train_glm_params(model, what_param, family_type='gaussian'):
"""
This function will grab the various attributes (like coefficients, p-values, and others) off a GLM
model that has been built.
:param model: GLM model that we want to extract information from
:param what_param: string indicating the model attribute of interest like 'p-value','weights',...
:param family_type: string, optional, represents the various distribution families (gaussian, multinomial, binomial)
supported by our GLM algo
:return: attribute value of interest
"""
coeff_pvalues = model._model_json["output"]["coefficients_table"].cell_values
if what_param == 'p-values':
if 'gaussian' in family_type.lower():
p_value_h2o = []
for ind in range(len(coeff_pvalues)):
p_value_h2o.append(coeff_pvalues[ind][-1])
return p_value_h2o
else:
assert False, "P-values are only available to Gaussian family."
elif what_param == 'weights':
if 'gaussian' in family_type.lower():
weights = []
for ind in range(len(coeff_pvalues)):
weights.append(coeff_pvalues[ind][1])
return weights
elif ('multinomial' in family_type.lower()) or ('binomial' in family_type.lower()):
# for multinomial, the coefficients are organized as features by number of classes for
# nonstandardized and then standardized weights. Need to grab the correct matrix as
# number of classes by n_features matrix
num_feature = len(coeff_pvalues)
            num_class = (len(coeff_pvalues[0])-1)//2  # integer division so the shape below stays an int
            coeffs = np.zeros((num_class, num_feature), dtype=float)  # np.float is deprecated in newer numpy
end_index = int(num_class+1)
for col_index in range(len(coeff_pvalues)):
coeffs[:, col_index] = coeff_pvalues[col_index][1:end_index]
return coeffs
elif what_param == 'best_lambda':
lambda_str = model._model_json["output"]["model_summary"].cell_values[0][4].split('=')
return float(str(lambda_str[-2]).split(',')[0])
elif what_param == 'confusion_matrix':
if 'multinomial' in family_type.lower():
return model._model_json["output"]["training_metrics"]._metric_json["cm"]["table"]
elif 'binomial' in family_type.lower():
return model.confusion_matrix().table
else:
assert False, "parameter value not found in GLM model"
def less_than(val1, val2):
"""
Simple function that returns True if val1 <= val2 and False otherwise.
:param val1: first value of interest
:param val2: second value of interest
:return: bool: True if val1 <= val2 and False otherwise
"""
if round(val1, 3) <= round(val2, 3): # only care to the 3rd position after decimal point
return True
else:
return False
def replace_nan_with_mean(data_with_nans, nans_row_col_indices, col_means):
"""
Given a data set with nans, row and column indices of where the nans are and the col_means, this
function will replace the nans with the corresponding col_means.
:param data_with_nans: data set matrix with nans
:param nans_row_col_indices: matrix containing the row and column indices of where the nans are
:param col_means: vector containing the column means of data_with_NAs
:return: data_with_NAs: data set with nans replaced with column means
"""
num_NAs = len(nans_row_col_indices[0])
for ind in range(num_NAs):
data_with_nans[nans_row_col_indices[0][ind], nans_row_col_indices[1][ind]] = \
col_means[nans_row_col_indices[1][ind]]
return data_with_nans
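# A minimal usage sketch (hypothetical data; assumes numpy is imported as np):
# data = np.array([[1.0, np.nan], [3.0, 4.0]])
# nan_idx = np.where(np.isnan(data))            # (row indices, column indices) of the nans
# col_means = np.nanmean(data, axis=0)          # per-column means ignoring nans
# filled = replace_nan_with_mean(data, nan_idx, col_means)   # the nan is replaced by 4.0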
def remove_csv_files(dir_path, suffix=".csv", action='remove', new_dir_path=""):
"""
    Given a directory, this function will gather all files ending with the string specified
    in suffix.  Next, it is going to delete those files if action is set to 'remove'.  If
    action is set to 'copy', a new_dir_path must be specified where the files ending with suffix
    will be moved to this new directory instead.
    :param dir_path: string representing full path to directory of interest
    :param suffix: string representing suffix of filenames that are to be found and deleted or moved
    :param action: string, optional, denotes the action to perform on files, 'remove' or 'copy'
    :param new_dir_path: string, optional, representing full path to new directory
:return: None
"""
filenames = os.listdir(dir_path) # list all files in directory
# only collect files with filename ending with suffix
to_remove = [filename for filename in filenames if filename.endswith(suffix)]
# delete files ending with suffix
for fn in to_remove:
temp_fn = os.path.join(dir_path, fn)
# only remove if file actually exists.
if os.path.isfile(temp_fn):
if 'remove' in action:
remove_files(temp_fn)
elif 'copy' in action:
move_files(new_dir_path, temp_fn, fn, action=action)
else:
assert False, "action string can only be 'remove' or 'copy."
def extract_comparison_attributes_and_print(model_h2o, h2o_model_test_metrics, end_test_str, want_p_values,
attr1_bool, attr2_bool, att1_template, att2_template, att3_template,
att4_template, compare_att1_str, h2o_att1_str, template_att1_str,
att1_str_fail, att1_str_success, compare_att2_str, h2o_att2_str,
template_att2_str, att2_str_fail, att2_str_success, compare_att3_str,
h2o_att3_str, template_att3_str, att3_str_fail, att3_str_success,
compare_att4_str, h2o_att4_str, template_att4_str, att4_str_fail,
att4_str_success, failed_test_number, ignored_eps, allowed_diff,
noise_var, template_must_be_better, attr3_bool=True, attr4_bool=True):
"""
This function basically will compare four attributes (weight, p-values, training data MSE, test data MSE) of a test
with a template model. If the difference of comparison exceeds a certain threshold, the test will be determined as
failed and vice versa. There are times when we do not care about p-values and/or weight comparisons but mainly
concerned with MSEs. We can set the input parameters to indicate if this is the case.
:param model_h2o: H2O model that we want to evaluate
:param h2o_model_test_metrics: test performance of H2O model under evaluation
:param end_test_str: string representing end test banner to be printed
:param want_p_values: bool True if we want to care about p-values and False if we don't
:param attr1_bool: bool True if we want to compare weight difference between H2O model and template model
and False otherwise.
:param attr2_bool: bool True if we want to compare p-value difference between H2O model and template model
and False otherwise.
:param att1_template: value of first template attribute, the weight vector
:param att2_template: value of second template attribute, the p-value vector
:param att3_template: value of third template attribute, the training data set MSE
:param att4_template: value of fourth template attribute, the test data set MSE
:param compare_att1_str: string describing the comparison of first attribute, e.g. "Comparing intercept and
weights ...."
:param h2o_att1_str: string describing H2O model first attribute values, e.g. "H2O intercept and weights: "
:param template_att1_str: string describing template first attribute values, e.g. "Theoretical intercept and
weights: "
:param att1_str_fail: string describing message to print out if difference exceeds threshold, e.g.
"Intercept and weights are not equal!"
:param att1_str_success: string describing message to print out if difference < threshold, e.g.
"Intercept and weights are close enough!"
:param compare_att2_str: string describing the comparison of first attribute, e.g. "Comparing p-values ...."
:param h2o_att2_str: string describing H2O model first attribute values, e.g. "H2O p-values: "
:param template_att2_str: string describing template first attribute values, e.g. "Theoretical p-values: "
:param att2_str_fail: string describing message to print out if difference exceeds threshold, e.g.
"P-values are not equal!"
:param att2_str_success: string describing message to print out if difference < threshold, e.g.
"P-values are close enough!"
:param compare_att3_str: string describing the comparison of first attribute, e.g. "Comparing training MSEs ...."
:param h2o_att3_str: string describing H2O model first attribute values, e.g. "H2O training MSE: "
:param template_att3_str: string describing template first attribute values, e.g. "Theoretical train MSE: "
:param att3_str_fail: string describing message to print out if difference exceeds threshold, e.g.
"Training MSEs are not equal!"
:param att3_str_success: string describing message to print out if difference < threshold, e.g.
"Training MSEs are close enough!"
:param compare_att4_str: string describing the comparison of first attribute, e.g. "Comparing test MSEs ...."
:param h2o_att4_str: string describing H2O model first attribute values, e.g. "H2O test MSE: "
:param template_att4_str: string describing template first attribute values, e.g. "Theoretical test MSE: "
:param att4_str_fail: string describing message to print out if difference exceeds threshold, e.g.
"Test MSEs are not equal!"
:param att4_str_success: string describing message to print out if difference < threshold, e.g.
"Test MSEs are close enough!"
:param failed_test_number: integer denote the number of tests failed
:param ignored_eps: if value < than this value, no comparison is performed
:param allowed_diff: threshold if exceeded will fail a test
:param noise_var: Gaussian noise variance used to generate data set
:param template_must_be_better: bool: True: template value must be lower, False: don't care
:param attr3_bool: bool denoting if we should compare attribute 3 values
:param attr4_bool: bool denoting if we should compare attribute 4 values
:return: a tuple containing test h2o model training and test performance metrics that include: weight, pValues,
mse_train, r2_train, mse_test, r2_test
"""
# grab weight from h2o model
test1_weight = get_train_glm_params(model_h2o, 'weights')
# grab p-values from h2o model
test1_p_values = []
if want_p_values:
test1_p_values = get_train_glm_params(model_h2o, 'p-values')
# grab other performance metrics
test1_mse_train = model_h2o.mse()
test1_r2_train = model_h2o.r2()
test1_mse_test = h2o_model_test_metrics.mse()
test1_r2_test = h2o_model_test_metrics.r2()
# compare performances of template and h2o model weights
failed_test_number += compare_two_arrays(test1_weight, att1_template, ignored_eps, allowed_diff*100, compare_att1_str,
h2o_att1_str, template_att1_str, att1_str_fail, att1_str_success,
attr1_bool)
# p-values
if want_p_values:
if np.isnan(np.asarray(test1_p_values)).any(): # p-values contain nan
failed_test_number += 1
failed_test_number += compare_two_arrays(test1_p_values, att2_template, ignored_eps, allowed_diff,
compare_att2_str, h2o_att2_str, template_att2_str, att2_str_fail,
att2_str_success, attr2_bool)
# Training MSE
need_to_compare = less_than(att3_template, test1_mse_train)
# in some cases, template value should always be better. Training data MSE should always
# be better without regularization than with regularization
if (not need_to_compare) and template_must_be_better:
failed_test_number += 1
failed_test_number += compare_two_arrays([test1_mse_train], [att3_template], ignored_eps, noise_var,
compare_att3_str, h2o_att3_str,
template_att3_str, att3_str_fail, att3_str_success, attr3_bool)
# Test MSE
need_to_compare = less_than(att4_template, test1_mse_test)
failed_test_number += compare_two_arrays([test1_mse_test], [att4_template], ignored_eps, noise_var,
compare_att4_str, h2o_att4_str, template_att4_str, att4_str_fail,
att4_str_success, need_to_compare, attr4_bool)
# print end test banner
print(end_test_str)
print("*******************************************************************************************")
sys.stdout.flush()
return test1_weight, test1_p_values, test1_mse_train, test1_r2_train, test1_mse_test,\
test1_r2_test, failed_test_number
def extract_comparison_attributes_and_print_multinomial(model_h2o, h2o_model_test_metrics, family_type, end_test_str,
compare_att_str=["", "", "", "", "", "", ""],
h2o_att_str=["", "", "", "", "", "", ""],
template_att_str=["", "", "", "", "", "", ""],
att_str_fail=["", "", "", "", "", "", ""],
att_str_success=["", "", "", "", "", "", ""],
test_model=None, test_model_metric=None, template_params=None,
can_be_better_than_template=[
False, False, False, False, False, False],
just_print=[True, True, True, True, True, True],
ignored_eps=1e-15, allowed_diff=1e-5, failed_test_number=0):
"""
This function basically will compare and print out six performance metrics of a test with a
template model. If the difference of comparison exceeds a certain threshold, the test will be determined as
failed and vice versa. There are times when we do not care about comparisons but mainly concerned with
logloss/prediction accuracy in determining if a test shall fail. We can set the input parameters to indicate
if this is the case.
:param model_h2o: H2O model that we want to evaluate
:param h2o_model_test_metrics: test performance of H2O model under evaluation
:param family_type: string represents the various distribution families (gaussian, multinomial, binomial)
supported by our GLM algo
:param end_test_str: string to be printed at the end of a test
:param compare_att_str: array of strings describing what we are trying to compare
:param h2o_att_str: array of strings describing each H2O attribute of interest
:param template_att_str: array of strings describing template attribute of interest
:param att_str_fail: array of strings to be printed if the comparison failed
:param att_str_success: array of strings to be printed if comparison succeeded
:param test_model: template model whose attributes we want to compare our H2O model with
:param test_model_metric: performance on test data set of template model
:param template_params: array containing template attribute values that we want to compare our H2O model with
:param can_be_better_than_template: array of bool: True: template value must be lower, False: don't care
:param just_print: array of bool for each attribute if True, no comparison is performed, just print the attributes
and if False, will compare the attributes and print the attributes as well
:param ignored_eps: if value < than this value, no comparison is performed
:param allowed_diff: threshold if exceeded will fail a test
:param failed_test_number: integer denote the number of tests failed so far
:return: accumulated number of tests that have failed so far
"""
# grab performance metrics from h2o model
(h2o_weight, h2o_logloss_train, h2o_confusion_matrix_train, h2o_accuracy_train, h2o_logloss_test,
h2o_confusion_matrix_test, h2o_accuracy_test) = grab_model_params_metrics(model_h2o, h2o_model_test_metrics,
family_type)
# grab performance metrics from template model
if test_model and test_model_metric:
(template_weight, template_logloss_train, template_confusion_matrix_train, template_accuracy_train,
template_logloss_test, template_confusion_matrix_test, template_accuracy_test) = \
grab_model_params_metrics(test_model, test_model_metric, family_type)
elif template_params:
# grab template comparison values from somewhere else
(template_weight, template_logloss_train, template_confusion_matrix_train, template_accuracy_train,
template_logloss_test, template_confusion_matrix_test, template_accuracy_test) = template_params
else:
assert False, "No valid template parameters are given for comparison."
# print and/or compare the weights between template and H2O
compare_index = 0
failed_test_number += compare_two_arrays(h2o_weight, template_weight, ignored_eps, allowed_diff,
compare_att_str[compare_index], h2o_att_str[compare_index],
template_att_str[compare_index], att_str_fail[compare_index],
att_str_success[compare_index], True, just_print[compare_index])
compare_index += 1
# this is logloss from training data set,
if not(just_print[compare_index]) and not(can_be_better_than_template[compare_index]):
if (h2o_logloss_train < template_logloss_train) and \
(abs(h2o_logloss_train-template_logloss_train) > 1e-5):
# H2O performed better than template which is not allowed
failed_test_number += 1 # increment failed_test_number and just print the results
compare_two_arrays([h2o_logloss_train], [template_logloss_train], ignored_eps, allowed_diff,
compare_att_str[compare_index], h2o_att_str[compare_index],
template_att_str[compare_index], att_str_fail[compare_index],
att_str_success[compare_index], True, True)
else:
failed_test_number += compare_two_arrays([h2o_logloss_train], [template_logloss_train], ignored_eps,
allowed_diff, compare_att_str[compare_index],
h2o_att_str[compare_index], template_att_str[compare_index],
att_str_fail[compare_index], att_str_success[compare_index], True,
False)
else:
template_better = is_template_better(just_print[compare_index], can_be_better_than_template[compare_index],
h2o_logloss_train, template_logloss_train, False)
# print and compare the logloss between template and H2O for training data
failed_test_number += compare_two_arrays([h2o_logloss_train], [template_logloss_train], ignored_eps,
allowed_diff, compare_att_str[compare_index],
h2o_att_str[compare_index], template_att_str[compare_index],
att_str_fail[compare_index], att_str_success[compare_index],
template_better, just_print[compare_index])
compare_index += 1
template_better = is_template_better(just_print[compare_index], can_be_better_than_template[compare_index],
h2o_logloss_test, template_logloss_test, False)
# print and compare the logloss between template and H2O for test data
failed_test_number += compare_two_arrays([h2o_logloss_test], [template_logloss_test], ignored_eps, allowed_diff,
compare_att_str[compare_index], h2o_att_str[compare_index],
template_att_str[compare_index], att_str_fail[compare_index],
att_str_success[compare_index], template_better, just_print[compare_index])
compare_index += 1
# print the confusion matrix from training data
failed_test_number += compare_two_arrays(h2o_confusion_matrix_train, template_confusion_matrix_train, ignored_eps,
allowed_diff, compare_att_str[compare_index], h2o_att_str[compare_index],
template_att_str[compare_index], att_str_fail[compare_index],
att_str_success[compare_index], True, just_print[compare_index])
compare_index += 1
# print the confusion matrix from test data
failed_test_number += compare_two_arrays(h2o_confusion_matrix_test, template_confusion_matrix_test, ignored_eps,
allowed_diff, compare_att_str[compare_index], h2o_att_str[compare_index],
template_att_str[compare_index], att_str_fail[compare_index],
att_str_success[compare_index], True, just_print[compare_index])
compare_index += 1
template_better = is_template_better(just_print[compare_index], can_be_better_than_template[compare_index],
h2o_accuracy_train, template_accuracy_train, True)
# print accuracy from training dataset
failed_test_number += compare_two_arrays([h2o_accuracy_train], [template_accuracy_train], ignored_eps, allowed_diff,
compare_att_str[compare_index], h2o_att_str[compare_index],
template_att_str[compare_index], att_str_fail[compare_index],
att_str_success[compare_index], template_better, just_print[compare_index])
compare_index += 1
# print accuracy from test dataset
template_better = is_template_better(just_print[compare_index], can_be_better_than_template[compare_index],
h2o_accuracy_test, template_accuracy_test, True)
failed_test_number += compare_two_arrays([h2o_accuracy_test], [template_accuracy_test], ignored_eps, allowed_diff,
compare_att_str[compare_index], h2o_att_str[compare_index],
template_att_str[compare_index], att_str_fail[compare_index],
att_str_success[compare_index], template_better, just_print[compare_index])
# print end test banner
print(end_test_str)
print("*******************************************************************************************")
sys.stdout.flush()
return failed_test_number
def is_template_better(just_print, can_be_better_than_template, h2o_att, template_att, bigger_is_better):
"""
This function is written to determine if the system under test performs better than the template model
performance.
:param just_print: bool representing if we are just interested in printing the attribute values
:param can_be_better_than_template: bool stating that it is okay in this case for the system under test to perform
better than the template system.
:param h2o_att: number representing the h2o attribute under test
:param template_att: number representing the template attribute
:param bigger_is_better: bool representing if metric is perceived to be better if its value is higher
:return: bool indicating if the template attribute is better.
"""
if just_print: # not interested in comparison, just want to print attribute values
return True # does not matter what we return here
else:
if bigger_is_better: # metric is better if it is greater
return not(h2o_att > template_att)
else: # metric is better if it is less
return not(h2o_att < template_att)
def grab_model_params_metrics(model_h2o, h2o_model_test_metrics, family_type):
"""
This function will extract and return the various metrics from a H2O GLM model and the corresponding H2O model
test metrics.
:param model_h2o: GLM H2O model
:param h2o_model_test_metrics: performance on test data set from H2O GLM model
:param family_type: string representing 'gaussian', 'binomial' or 'multinomial'
:return: tuple containing weight, logloss/confusion matrix/prediction accuracy calculated from training data set
and test data set respectively
"""
# grab weight from h2o model
h2o_weight = get_train_glm_params(model_h2o, 'weights', family_type=family_type)
# grab other performance metrics
h2o_logloss_train = model_h2o.logloss()
h2o_confusion_matrix_train = get_train_glm_params(model_h2o, 'confusion_matrix', family_type=family_type)
last_index = len(h2o_confusion_matrix_train.cell_values)-1
h2o_logloss_test = h2o_model_test_metrics.logloss()
if 'multinomial' in family_type.lower():
h2o_confusion_matrix_test = h2o_model_test_metrics.confusion_matrix()
h2o_accuracy_train = 1-h2o_confusion_matrix_train.cell_values[last_index][last_index]
h2o_accuracy_test = 1-h2o_confusion_matrix_test.cell_values[last_index][last_index]
elif 'binomial' in family_type.lower():
h2o_confusion_matrix_test = h2o_model_test_metrics.confusion_matrix().table
real_last_index = last_index+1
h2o_accuracy_train = 1-float(h2o_confusion_matrix_train.cell_values[last_index][real_last_index])
h2o_accuracy_test = 1-float(h2o_confusion_matrix_test.cell_values[last_index][real_last_index])
else:
assert False, "Only 'multinomial' and 'binomial' distribution families are supported for " \
"grab_model_params_metrics function!"
return h2o_weight, h2o_logloss_train, h2o_confusion_matrix_train, h2o_accuracy_train, h2o_logloss_test,\
h2o_confusion_matrix_test, h2o_accuracy_test
def prepare_data_sklearn_multinomial(training_data_xy):
"""
Sklearn model requires that the input matrix should contain a column of ones in order for
it to generate the intercept term. In addition, it wants the response vector to be in a
certain format as well.
:param training_data_xy: matrix containing both the predictors and response column
:return: tuple containing the predictor columns with a column of ones as the first column and
the response vector in the format that Sklearn wants.
"""
(num_row, num_col) = training_data_xy.shape
# change response to be enum and not real
y_ind = num_col-1
training_data_xy[y_ind] = training_data_xy[y_ind].astype(int)
# prepare response column for sklearn logistic regression
response_y = training_data_xy[:, y_ind]
response_y = np.ravel(response_y)
training_data = training_data_xy[:, range(0, y_ind)]
# added column of ones into data matrix X_MAT
temp_ones = np.asmatrix(np.ones(num_row)).transpose()
x_mat = np.concatenate((temp_ones, training_data), axis=1)
return response_y, x_mat
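# A minimal usage sketch (hypothetical toy data; assumes numpy is imported as np).  The last column holds the
# integer class labels and the remaining columns are the predictors.
# xy = np.array([[0.5, 1.2, 0.0], [0.1, 0.7, 1.0], [0.9, 0.3, 2.0]])
# response_y, x_mat = prepare_data_sklearn_multinomial(xy)
# response_y is a flat label vector and x_mat carries a leading column of ones for the intercept term.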
def get_gridables(params_in_json):
"""
This function is written to walk through all parameters of a model and grab the parameters, its type and
its default values as three lists of all the gridable parameters.
:param params_in_json: a list of parameters associated with a H2O model. Each list is a dict containing fields
of interest like name, type, gridable, default values, ....
:return: three lists: gridable_params, gridable_types and gridable_defaults containing the names of the parameter,
its associated type like int, float, unicode, bool and default parameter values
"""
# grab all gridable parameters and its type
gridable_parameters = []
gridable_types = []
gridable_defaults = []
for each_param in params_in_json:
if each_param['gridable']:
gridable_parameters.append(str(each_param["name"]))
gridable_types.append(each_param["type"])
if type(each_param["default_value"]) == 'unicode': # hyper-parameters cannot be unicode
gridable_defaults.append(str(each_param["default_value"]))
else:
gridable_defaults.append(each_param["default_value"])
return gridable_parameters, gridable_types, gridable_defaults
def add_fold_weights_offset_columns(h2o_frame, nfold_max_weight_offset, column_names, column_type='fold_assignment'):
"""
Add fold_columns to H2O training frame specified in h2o_frame according to nfold. The new added
columns should use the names in column_names. Returns a h2o_frame with newly added fold_columns.
Copied from Eric's code.
:param h2o_frame: H2O frame containing training data
:param nfold_max_weight_offset: integer, number of fold in the cross-validation or maximum weight scale or offset
:param column_names: list of strings denoting the column names for the new fold columns
:param column_type: optional string denoting whether we are trying to generate fold_assignment or
weights_column or offset_column
:return: H2O frame with added fold column assignments
"""
number_row = h2o_frame.nrow
# copied this part from Eric's code
for index in range(len(column_names)):
if 'fold_assignment' in column_type:
temp_a = np.random.random_integers(0, nfold_max_weight_offset - 1, [number_row, 1]) # inclusive
elif 'weights_column' in column_type:
temp_a = np.random.uniform(0, nfold_max_weight_offset, [number_row, 1])
elif 'offset_column' in column_type:
temp_a = random.uniform(0, nfold_max_weight_offset)*np.asmatrix(np.ones(number_row)).transpose()
else:
assert False, "column_type must be either 'fold_assignment' or 'weights_column'!"
fold_assignments = h2o.H2OFrame(temp_a)
fold_assignments.set_names([column_names[index]])
h2o_frame = h2o_frame.cbind(fold_assignments)
return h2o_frame
def gen_grid_search(model_params, hyper_params, exclude_parameters, gridable_parameters, gridable_types,
gridable_defaults, max_int_number, max_int_val, min_int_val, max_real_number, max_real_val,
min_real_val, quantize_level='1.00000000'):
"""
This function is written to randomly generate griddable parameters for a gridsearch. For parameters already
found in hyper_params, no random list will be generated. In addition, we will check to make sure that the
griddable parameters are actually used by the model before adding them to the hyper_params dict.
:param model_params: list of string containing names of argument to the model
:param hyper_params: dict structure containing a list of gridable parameters names with their list
:param exclude_parameters: list containing parameter names not to be added to hyper_params
:param gridable_parameters: list of gridable parameter names
:param gridable_types: list of gridable parameter types
:param gridable_defaults: list of gridable parameter default values
:param max_int_number: integer, size of integer gridable parameter list
:param max_int_val: integer, maximum integer value for integer gridable parameter
:param min_int_val: integer, minimum integer value for integer gridable parameter
:param max_real_number: integer, size of real gridable parameter list
:param max_real_val: float, maximum real value for real gridable parameter
:param min_real_val: float, minimum real value for real gridable parameter
:param quantize_level: string representing the quantization level of floating point values generated randomly.
:return: a tuple of hyper_params: dict of hyper parameters for gridsearch, true_gridable_parameters:
     a list of strings containing names of truly gridable parameters, true_gridable_types: a list of strings
     denoting parameter types and true_gridable_defaults: default values of those truly gridable parameters
"""
count_index = 0
true_gridable_parameters = []
true_gridable_types = []
true_gridable_defaults = []
for para_name in gridable_parameters:
# parameter must not in exclusion list
if (para_name in model_params) and (para_name not in exclude_parameters):
true_gridable_parameters.append(para_name)
true_gridable_types.append(gridable_types[count_index])
true_gridable_defaults.append(gridable_defaults[count_index])
if para_name not in hyper_params.keys(): # add default value to user defined parameter list
# gridable parameter not seen before. Randomly generate values for it
if ('int' in gridable_types[count_index]) or ('long' in gridable_types[count_index]):
# make sure integer values are not duplicated, using set action to remove duplicates
hyper_params[para_name] = list(set([random.randint(min_int_val, max_int_val) for p in
range(0, max_int_number)]))
elif ('double' in gridable_types[count_index]) or ('float' in gridable_types[count_index]):
hyper_params[para_name] = fix_float_precision(list(np.random.uniform(min_real_val, max_real_val,
max_real_number)), quantize_level=quantize_level)
count_index += 1
return hyper_params, true_gridable_parameters, true_gridable_types, true_gridable_defaults
def fix_float_precision(float_list, quantize_level='1.00000000'):
"""
    This function takes in a list/tuple of floating point numbers and quantizes each one to the fixed
    precision specified by quantize_level, removing duplicates afterwards.
    :param float_list: tuple/list of floating point numbers
    :param quantize_level: string, optional, representing the number of decimal places we care about
    :return: list of floats quantized to the precision specified in quantize_level, with duplicates removed
"""
fixed_float = []
for num in float_list:
fixed_float.append(float(Decimal(num).quantize(Decimal(quantize_level))))
return list(set(fixed_float))
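# A minimal usage sketch (hypothetical values): quantize to 8 decimal places and drop duplicates that appear
# after quantization (the result is built from a set, so its order is not guaranteed).
# fix_float_precision([0.123456789123, 0.123456789456, 0.5])   # -> [0.12345679, 0.5]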
def extract_used_params_xval(a_grid_model, model_param_names, params_dict, algo="GBM"):
"""
    This function performs a similar function to extract_used_params.  However, for max_runtime_secs,
    we need to go into each cross-validation model and grab the max_runtime_secs and add them up in order to
    get the correct value.  In addition, we put the algo-specific model parameters into params_dict.
:param a_grid_model: list of models generated by gridsearch
:param model_param_names: hyper-parameter names that are specified for the gridsearch.
:param params_dict: dict containing name/value pairs specified to an algo.
:param algo: string, optional, denoting the algo we are looking at.
:return: params_used: a dict structure containing parameters that take on values as name/value pairs which
will be used to build a model by hand using the same parameter setting as the model built by gridsearch.
"""
params_used = dict()
    # need to extract the max_runtime_secs from ONE cross-validation model or the base model
if a_grid_model._is_xvalidated:
xv_keys = a_grid_model._xval_keys
for id in xv_keys: # only need to get info from one model
each_xv_model = h2o.get_model(id) # get each model
params_used = extract_used_params(model_param_names, each_xv_model.params, params_dict, algo)
break
else:
params_used = extract_used_params(model_param_names, a_grid_model.params, params_dict, algo)
return params_used
def extract_used_params(model_param_names, grid_model_params, params_dict, algo="GLM"):
"""
This function is used to build a dict out of parameters used by our gridsearch to build a H2O model given
    the dict structure that describes the parameters and their values used by gridsearch to build that
    particular model.
:param model_param_names: list contains parameter names that we are interested in extracting
:param grid_model_params: dict contains key as names of parameter and values as list of two values: default and
actual.
    :param params_dict: dict containing extra parameters to add to params_used like family, e.g. 'gaussian',
    'binomial', ...
    :param algo: string, optional, denoting the algo under test; for GLM, 'lambda' is renamed to 'Lambda'
:return: params_used: a dict structure containing parameters that take on values as name/value pairs which
will be used to build a model by hand using the same parameter setting as the model built by gridsearch.
"""
params_used = dict()
grid_model_params_keys = grid_model_params.keys()
for each_parameter in model_param_names:
parameter_name = str(each_parameter)
if parameter_name in grid_model_params_keys:
params_used[parameter_name] = grid_model_params[each_parameter]['actual']
if params_dict:
for key, value in params_dict.items():
params_used[key] = value # add distribution family to parameters used list
# only for GLM, change lambda to Lambda
if algo =="GLM":
if 'lambda' in params_used.keys():
params_used['Lambda'] = params_used['lambda']
del params_used['lambda']
return params_used
def insert_error_grid_search(hyper_params, gridable_parameters, gridable_types, error_number):
"""
This function will randomly introduce errors into a copy of hyper_params. Depending on the random number
error_number generated, the following errors can be introduced:
error_number = 0: randomly alter the name of a hyper-parameter name;
error_number = 1: randomly choose a hyper-parameter and remove all elements in its list
error_number = 2: add randomly generated new hyper-parameter names with random list
error_number other: randomly choose a hyper-parameter and insert an illegal type into it
:param hyper_params: dict containing all legal hyper-parameters for our grid search
:param gridable_parameters: name of griddable parameters (some may not be griddable)
:param gridable_types: type of griddable parameters
:param error_number: integer representing which errors to introduce into the gridsearch hyper-parameters
:return: new dict with errors in either parameter names or parameter values
"""
error_hyper_params = copy.deepcopy(hyper_params)
# error_hyper_params = {k : v for k, v in hyper_params.items()}
param_index = random.randint(0, len(hyper_params)-1)
param_name = list(hyper_params)[param_index]
param_type = gridable_types[gridable_parameters.index(param_name)]
if error_number == 0: # grab a hyper-param randomly and copy its name twice
new_name = param_name+param_name
error_hyper_params[new_name] = error_hyper_params[param_name]
del error_hyper_params[param_name]
elif error_number == 1:
error_hyper_params[param_name] = []
elif error_number == 2:
new_param = generate_random_words(random.randint(20,100))
error_hyper_params[new_param] = error_hyper_params[param_name]
else:
error_hyper_params = insert_bad_value(error_hyper_params, param_name, param_type)
return error_hyper_params
def insert_bad_value(error_hyper_params, param_name, param_type):
"""
    This function is written to insert a value into a hyper-parameter list whose type differs from the type of
    the list's other elements.
:param error_hyper_params: dict containing all hyper-parameters for a grid search
:param param_name: string denoting the hyper-parameter we want to insert bad element to
:param param_type: string denoting hyper-parameter type
:return: dict containing new inserted error value
"""
if 'int' in param_type: # insert a real number into integer
error_hyper_params[param_name].append(random.uniform(-10,10))
    elif 'enum' in param_type:  # insert a float into enums
        error_hyper_params[param_name].append(random.uniform(-10,10))
    elif 'double' in param_type:  # insert a bool into floats
        error_hyper_params[param_name].append(random.uniform(0,1) > 0.5)
else: # insert a random string for all other cases
error_hyper_params[param_name].append(generate_random_words(random.randint(20,100)))
return error_hyper_params
def generate_random_words(word_length):
"""
This function will generate a random word consisting of letters, numbers and
punctuation given the word_length.
:param word_length: integer denoting length of the word
:return: string representing the random word
"""
if word_length > 0:
all_chars = string.ascii_letters + string.digits + string.punctuation
return ''.join((random.choice(all_chars)) for index in range(int(word_length)))
else:
assert False, "word_length must be an integer greater than 0."
def generate_redundant_parameters(hyper_params, gridable_parameters, gridable_defaults, error_number):
"""
This function will randomly choose a set of hyper_params and make a dict out of it so we can
duplicate the parameter specification in both the model and grid search.
:param hyper_params: dict containing all griddable parameters as hyper_param to grid search
    :param gridable_parameters: list of gridable parameters (some may not be truly gridable)
:param gridable_defaults: list of default values for gridable parameters
:param error_number: int, indicate ways to change the model parameter and the hyper-parameter
Here are the actions performed on the model parameter and hyper-parameters.
error_number = 0: set model parameter to be a value out of the hyper-parameter value list, should not
generate error;
error_number = 1: set model parameter to be default value, should not generate error in this case;
error_number = 3: make sure model parameter is not set to default and choose a value not in the
hyper-parameter value list.
:return: 2 dicts containing duplicated parameters with specification, new hyperparameter specification
"""
error_hyper_params = copy.deepcopy(hyper_params)
# error_hyper_params = {k : v for k, v in hyper_params.items()}
params_dict = {}
num_params = random.randint(1, len(error_hyper_params))
params_list = list(error_hyper_params)
# remove default values out of hyper_params
for key in params_list:
        default_value = gridable_defaults[gridable_parameters.index(key)]
if default_value in error_hyper_params[key]:
error_hyper_params[key].remove(default_value)
for index in range(num_params):
param_name = params_list[index]
hyper_params_len = len(error_hyper_params[param_name])
if error_number == 0:
# randomly assigned the parameter to take one value out of the list
param_value_index = random.randint(0, len(error_hyper_params[param_name])-1)
params_dict[param_name] = error_hyper_params[param_name][param_value_index]
elif error_number == 1:
param_value_index = gridable_parameters.index(param_name)
params_dict[param_name] = gridable_defaults[param_value_index]
else:
# randomly assign model parameter to one of the hyper-parameter values, should create error condition here
param_value_index = random.randint(0, hyper_params_len-1)
params_dict[param_name] = error_hyper_params[param_name][param_value_index]
# final check to make sure lambda is Lambda
if 'lambda' in list(params_dict):
params_dict["Lambda"] = params_dict['lambda']
del params_dict["lambda"]
return params_dict, error_hyper_params
def count_models(hyper_params):
"""
Given a hyper_params dict, this function will return the maximum number of models that can be built out of all
the combination of hyper-parameters.
:param hyper_params: dict containing parameter name and a list of values to iterate over
:return: max_model_number: int representing maximum number of models built
"""
max_model_number = 1
for key in list(hyper_params):
max_model_number *= len(hyper_params[key])
return max_model_number
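# A minimal usage sketch (hypothetical hyper-parameter grid): 3 values of ntrees times 2 values of max_depth
# gives at most 6 grid models.
# count_models({'ntrees': [10, 50, 100], 'max_depth': [3, 5]})   # -> 6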
def error_diff_2_models(grid_table1, grid_table2, metric_name):
"""
This function will take two models generated by gridsearch and calculate the mean absolute differences of
the metric values specified by the metric_name in the two model. It will return the mean differences.
:param grid_table1: first H2OTwoDimTable generated by gridsearch
:param grid_table2: second H2OTwoDimTable generated by gridsearch
:param metric_name: string, name of the metric of interest
:return: real number which is the mean absolute metric difference between the two models
"""
num_model = len(grid_table1.cell_values)
metric_diff = 0
for model_index in range(num_model):
metric_diff += abs(grid_table1.cell_values[model_index][-1] - grid_table2.cell_values[model_index][-1])
if (num_model > 0):
return metric_diff/num_model
else:
assert False, "error_diff_2_models: your table contains zero models."
def find_grid_runtime(model_list):
"""
    This function, given a list of models built by gridsearch, will go into each model and calculate the total
    amount of time it took to actually build all the models, in seconds.
:param model_list: list of model built by gridsearch, cartesian or randomized with cross-validation
enabled.
:return: total_time_sec: total number of time in seconds in building all the models
"""
total_time_sec = 0
for each_model in model_list:
total_time_sec += each_model._model_json["output"]["run_time"] # time in ms
# if cross validation is used, need to add those run time in here too
if each_model._is_xvalidated:
xv_keys = each_model._xval_keys
for id in xv_keys:
each_xv_model = h2o.get_model(id)
total_time_sec += each_xv_model._model_json["output"]["run_time"]
return total_time_sec/1000.0 # return total run time in seconds
def evaluate_metrics_stopping(model_list, metric_name, bigger_is_better, search_criteria, possible_model_number):
"""
This function given a list of dict that contains the value of metric_name will manually go through the
early stopping condition and see if the randomized grid search will give us the correct number of models
    generated.  Note that you cannot assume the model_list is in the order in which the models were built.  It
    actually comes already sorted, which we do not want....
:param model_list: list of models built sequentially that contains metric of interest among other fields
:param metric_name: string representing name of metric that we want to based our stopping condition on
:param bigger_is_better: bool indicating if the metric is optimized by getting bigger if True and vice versa
:param search_criteria: dict structure containing the search criteria for randomized gridsearch
:param possible_model_number: integer, represent the absolute possible number of models built based on the
hyper-parameter size
    :return: bool indicating if the early stopping condition is justified
"""
tolerance = search_criteria["stopping_tolerance"]
stop_round = search_criteria["stopping_rounds"]
min_list_len = 2*stop_round # minimum length of metrics needed before we start early stopping evaluation
metric_list = [] # store metric of optimization
stop_now = False
# provide metric list sorted by time. Oldest model appear first.
metric_list_time_ordered = sort_model_by_time(model_list, metric_name)
for metric_value in metric_list_time_ordered:
metric_list.append(metric_value)
if len(metric_list) > min_list_len: # start early stopping evaluation now
stop_now = evaluate_early_stopping(metric_list, stop_round, tolerance, bigger_is_better)
if stop_now:
if len(metric_list) < len(model_list): # could have stopped early in randomized gridsearch
return False
else: # randomized gridsearch stopped at the correct condition
return True
if len(metric_list) == possible_model_number: # never meet early stopping condition at end of random gridsearch
return True # if max number of model built, still ok
else:
return False # early stopping condition never met but random gridsearch did not build all models, bad!
def sort_model_by_time(model_list, metric_name):
"""
This function is written to sort the metrics that we care in the order of when the model was built. The
oldest model metric will be the first element.
:param model_list: list of models built sequentially that contains metric of interest among other fields
:param metric_name: string representing name of metric that we want to based our stopping condition on
:return: model_metric_list sorted by time
"""
model_num = len(model_list)
model_metric_list = [None] * model_num
for index in range(model_num):
model_index = int(model_list[index]._id.split('_')[-1])
model_metric_list[model_index] = \
model_list[index]._model_json["output"]["cross_validation_metrics"]._metric_json[metric_name]
return model_metric_list
def evaluate_early_stopping(metric_list, stop_round, tolerance, bigger_is_better):
"""
This function mimics the early stopping function as implemented in ScoreKeeper.java. Please see the Java file
comment to see the explanation of how the early stopping works.
:param metric_list: list containing the optimization metric under consideration for gridsearch model
:param stop_round: integer, determine averaging length
:param tolerance: real, tolerance to see if the grid search model has improved enough to keep going
:param bigger_is_better: bool: True if metric is optimized as it gets bigger and vice versa
    :return: bool indicating if we should stop early (metric_list is sorted in place as a side effect)
"""
metric_len = len(metric_list)
metric_list.sort(reverse=bigger_is_better)
shortest_len = 2*stop_round
bestInLastK = 1.0*sum(metric_list[0:stop_round])/stop_round
lastBeforeK = 1.0*sum(metric_list[stop_round:shortest_len])/stop_round
if not(np.sign(bestInLastK) == np.sign(lastBeforeK)):
return False
ratio = bestInLastK/lastBeforeK
if math.isnan(ratio):
return False
if bigger_is_better:
return not (ratio > 1+tolerance)
else:
return not (ratio < 1-tolerance)
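# A minimal usage sketch (hypothetical logloss values, where smaller is better): with stop_round=2, the best two
# values improve on the two before them by less than the tolerance, so stopping is justified.  Note that
# metric_list is sorted in place by this call.
# metrics = [0.70, 0.6510, 0.6509, 0.6508, 0.6507]
# evaluate_early_stopping(metrics, stop_round=2, tolerance=1e-3, bigger_is_better=False)   # -> True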
def check_and_count_models(hyper_params, params_zero_one, params_more_than_zero, params_more_than_one,
params_zero_positive, max_grid_model):
"""
This function will look at the hyper-parameter space set in hyper_params, generate a new hyper_param space that
will contain a smaller number of grid_models. It will determine how many models will be built from
this new hyper_param space. In order to arrive at the correct answer, it must discount parameter settings that
are illegal.
:param hyper_params: dict containing model parameter names and list of values to set it to
:param params_zero_one: list containing model parameter names whose values must be between 0 and 1
:param params_more_than_zero: list containing model parameter names whose values must exceed zero
:param params_more_than_one: list containing model parameter names whose values must exceed one
:param params_zero_positive: list containing model parameter names whose values must equal to or exceed zero
:param max_grid_model: maximum number of grid_model that can be generated from the new hyper_params space
:return: total model: integer denoting number of grid models that can be built from all legal parameter settings
in new hyper_parameter space
final_hyper_params: dict of new hyper parameter space derived from the original hyper_params
"""
total_model = 1
param_len = 0
hyper_keys = list(hyper_params)
shuffle(hyper_keys) # get all hyper_parameter names in random order
final_hyper_params = dict()
for param in hyper_keys:
# this param should be > 0 and <= 2
if param == "col_sample_rate_change_per_level":
param_len = len([x for x in hyper_params["col_sample_rate_change_per_level"] if (x > 0)
and (x <= 2)])
elif param in params_zero_one:
param_len = len([x for x in hyper_params[param] if (x >= 0)
and (x <= 1)])
elif param in params_more_than_zero:
param_len = len([x for x in hyper_params[param] if (x > 0)])
elif param in params_more_than_one:
param_len = len([x for x in hyper_params[param] if (x > 1)])
elif param in params_zero_positive:
param_len = len([x for x in hyper_params[param] if (x >= 0)])
else:
param_len = len(hyper_params[param])
if (param_len >= 0) and ((total_model*param_len) <= max_grid_model):
total_model *= param_len
final_hyper_params[param] = hyper_params[param]
elif (total_model*param_len) > max_grid_model:
break
return total_model, final_hyper_params
def write_hyper_parameters_json(dir1, dir2, json_filename, hyper_parameters):
"""
Write a json file of the hyper_parameters in directories dir1 and dir2 for debugging purposes.
:param dir1: String containing first directory where you want to write the json file to
:param dir2: String containing second directory where you want to write the json file to
:param json_filename: String containing json file name
:param hyper_parameters: dict containing hyper-parameters used
"""
# save hyper-parameter file in test directory
with open(os.path.join(dir1, json_filename), 'w') as test_file:
json.dump(hyper_parameters, test_file)
# save hyper-parameter file in sandbox
with open(os.path.join(dir2, json_filename), 'w') as test_file:
json.dump(hyper_parameters, test_file)
def compare_frames(frame1, frame2, numElements, tol_time=0, tol_numeric=0, strict=False, compare_NA=True):
"""
This function will compare two H2O frames to make sure their dimension, and values in all cells are the same.
It will not compare the column names though.
:param frame1: H2O frame to be compared
:param frame2: H2O frame to be compared
:param numElements: integer to denote number of rows to compare. Done to reduce compare time.
Set to 0 or negative number if you want to compare all elements.
:param tol_time: optional parameter to limit time value difference.
    :param tol_numeric: optional parameter to limit numeric value difference.
:param strict: optional parameter to enforce strict comparison or not. If True, column type must
match in order to pass the test.
:param compare_NA: optional parameter to compare NA or not. For csv file generated from orc file, the
NAs are represented as some other symbol but our CSV will not be able to parse it correctly as NA.
In this case, do not compare the number of NAs.
:return: boolean: True, the two frames are equal and False otherwise.
"""
# check frame dimensions
rows1, cols1 = frame1.dim
rows2, cols2 = frame2.dim
assert rows1 == rows2 and cols1 == cols2, "failed dim check! frame 1 rows:{0} frame 2 rows:{1} frame 1 cols:{2} " \
"frame2 cols:{3}".format(rows1, rows2, cols1, cols2)
na_frame1 = frame1.isna().sum().sum(axis=1)[:,0]
na_frame2 = frame2.isna().sum().sum(axis=1)[:,0]
if compare_NA: # check number of missing values
assert na_frame1.flatten() == na_frame2.flatten(), "failed numbers of NA check! Frame 1 NA number: {0}, frame 2 " \
"NA number: {1}".format(na_frame1, na_frame2)
# check column types are the same before proceeding to check each row content.
for col_ind in range(cols1):
c1_key = frame1.columns[col_ind]
c2_key = frame2.columns[col_ind]
c2_type = frame2.types[c2_key]
c1_type = frame1.types[c1_key]
print("###### Comparing column: {0} and column type is {1}.".format(col_ind, c1_type))
if strict: # every column type must match
assert c1_type == c2_type, "failed column type check! frame1 col type: {0}, frame2 col type: " \
"{1}".format(c1_type, c2_type)
else:
if str(c2_type) == 'enum': # orc files do not have enum column type. We convert it here
frame1[col_ind].asfactor()
# compare string
if (str(c1_type) == 'string') or (str(c1_type) == 'enum'):
compareOneStringColumn(frame1, frame2, col_ind, rows1, numElements)
else:
if str(c2_type) == 'time': # compare time columns
compareOneNumericColumn(frame1, frame2, col_ind, rows1, tol_time, numElements)
else:
compareOneNumericColumn(frame1, frame2, col_ind, rows1, tol_numeric, numElements)
return True
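# Hypothetical usage sketch for compare_frames (the dataset path is an assumption; h2o, locate
# and compare_frames come from this module): import the same file twice and spot-check 50
# random rows with a loose numeric tolerance.
def _example_compare_frames():
    f1 = h2o.import_file(path=locate("smalldata/iris/iris.csv"))  # assumed dataset path
    f2 = h2o.import_file(path=locate("smalldata/iris/iris.csv"))
    assert compare_frames(f1, f2, numElements=50, tol_numeric=1e-10)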
def compareOneStringColumn(frame1, frame2, col_ind, rows, numElements):
"""
This function will compare two String columns of two H2O frames to make sure that they are the same.
:param frame1: H2O frame to be compared
:param frame2: H2O frame to be compared
:param col_ind: integer denoting column index to compare the two frames
:param rows: integer denoting number of rows in the column
:param numElements: integer to denote number of rows to compare. Done to reduce compare time
:return: None. Will throw exceptions if comparison failed.
"""
row_indices = list(range(rows))
if numElements > 0:
random.shuffle(row_indices)
else:
numElements = rows
for ele_ind in range(numElements):
row_ind = row_indices[ele_ind]
val1 = frame1[row_ind, col_ind]
val2 = frame2[row_ind, col_ind]
assert val1 == val2, "failed frame values check! frame1 value: {0}, frame2 value: {1} at row {2}, column " \
"{3}".format(val1, val2, row_ind, col_ind)
def compareOneNumericColumn(frame1, frame2, col_ind, rows, tolerance, numElements):
"""
This function compares two numeric columns of two H2O frames to make sure that they are close.
:param frame1: H2O frame to be compared
:param frame2: H2O frame to be compared
:param col_ind: integer denoting column index to compare the two frames
:param rows: integer denoting number of rows in the column
:param tolerance: double parameter to limit numerical value difference.
:param numElements: integer to denote number of rows to compare. Done to reduce compare time.
:return: None. Will throw exceptions if comparison failed.
"""
row_indices = []
if numElements > 0:
row_indices = random.sample(range(rows), numElements)
else:
numElements = rows # Compare all elements
row_indices = list(range(rows))
for ele_ind in range(numElements):
row_ind = row_indices[ele_ind]
val1 = frame1[row_ind, col_ind]
val2 = frame2[row_ind, col_ind]
if not(math.isnan(val1)) and not(math.isnan(val2)): # both frames contain valid elements
diff = abs(val1-val2)/max(1, abs(val1), abs(val2))
assert diff <= tolerance, "failed frame values check! frame1 value = {0}, frame2 value = {1}, " \
"at row {2}, column {3}. The difference is {4}.".format(val1, val2, row_ind,
col_ind, diff)
elif math.isnan(val1) and math.isnan(val2): # both frame contains missing values
continue
else: # something is wrong, one frame got a missing value while the other is fine.
assert 1 == 2, "failed frame values check! frame1 value {0}, frame2 value {1} at row {2}, " \
"column {3}".format(val1, val2, row_ind, col_ind)
import warnings
def expect_warnings(filewithpath, warn_phrase="warn", warn_string_of_interest="warn", number_of_times=1, in_hdfs=False):
"""
    This function imports the given file and analyzes the warning messages printed while doing so.
    The goal here is to capture any warnings that we expect to be raised during the import and to
    verify that they appear the expected number of times.
:param filewithpath: name of file to be parsed with path
:param warn_phrase: capture the warning header, sometimes it is warn or userwarn.
:param warn_string_of_interest: specific warning message string
    :param number_of_times: number of warning lines we are expecting.
    :param in_hdfs: set to True if filewithpath points to a file on HDFS; otherwise the file is resolved with locate().
    :return: True if the expected warning was found at least number_of_times times, False otherwise
"""
    number_warnings = 0
buffer = StringIO() # redirect warning messages to string buffer for later analysis
sys.stderr = buffer
frame = None
if in_hdfs:
frame = h2o.import_file(filewithpath)
else:
frame = h2o.import_file(path=locate(filewithpath))
    sys.stderr = sys.__stderr__  # restore stderr to its original stream.
try: # for python 2.7
if len(buffer.buflist) > 0:
for index in range(len(buffer.buflist)):
print("*** captured warning message: {0}".format(buffer.buflist[index]))
if (warn_phrase in buffer.buflist[index]) and (warn_string_of_interest in buffer.buflist[index]):
                    number_warnings = number_warnings + 1
except: # for python 3.
warns = buffer.getvalue()
print("*** captured warning message: {0}".format(warns))
if (warn_phrase in warns) and (warn_string_of_interest in warns):
            number_warnings = number_warnings + 1
    print("Number of warnings found: {0} and number of times that warnings should appear {1}.".format(
        number_warnings, number_of_times))
    if number_warnings >= number_of_times:
return True
else:
return False
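# Illustrative call (the file path and warning text are assumptions, not from the original suite):
# verify that importing a particular file emits the expected warning exactly once.
def _example_expect_warnings():
    return expect_warnings("smalldata/parser/some_file.csv",  # assumed path
                           warn_phrase="UserWarning",
                           warn_string_of_interest="Unmatched quote",  # assumed message fragment
                           number_of_times=1)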
def compare_frame_summary(frame1_summary, frame2_summary, compareNames=False, compareTypes=False):
"""
This method is written to compare the frame summary between two frames.
    :param frame1_summary: summary of the first frame (list of per-column summary dicts)
    :param frame2_summary: summary of the second frame
    :param compareNames: if True, also compare the column labels
    :param compareTypes: if True, also compare the column types
    :return: None. Throws an assertion error if the two summaries differ.
"""
frame1_column_number = len(frame1_summary)
frame2_column_number = len(frame2_summary)
assert frame1_column_number == frame2_column_number, "failed column number check! Frame 1 column number: {0}," \
"frame 2 column number: {1}".format(frame1_column_number,
frame2_column_number)
for col_index in range(frame1_column_number): # check summary for each column
for key_val in list(frame1_summary[col_index]):
if not(compareNames) and (str(key_val) == 'label'):
continue
if not(compareTypes) and (str(key_val) == 'type'):
continue
if str(key_val) == 'precision': # skip comparing precision
continue
val1 = frame1_summary[col_index][key_val]
val2 = frame2_summary[col_index][key_val]
if isinstance(val1, list) or isinstance(val1, dict):
if isinstance(val1, dict):
assert val1 == val2, "failed column summary comparison for column {0} and summary " \
"type {1}, frame 1 value is {2}, frame 2 value is " \
"{3}".format(col_index, str(key_val), val1, val2)
else:
if len(val1) > 0:
# find if elements are float
float_found = False
for ind in range(len(val1)):
if isinstance(val1[ind], float):
float_found = True
break
if float_found:
for ind in range(len(val1)):
                                if str(val1[ind]).lower() != 'nan':  # skip NaN entries
assert abs(val1[ind]-val2[ind]) < 1e-5, "failed column summary comparison for " \
"column {0} and summary type {1}, frame 1" \
" value is {2}, frame 2 value is " \
"{3}".format(col_index, str(key_val),
val1[ind], val2[ind])
else:
assert val1 == val2, "failed column summary comparison for column {0} and summary" \
" type {1}, frame 1 value is {2}, frame 2 value is " \
"{3}".format(col_index, str(key_val), val1, val2)
else:
if isinstance(val1, float):
assert abs(val1-val2) < 1e-5, "failed column summary comparison for column {0} and summary type " \
"{1}, frame 1 value is {2}, frame 2 value is " \
"{3}".format(col_index, str(key_val), val1, val2)
else:
assert val1 == val2, "failed column summary comparison for column {0} and summary type " \
"{1}, frame 1 value is {2}, frame 2 value is " \
"{3}".format(col_index, str(key_val), val1, val2)
def cannaryHDFSTest(hdfs_name_node, file_name):
"""
    This function is written to detect if the hive-exec version is too old. It will return
    True if it is too old and False otherwise.
    :param hdfs_name_node: HDFS name node address used to build the hdfs:// URL
    :param file_name: path (on HDFS) of the canary file, e.g. an ORC file
    :return: True if the hive-exec version is too old to parse the file, False otherwise
"""
url_orc = "hdfs://{0}{1}".format(hdfs_name_node, file_name)
try:
tempFrame = h2o.import_file(url_orc)
h2o.remove(tempFrame)
print("Your hive-exec version is good. Parsing success for {0}.".format(url_orc))
return False
except Exception as e:
print("Error exception is {0}".format(str(e)))
if "NoSuchFieldError: vector" in str(e):
return True
else: # exception is caused by other reasons.
return False
def extract_scoring_history_field(aModel, fieldOfInterest, takeFirst=False):
"""
    Given a fieldOfInterest that is found in the model scoring history, this function will extract the list
    of field values for you from the model.
    :param aModel: H2O model whose scoring history the field values are extracted from
    :param fieldOfInterest: string representing a field of interest.
    :param takeFirst: if True, only return the value from the first scoring iteration.
    :return: List of field values or None if it cannot be found
"""
return extract_from_twoDimTable(aModel._model_json["output"]["scoring_history"], fieldOfInterest, takeFirst)
def extract_from_twoDimTable(metricOfInterest, fieldOfInterest, takeFirst=False):
"""
    Given a fieldOfInterest that is found in a twoDimTable (e.g. a model scoring history), this function
    will extract the list of field values for you from the table.
    :param metricOfInterest: twoDimTable from which to extract the field values
    :param fieldOfInterest: string representing a field of interest.
    :param takeFirst: if True, only return the value from the first row.
    :return: List of field values or None if it cannot be found
"""
allFields = metricOfInterest._col_header
if fieldOfInterest in allFields:
cellValues = []
fieldIndex = allFields.index(fieldOfInterest)
for eachCell in metricOfInterest.cell_values:
cellValues.append(eachCell[fieldIndex])
if takeFirst: # only grab the result from the first iteration.
break
return cellValues
else:
return None
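# Hypothetical usage sketch: pull a per-iteration metric out of a fitted model's scoring history.
# The model variable and the exact column label ("training_rmse") are assumptions.
def _example_extract_history(a_model):
    rmse_per_iteration = extract_scoring_history_field(a_model, "training_rmse")
    return rmse_per_iteration  # list of values, or None if the column is absent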
def model_run_time_sorted_by_time(model_list):
"""
    This function is written to sort the metrics that we care about in the order in which the models were built.
    The oldest model's metric will be the first element.
:param model_list: list of models built sequentially that contains metric of interest among other fields
:return: model run time in secs sorted by order of building
"""
model_num = len(model_list)
model_runtime_sec_list = [None] * model_num
for index in range(model_num):
model_index = int(model_list[index]._id.split('_')[-1])
model_runtime_sec_list[model_index] = \
(model_list[index]._model_json["output"]["run_time"]/1000.0)
return model_runtime_sec_list
def model_seed_sorted_by_time(model_list):
"""
This function is written to find the seed used by each model in the order of when the model was built. The
oldest model metric will be the first element.
:param model_list: list of models built sequentially that contains metric of interest among other fields
:return: model seed sorted by order of building
"""
model_num = len(model_list)
model_seed_list = [None] * model_num
for index in range(model_num):
model_index = int(model_list[index]._id.split('_')[-1])
for pIndex in range(len(model_list.models[0]._model_json["parameters"])):
if model_list.models[index]._model_json["parameters"][pIndex]["name"]=="seed":
model_seed_list[model_index]=model_list.models[index]._model_json["parameters"][pIndex]["actual_value"]
break
return model_seed_list
def check_ignore_cols_automl(models,names,x,y):
models = sum(models.as_data_frame().values.tolist(),[])
for model in models:
if "StackedEnsemble" in model:
continue
else:
assert set(h2o.get_model(model).params["ignored_columns"]["actual"]) == set(names) - {y} - set(x), \
"ignored columns are not honored for model " + model
def compare_numeric_frames(f1, f2, prob=0.5, tol=1e-6):
assert (f1.nrow==f2.nrow) and (f1.ncol==f2.ncol), "The two frames are of different sizes."
temp1 = f1.asnumeric()
temp2 = f2.asnumeric()
for colInd in range(f1.ncol):
for rowInd in range(f2.nrow):
if (random.uniform(0,1) < prob):
if (math.isnan(temp1[rowInd, colInd])):
assert math.isnan(temp2[rowInd, colInd]), "Failed frame values check at row {2} and column {3}! " \
"frame1 value: {0}, frame2 value: " \
"{1}".format(temp1[rowInd, colInd], temp2[rowInd, colInd], rowInd, colInd)
else:
diff = abs(temp1[rowInd, colInd]-temp2[rowInd, colInd])/max(1.0, abs(temp1[rowInd, colInd]),
abs(temp2[rowInd, colInd]))
assert diff<=tol, "Failed frame values check at row {2} and column {3}! frame1 value: {0}, frame2 value: " \
"{1}".format(temp1[rowInd, colInd], temp2[rowInd, colInd], rowInd, colInd)
def check_sorted_2_columns(frame1, sorted_column_indices, prob=0.5, ascending=[True, True]):
for colInd in sorted_column_indices:
for rowInd in range(0, frame1.nrow-1):
if (random.uniform(0.0,1.0) < prob):
if colInd == sorted_column_indices[0]:
if not(math.isnan(frame1[rowInd, colInd])) and not(math.isnan(frame1[rowInd+1,colInd])):
if ascending[colInd]:
assert frame1[rowInd,colInd] <= frame1[rowInd+1,colInd], "Wrong sort order: value at row {0}: {1}, value at " \
"row {2}: {3}".format(rowInd, frame1[rowInd,colInd],
rowInd+1, frame1[rowInd+1,colInd])
else:
assert frame1[rowInd,colInd] >= frame1[rowInd+1,colInd], "Wrong sort order: value at row {0}: {1}, value at " \
"row {2}: {3}".format(rowInd, frame1[rowInd,colInd],
rowInd+1, frame1[rowInd+1,colInd])
else: # for second column
if not(math.isnan(frame1[rowInd, sorted_column_indices[0]])) and not(math.isnan(frame1[rowInd+1,sorted_column_indices[0]])):
if (frame1[rowInd,sorted_column_indices[0]]==frame1[rowInd+1, sorted_column_indices[0]]): # meaningful to compare row entries then
if not(math.isnan(frame1[rowInd, colInd])) and not(math.isnan(frame1[rowInd+1,colInd])):
if ascending[colInd]:
assert frame1[rowInd,colInd] <= frame1[rowInd+1,colInd], "Wrong sort order: value at row {0}: {1}, value at " \
"row {2}: {3}".format(rowInd, frame1[rowInd,colInd],
rowInd+1, frame1[rowInd+1,colInd])
else:
assert frame1[rowInd,colInd] >= frame1[rowInd+1,colInd], "Wrong sort order: value at row {0}: {1}, value at " \
"row {2}: {3}".format(rowInd, frame1[rowInd,colInd],
rowInd+1, frame1[rowInd+1,colInd])
def assert_correct_frame_operation(sourceFrame, h2oResultFrame, operString):
"""
This method checks each element of a numeric H2OFrame and throw an assert error if its value does not
equal to the same operation carried out by python.
:param sourceFrame: original H2OFrame.
:param h2oResultFrame: H2OFrame after operation on original H2OFrame is carried out.
:param operString: str representing one of 'abs', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atanh',
'ceil', 'cos', 'cosh', 'cospi', 'cumprod', 'cumsum', 'digamma', 'exp', 'expm1', 'floor', 'round',
'sin', 'sign', 'round', 'sinh', 'tan', 'tanh'
:return: None.
"""
validStrings = ['acos', 'acosh', 'asin', 'asinh', 'atan', 'atanh', 'ceil', 'cos', 'cosh',
'exp', 'floor', 'gamma', 'lgamma', 'log', 'log10', 'sin', 'sinh',
'sqrt', 'tan', 'tanh', 'trigamma', 'expm1']
npValidStrings = ['log2', 'sign']
nativeStrings = ['round', 'abs', 'cumsum']
multpi = ['cospi', 'sinpi', 'tanpi']
others = ['log1p', 'signif', 'trigamma', 'digamma', 'cumprod']
# check for valid operString
assert operString in validStrings+npValidStrings+nativeStrings+multpi+others, "Illegal operator " \
"{0} specified.".format(operString)
result_comp = lambda x:x # default method
if operString == "log1p":
result_comp = lambda x:math.log(x+1)
elif operString == 'signif':
result_comp = lambda x:round(x, 7)
elif operString == 'trigamma':
result_comp = lambda x:scipy.special.polygamma(1, x)
elif operString == 'digamma':
result_comp = lambda x:scipy.special.polygamma(0, x)
elif operString=='cumprod':
result_comp = lambda x:factorial(x)
# stringOperations = 'result_val = factorial(sourceFrame[row_ind, col_ind])'
elif operString in validStrings:
result_comp = lambda x:getattr(math, operString)(x)
    elif operString in nativeStrings:
        result_comp = lambda x: __builtins__.get(operString)(x)
        # stringOperations = 'result_val = '+operString+'(sourceFrame[row_ind, col_ind])'
elif operString in npValidStrings:
result_comp = lambda x:getattr(np, operString)(x)
# stringOperations = 'result_val = np.'+operString+'(sourceFrame[row_ind, col_ind])'
elif operString in multpi:
result_comp = lambda x:getattr(math, operString.split('p')[0])(x*math.pi)
#stringOperations = 'result_val = math.'+operString.split('p')[0]+'(sourceFrame[row_ind, col_ind]*math.pi)'
for col_ind in range(sourceFrame.ncols):
for row_ind in range(sourceFrame.nrows):
result_val = result_comp(sourceFrame[row_ind, col_ind])
assert abs(h2oResultFrame[row_ind, col_ind]-result_val) <= 1e-6, \
" command {0}({3}) is not working. Expected: {1}. Received: {2}".format(operString, result_val,
h2oResultFrame[row_ind, col_ind], sourceFrame[row_ind, col_ind])
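# Hypothetical usage sketch (the frame variable and the availability of H2OFrame.cos() are
# assumptions): verify that H2O's element-wise cos matches Python's math.cos on a numeric frame.
def _example_assert_frame_op(numeric_frame):
    assert_correct_frame_operation(numeric_frame, numeric_frame.cos(), 'cos')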
def factorial(n):
"""
Defined my own factorial just in case using python2.5 or less.
    :param n: non-negative integer
    :return: n factorial (1 when n is 0 or 1)
    """
    if n < 2:
        return 1
    return n*factorial(n-1)
def cumop(items, op, colInd=0): # take in one column only
res = [None]*len(items)
for index in range(len(items)):
res[index] = op(res[index-1], items[index, colInd]) if index > 0 else items[index, colInd]
return res
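# Small sketch showing how cumop generalises cumulative operations (inputs are hypothetical):
# with operator.add it reproduces a cumulative sum, with operator.mul a cumulative product.
def _example_cumop():
    import operator
    import numpy as np
    col = np.array([[1], [2], [3], [4]])  # items must support items[row, col] indexing
    assert cumop(col, operator.add) == [1, 3, 6, 10]
    assert cumop(col, operator.mul) == [1, 2, 6, 24]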
def compare_frames_local(f1, f2, prob=0.5, tol=1e-6):
temp1 = f1.as_data_frame(use_pandas=False)
temp2 = f2.as_data_frame(use_pandas=False)
assert (f1.nrow==f2.nrow) and (f1.ncol==f2.ncol), "The two frames are of different sizes."
for colInd in range(f1.ncol):
for rowInd in range(1,f2.nrow):
if (random.uniform(0,1) < prob):
if (math.isnan(float(temp1[rowInd][colInd]))):
assert math.isnan(float(temp2[rowInd][colInd])), "Failed frame values check at row {2} and column {3}! " \
"frame1 value: {0}, frame2 value: " \
"{1}".format(temp1[rowInd][colInd], temp2[rowInd][colInd], rowInd, colInd)
else:
v1 = float(temp1[rowInd][colInd])
v2 = float(temp2[rowInd][colInd])
diff = abs(v1-v2)/max(1.0, abs(v1), abs(v2))
assert diff<=tol, "Failed frame values check at row {2} and column {3}! frame1 value: {0}, frame2 value: " \
"{1}".format(v1, v2, rowInd, colInd)
def build_save_model_GLM(params, x, train, respName):
# build a model
model = H2OGeneralizedLinearEstimator(**params)
model.train(x=x, y=respName, training_frame=train)
# save model
regex = re.compile("[+\\-* !@#$%^&()={}\\[\\]|;:'\"<>,.?/]")
MOJONAME = regex.sub("_", model._id)
print("Downloading Java prediction model code from H2O")
TMPDIR = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath('__file__')), "..", "results", MOJONAME))
os.makedirs(TMPDIR)
model.download_mojo(path=TMPDIR) # save mojo
return model
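# Hypothetical usage sketch for build_save_model_GLM (dataset path, column names and GLM
# parameters are assumptions): train a binomial GLM and download its MOJO under ../results.
def _example_build_save_glm():
    train = h2o.import_file(path=locate("smalldata/prostate/prostate.csv"))  # assumed dataset
    train["CAPSULE"] = train["CAPSULE"].asfactor()
    params = {"family": "binomial", "alpha": 0.5}
    return build_save_model_GLM(params, ["AGE", "RACE", "PSA", "GLEASON"], train, "CAPSULE")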
# generate random dataset, copied from Pasha
def random_dataset(response_type, verbose=True, NTESTROWS=200):
"""Create and return a random dataset."""
if verbose: print("\nCreating a dataset for a %s problem:" % response_type)
fractions = {k + "_fraction": random.random() for k in "real categorical integer time string binary".split()}
fractions["string_fraction"] = 0 # Right now we are dropping string columns, so no point in having them.
fractions["binary_fraction"] /= 3
fractions["time_fraction"] /= 2
sum_fractions = sum(fractions.values())
for k in fractions:
fractions[k] /= sum_fractions
if response_type == 'binomial':
response_factors = 2
else:
response_factors = random.randint(3, 10)
df = h2o.create_frame(rows=random.randint(15000, 25000) + NTESTROWS, cols=random.randint(3, 20),
missing_fraction=0,
has_response=True, response_factors=response_factors, positive_response=True, factors=10,
**fractions)
if verbose:
print()
df.show()
return df
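# Illustrative sketch: split the synthetic frame returned above into train/test parts, holding
# out the first NTESTROWS rows as a test set (mirrors the default NTESTROWS of random_dataset).
def _example_random_split(ntestrows=200):
    df = random_dataset("binomial", verbose=False, NTESTROWS=ntestrows)
    train = df[ntestrows:, :]
    test = df[:ntestrows, :]
    return train, test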
def getMojoName(modelID):
regex = re.compile("[+\\-* !@#$%^&()={}\\[\\]|;:'\"<>,.?/]")
return regex.sub("_", modelID)
|
[] |
[] |
[
"NAME_NODE"
] |
[]
|
["NAME_NODE"]
|
python
| 1 | 0 | |
django_nose/runner.py
|
"""
Django test runner that invokes nose.
You can use
NOSE_ARGS = ['list', 'of', 'args']
in settings.py for arguments that you always want passed to nose.
"""
import os
import sys
from django.conf import settings
from django.core.management.base import BaseCommand
from django.test.simple import DjangoTestSuiteRunner
import nose.core
from django_nose.plugin import ResultPlugin
try:
any
except NameError:
def any(iterable):
for element in iterable:
if element:
return True
return False
# This is a table of Django's "manage.py test" options which
# correspond to nosetests options with a different name:
OPTION_TRANSLATION = {'--failfast': '-x'}
class NoseTestSuiteRunner(DjangoTestSuiteRunner):
def run_suite(self, nose_argv):
result_plugin = ResultPlugin()
nose.core.TestProgram(argv=nose_argv, exit=False,
addplugins=[result_plugin])
return result_plugin.result
def run_tests(self, test_labels, extra_tests=None):
"""
Run the unit tests for all the test names in the provided list.
Test names specified may be file or module names, and may optionally
indicate the test case to run by separating the module or file name
from the test case name with a colon. Filenames may be relative or
absolute. Examples:
runner.run_tests('test.module')
runner.run_tests('another.test:TestCase.test_method')
runner.run_tests('a.test:TestCase')
runner.run_tests('/path/to/test/file.py:test_function')
Returns the number of tests that failed.
"""
self.setup_test_environment()
old_names = self.setup_databases()
nose_argv = ['nosetests', '--verbosity', str(self.verbosity)]
if hasattr(settings, 'NOSE_ARGS'):
nose_argv.extend(settings.NOSE_ARGS)
# Skip over 'manage.py test' and any arguments handled by django.
django_opts = ['--noinput']
for opt in BaseCommand.option_list:
django_opts.extend(opt._long_opts)
django_opts.extend(opt._short_opts)
nose_argv.extend(OPTION_TRANSLATION.get(opt, opt)
for opt in sys.argv[2:]
if not any(opt.startswith(d) for d in django_opts))
if self.verbosity >= 1:
print ' '.join(nose_argv)
result = self.run_suite(nose_argv)
self.teardown_databases(old_names)
self.teardown_test_environment()
# suite_result expects the suite as the first argument. Fake it.
return self.suite_result({}, result)
def _get_options():
"""Return all nose options that don't conflict with django options."""
cfg_files = nose.core.all_config_files()
manager = nose.core.DefaultPluginManager()
config = nose.core.Config(env=os.environ, files=cfg_files, plugins=manager)
options = config.getParser().option_list
django_opts = [opt.dest for opt in BaseCommand.option_list] + ['version']
return tuple(o for o in options if o.dest not in django_opts and
o.action != 'help')
# Replace the builtin command options with the merged django/nose options.
NoseTestSuiteRunner.options = _get_options()
NoseTestSuiteRunner.__test__ = False
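# Example settings (illustrative; the runner path follows django-nose's documented convention,
# but treat these exact values as assumptions for this sketch):
#
#     TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
#     NOSE_ARGS = ['--with-coverage', '--cover-package=myapp']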
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
tests/test_settings.py
|
# Django settings for {{ project_name }} project.
import os
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': ':memory:', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_DIR, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'NOTASECRET'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'tests.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'dockit',
'dockit.backends.djangodocument',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
]
DOCKIT_BACKENDS = {
'default': {
'ENGINE': 'dockit.backends.djangodocument.backend.ModelDocumentStorage',
},
'djangodocument': {
'ENGINE': 'dockit.backends.djangodocument.backend.ModelDocumentStorage',
},
}
DOCKIT_INDEX_BACKENDS = {
'default': {
'ENGINE': 'dockit.backends.djangodocument.backend.ModelIndexStorage',
},
'djangodocument': {
'ENGINE': 'dockit.backends.djangodocument.backend.ModelIndexStorage',
},
}
try:
import pymongo
except ImportError:
pass
else:
from pymongo.errors import ConnectionFailure
try:
pymongo.MongoClient('localhost', 27017)
except ConnectionFailure:
pass
else:
INSTALLED_APPS.append('dockit.backends.mongo')
DOCKIT_BACKENDS['mongo'] = {
'ENGINE':'dockit.backends.mongo.backend.MongoDocumentStorage',
'HOST':'localhost',
'DB':'testdb',
'PORT': 27017,
}
DOCKIT_INDEX_BACKENDS['mongo'] = {
'ENGINE':'dockit.backends.mongo.backend.MongoIndexStorage',
'HOST':'localhost',
'DB':'testdb',
'PORT': 27017,
}
if 'TRAVIS' in os.environ:
DOCKIT_BACKENDS['mongo'] = {'ENGINE':'dockit.backends.mongo.backend.MongoDocumentStorage',
'USER':'travis',
'PASSWORD':'test',
'DB':'mydb_test',
'HOST':'127.0.0.1',
'PORT':27017,}
DOCKIT_INDEX_BACKENDS['mongo'] = {'ENGINE':'dockit.backends.mongo.backend.MongoIndexStorage',
'USER':'travis',
'PASSWORD':'test',
'DB':'mydb_test',
'HOST':'127.0.0.1',
'PORT':27017,}
if 'dockit.backends.mongo' not in INSTALLED_APPS:
INSTALLED_APPS.append('dockit.backends.mongo')
if os.environ.get('TASK_BACKEND', None) == 'celery':
DOCKIT_INDEX_BACKENDS['djangodocument']['INDEX_TASKS'] = 'dockit.backends.djangodocument.tasks.CeleryIndexTasks'
INSTALLED_APPS += ["djcelery"]
CELERY_ALWAYS_EAGER = True
import djcelery
djcelery.setup_loader()
if os.environ.get('TASK_BACKEND', None) == 'ztask':
DOCKIT_INDEX_BACKENDS['djangodocument']['INDEX_TASKS'] = 'dockit.backends.djangodocument.tasks.ZTaskIndexTasks'
INSTALLED_APPS += ["django_ztask"]
ZTASKD_ALWAYS_EAGER = True
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
[] |
[] |
[
"TASK_BACKEND"
] |
[]
|
["TASK_BACKEND"]
|
python
| 1 | 0 | |
experiments/experiment1/train.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a new model on one or across multiple GPUs.
"""
import argparse
import logging
import math
import os
import sys
from typing import Dict, Optional, Any, List, Tuple, Callable
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import torch
from fairseq import (
checkpoint_utils,
distributed_utils,
options,
quantization_utils,
tasks,
utils,
)
from fairseq.data import iterators
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import meters, metrics, progress_bar
from fairseq.model_parallel.megatron_trainer import MegatronTrainer
from omegaconf import DictConfig
from fairseq.trainer import Trainer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.train")
def main(cfg: DictConfig) -> None:
if isinstance(cfg, argparse.Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
utils.import_user_module(cfg.common)
assert (
cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None
), "Must specify batch size either with --max-tokens or --batch-size"
metrics.reset()
np.random.seed(cfg.common.seed)
utils.set_torch_seed(cfg.common.seed)
if distributed_utils.is_master(cfg.distributed_training):
checkpoint_utils.verify_checkpoint_directory(cfg.checkpoint.save_dir)
# Print args
#logger.info(cfg)
# Setup task, e.g., translation, language modeling, etc.
task = tasks.setup_task(cfg.task)
# Load valid dataset (we load training data below, based on the latest checkpoint)
for valid_sub_split in cfg.dataset.valid_subset.split(","):
task.load_dataset(valid_sub_split, combine=False, epoch=1)
assert cfg.criterion, "Please specify criterion to train a model"
# Build model and criterion
model = task.build_model(cfg.model)
criterion = task.build_criterion(cfg.criterion)
#logger.info(model)
# logger.info("task: {}".format(task.__class__.__name__))
# logger.info("model: {}".format(model.__class__.__name__))
# logger.info("criterion: {})".format(criterion.__class__.__name__))
# logger.info(
# "num. model params: {} (num. trained: {})".format(
# sum(p.numel() for p in model.parameters()),
# sum(p.numel() for p in model.parameters() if p.requires_grad),
# )
# )
# (optionally) Configure quantization
if cfg.common.quantization_config_path is not None:
quantizer = quantization_utils.Quantizer(
config_path=cfg.common.quantization_config_path,
max_epoch=cfg.optimization.max_epoch,
max_update=cfg.optimization.max_update,
)
else:
quantizer = None
# Build trainer
if cfg.common.model_parallel_size == 1:
trainer = Trainer(cfg, task, model, criterion, quantizer)
else:
trainer = MegatronTrainer(cfg, task, model, criterion)
logger.info(
"training on {} devices (GPUs/TPUs)".format(
cfg.distributed_training.distributed_world_size
)
)
logger.info(
"max tokens per GPU = {} and batch size per GPU = {}".format(
cfg.dataset.max_tokens,
cfg.dataset.batch_size,
)
)
# Load the latest checkpoint if one is available and restore the
# corresponding train iterator
extra_state, epoch_itr = checkpoint_utils.load_checkpoint(
cfg.checkpoint,
trainer,
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=task.has_sharded_data("train"),
)
max_epoch = cfg.optimization.max_epoch or math.inf
lr = trainer.get_lr()
train_meter = meters.StopwatchMeter()
train_meter.start()
while epoch_itr.next_epoch_idx <= max_epoch:
if lr <= cfg.optimization.stop_min_lr:
logger.info(
f"stopping training because current learning rate ({lr}) is smaller "
"than or equal to minimum learning rate "
f"(--stop-min-lr={cfg.optimization.stop_min_lr})"
)
break
# train for one epoch
valid_losses, should_stop = train(cfg, trainer, task, epoch_itr)
if should_stop:
break
# only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
epoch_itr = trainer.get_train_iterator(
epoch_itr.next_epoch_idx,
# sharded data: get train iterator for next epoch
load_dataset=task.has_sharded_data("train"),
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=task.has_sharded_data("train"),
)
train_meter.stop()
logger.info("done training in {:.1f} seconds".format(train_meter.sum))
def should_stop_early(cfg: DictConfig, valid_loss: float) -> bool:
# skip check if no validation was done in the current epoch
if valid_loss is None:
return False
if cfg.checkpoint.patience <= 0:
return False
def is_better(a, b):
return a > b if cfg.checkpoint.maximize_best_checkpoint_metric else a < b
prev_best = getattr(should_stop_early, "best", None)
if prev_best is None or is_better(valid_loss, prev_best):
should_stop_early.best = valid_loss
should_stop_early.num_runs = 0
return False
else:
should_stop_early.num_runs += 1
if should_stop_early.num_runs >= cfg.checkpoint.patience:
logger.info(
"early stop since valid performance hasn't improved for last {} runs".format(
cfg.checkpoint.patience
)
)
return True
else:
return False
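# Minimal standalone sketch of the patience bookkeeping above, with hypothetical losses (lower is
# better); the real implementation keeps this state on the function object itself across calls.
def _patience_demo(losses=(0.9, 0.8, 0.85, 0.84), patience=2):
    best, bad_runs = None, 0
    for loss in losses:
        if best is None or loss < best:
            best, bad_runs = loss, 0
        else:
            bad_runs += 1
            if bad_runs >= patience:
                return True  # stop: two successive evaluations failed to improve on 0.8
    return False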
@metrics.aggregate("train")
def train(
cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr
) -> Tuple[List[Optional[float]], bool]:
"""Train the model for one epoch and return validation losses."""
# Initialize data iterator
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=cfg.distributed_training.fix_batches_to_gpus,
shuffle=(epoch_itr.next_epoch_idx > cfg.dataset.curriculum),
)
update_freq = (
cfg.optimization.update_freq[epoch_itr.epoch - 1]
if epoch_itr.epoch <= len(cfg.optimization.update_freq)
else cfg.optimization.update_freq[-1]
)
itr = iterators.GroupedIterator(itr, update_freq)
if cfg.common.tpu:
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
epoch=epoch_itr.epoch,
tensorboard_logdir=(
cfg.common.tensorboard_logdir
if distributed_utils.is_master(cfg.distributed_training)
else None
),
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
wandb_project=(
cfg.common.wandb_project if distributed_utils.is_master(cfg.distributed_training) else None
),
)
trainer.begin_epoch(epoch_itr.epoch)
valid_subsets = cfg.dataset.valid_subset.split(",")
should_stop = False
num_updates = trainer.get_num_updates()
writer = SummaryWriter()
for i, samples in enumerate(progress):
with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function(
"train_step-%d" % i
):
log_output = trainer.train_step(samples)
if log_output is not None: # not OOM, overflow, ...
# log mid-epoch stats
num_updates = trainer.get_num_updates()
if num_updates % cfg.common.log_interval == 0:
stats = get_training_stats(metrics.get_smoothed_values("train_inner"))
progress.log(stats, tag="train_inner", step=num_updates)
# reset mid-epoch stats after each log interval
# the end-of-epoch stats will still be preserved
metrics.reset_meters("train_inner")
writer.add_scalar("Loss/train", log_output['loss'], i)
end_of_epoch = not itr.has_next()
valid_losses, should_stop = validate_and_save(
cfg, trainer, task, epoch_itr, valid_subsets, end_of_epoch
)
#writer.add_scalar("Loss/valid", valid_losses[-1], i)
if should_stop:
break
# log end-of-epoch stats
logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch))
stats = get_training_stats(metrics.get_smoothed_values("train"))
progress.print(stats, tag="train", step=num_updates)
# reset epoch-level meters
metrics.reset_meters("train")
return valid_losses, should_stop
def validate_and_save(
cfg: DictConfig,
trainer: Trainer,
task: tasks.FairseqTask,
epoch_itr,
valid_subsets: List[str],
end_of_epoch: bool,
) -> Tuple[List[Optional[float]], bool]:
num_updates = trainer.get_num_updates()
max_update = cfg.optimization.max_update or math.inf
do_save = (
(end_of_epoch and epoch_itr.epoch % cfg.checkpoint.save_interval == 0)
or num_updates >= max_update
or (
cfg.checkpoint.save_interval_updates > 0
and num_updates > 0
and num_updates % cfg.checkpoint.save_interval_updates == 0
and num_updates >= cfg.dataset.validate_after_updates
)
)
do_validate = (
(not end_of_epoch and do_save) # validate during mid-epoch saves
or (end_of_epoch and epoch_itr.epoch % cfg.dataset.validate_interval == 0)
or num_updates >= max_update
or (
cfg.dataset.validate_interval_updates > 0
and num_updates > 0
and num_updates % cfg.dataset.validate_interval_updates == 0
)
) and not cfg.dataset.disable_validation
# Validate
valid_losses = [None]
if do_validate:
valid_losses = validate(cfg, trainer, task, epoch_itr, valid_subsets)
# Stopping conditions
should_stop = (
should_stop_early(cfg, valid_losses[0])
or num_updates >= max_update
or (
cfg.optimization.stop_time_hours > 0
and trainer.cumulative_training_time() / (60 * 60)
> cfg.optimization.stop_time_hours
)
)
# Save checkpoint
if do_save or should_stop:
logger.info("begin save checkpoint")
checkpoint_utils.save_checkpoint(
cfg.checkpoint, trainer, epoch_itr, valid_losses[0]
)
return valid_losses, should_stop
def get_training_stats(stats: Dict[str, Any]) -> Dict[str, Any]:
stats["wall"] = round(metrics.get_meter("default", "wall").elapsed_time, 0)
return stats
def validate(
cfg: DictConfig,
trainer: Trainer,
task: tasks.FairseqTask,
epoch_itr,
subsets: List[str],
) -> List[Optional[float]]:
"""Evaluate the model on the validation set(s) and return the losses."""
if cfg.dataset.fixed_validation_seed is not None:
# set fixed seed for every validation
utils.set_torch_seed(cfg.dataset.fixed_validation_seed)
trainer.begin_valid_epoch(epoch_itr.epoch)
valid_losses = []
for subset in subsets:
logger.info('begin validation on "{}" subset'.format(subset))
# Initialize data iterator
itr = trainer.get_valid_iterator(subset).next_epoch_itr(shuffle=False)
if cfg.common.tpu:
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
epoch=epoch_itr.epoch,
prefix=f"valid on '{subset}' subset",
tensorboard_logdir=(
cfg.common.tensorboard_logdir
if distributed_utils.is_master(cfg.distributed_training)
else None
),
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
wandb_project=(
cfg.common.wandb_project if distributed_utils.is_master(cfg.distributed_training) else None
),
)
# create a new root metrics aggregator so validation metrics
# don't pollute other aggregators (e.g., train meters)
with metrics.aggregate(new_root=True) as agg:
for sample in progress:
trainer.valid_step(sample)
# log validation stats
stats = get_valid_stats(cfg, trainer, agg.get_smoothed_values())
progress.print(stats, tag=subset, step=trainer.get_num_updates())
valid_losses.append(stats[cfg.checkpoint.best_checkpoint_metric])
return valid_losses
def get_valid_stats(
cfg: DictConfig, trainer: Trainer, stats: Dict[str, Any]
) -> Dict[str, Any]:
stats["num_updates"] = trainer.get_num_updates()
if hasattr(checkpoint_utils.save_checkpoint, "best"):
key = "best_{0}".format(cfg.checkpoint.best_checkpoint_metric)
best_function = max if cfg.checkpoint.maximize_best_checkpoint_metric else min
stats[key] = best_function(
checkpoint_utils.save_checkpoint.best,
stats[cfg.checkpoint.best_checkpoint_metric],
)
return stats
def cli_main(
modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None
) -> None:
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser, modify_parser=modify_parser)
cfg = convert_namespace_to_omegaconf(args)
if args.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(cfg, main)
else:
distributed_utils.call_main(cfg, main)
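# Typical invocation (hypothetical paths and arguments; the flags mirror standard fairseq
# training options and are not taken from this experiment's scripts):
#
#     python train.py data-bin/my_dataset --arch transformer --max-tokens 4096 \
#         --optimizer adam --lr 5e-4 --save-dir checkpoints/experiment1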
if __name__ == "__main__":
cli_main()
|
[] |
[] |
[
"LOGLEVEL"
] |
[]
|
["LOGLEVEL"]
|
python
| 1 | 0 | |
packstack/plugins/prescript_000.py
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Plugin responsible for setting OpenStack global options
"""
import glob
import os
import re
import uuid
from packstack.installer import basedefs
from packstack.installer import exceptions
from packstack.installer import processors
from packstack.installer import utils
from packstack.installer import validators
from packstack.modules.common import filtered_hosts
from packstack.modules.common import is_all_in_one
from packstack.modules.ospluginutils import appendManifestFile
from packstack.modules.ospluginutils import getManifestTemplate
# ------------- Prescript Packstack Plugin Initialization --------------
PLUGIN_NAME = "Prescript"
PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
def initConfig(controller):
default_ssh_key = os.path.join(os.environ["HOME"], ".ssh/*.pub")
default_ssh_key = (glob.glob(default_ssh_key) + [""])[0]
params = {
"GLOBAL": [
{"CMD_OPTION": "ssh-public-key",
"USAGE": (
"Path to a Public key to install on servers. If a usable "
"key has not been installed on the remote servers the user "
"will be prompted for a password and this key will be "
"installed so the password will not be required again"
),
"PROMPT": (
"Enter the path to your ssh Public key to install on servers"
),
"OPTION_LIST": [],
"VALIDATORS": [
validators.validate_file,
validators.validate_sshkey
],
"PROCESSORS": [processors.process_ssh_key],
"DEFAULT_VALUE": default_ssh_key,
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_SSH_KEY",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
            {"CMD_OPTION": "default-password",
             "USAGE": (
                 "Set a default password everywhere. The default password "
                 "will be overridden by whatever password is set for each "
                 "individual service or user."
),
"PROMPT": (
"Enter a default password to be used. Leave blank for a "
"randomly generated one."
),
"OPTION_LIST": [],
"DEFAULT_VALUE": '',
"MASK_INPUT": True,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_DEFAULT_PASSWORD",
"USE_DEFAULT": False,
"NEED_CONFIRM": True,
"CONDITION": False},
{"CMD_OPTION": "mariadb-install",
"USAGE": (
"Set to 'y' if you would like Packstack to install MariaDB"
),
"PROMPT": "Should Packstack install MariaDB",
"OPTION_LIST": ["y", "n"],
"VALIDATORS": [validators.validate_options],
"DEFAULT_VALUE": "y",
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_MARIADB_INSTALL",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False,
"DEPRECATES": ['CONFIG_MYSQL_INSTALL']},
{"CMD_OPTION": "os-glance-install",
"USAGE": (
"Set to 'y' if you would like Packstack to install "
"OpenStack Image Service (Glance)"
),
"PROMPT": (
"Should Packstack install OpenStack Image Service (Glance)"
),
"OPTION_LIST": ["y", "n"],
"VALIDATORS": [validators.validate_options],
"DEFAULT_VALUE": "y",
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_GLANCE_INSTALL",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "os-cinder-install",
"USAGE": (
"Set to 'y' if you would like Packstack to install "
"OpenStack Block Storage (Cinder)"
),
"PROMPT": (
"Should Packstack install OpenStack Block Storage "
"(Cinder) service"
),
"OPTION_LIST": ["y", "n"],
"VALIDATORS": [validators.validate_options],
"DEFAULT_VALUE": "y",
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_CINDER_INSTALL",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "os-manila-install",
"USAGE": (
"Set to 'y' if you would like Packstack to install "
"OpenStack Shared File System (Manila)"
),
"PROMPT": (
"Should Packstack install OpenStack Shared File System "
"(Manila) service"
),
"OPTION_LIST": ["y", "n"],
"VALIDATORS": [validators.validate_options],
"DEFAULT_VALUE": "n",
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_MANILA_INSTALL",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "os-nova-install",
"USAGE": (
"Set to 'y' if you would like Packstack to install "
"OpenStack Compute (Nova)"
),
"PROMPT": (
"Should Packstack install OpenStack Compute (Nova) service"
),
"OPTION_LIST": ["y", "n"],
"VALIDATORS": [validators.validate_options],
"DEFAULT_VALUE": "y",
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_NOVA_INSTALL",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "os-neutron-install",
"USAGE": (
"Set to 'y' if you would like Packstack to install "
"OpenStack Networking (Neutron). Otherwise Nova Network "
"will be used."
),
"PROMPT": (
"Should Packstack install OpenStack Networking (Neutron) "
"service"
),
"OPTION_LIST": ["y", "n"],
"VALIDATORS": [validators.validate_options],
"DEFAULT_VALUE": "y",
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_NEUTRON_INSTALL",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "os-horizon-install",
"USAGE": (
"Set to 'y' if you would like Packstack to install "
"OpenStack Dashboard (Horizon)"
),
"PROMPT": (
"Should Packstack install OpenStack Dashboard (Horizon)"
),
"OPTION_LIST": ["y", "n"],
"VALIDATORS": [validators.validate_options],
"DEFAULT_VALUE": "y",
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_HORIZON_INSTALL",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "os-swift-install",
"USAGE": (
"Set to 'y' if you would like Packstack to install "
"OpenStack Object Storage (Swift)"
),
"PROMPT": (
"Should Packstack install OpenStack Object Storage (Swift)"
),
"OPTION_LIST": ["y", "n"],
"VALIDATORS": [validators.validate_options],
"DEFAULT_VALUE": "y",
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_SWIFT_INSTALL",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "os-ceilometer-install",
"USAGE": (
"Set to 'y' if you would like Packstack to install "
"OpenStack Metering (Ceilometer)"
),
"PROMPT": (
"Should Packstack install OpenStack Metering (Ceilometer)"
),
"OPTION_LIST": ["y", "n"],
"VALIDATORS": [validators.validate_options],
"DEFAULT_VALUE": "y",
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_CEILOMETER_INSTALL",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "os-heat-install",
"USAGE": (
"Set to 'y' if you would like Packstack to install "
"OpenStack Orchestration (Heat)"
),
"PROMPT": (
"Should Packstack install OpenStack Orchestration (Heat)"
),
"OPTION_LIST": ["y", "n"],
"VALIDATORS": [validators.validate_options],
"DEFAULT_VALUE": "n",
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_HEAT_INSTALL",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "os-sahara-install",
"USAGE": (
"Set to 'y' if you would like Packstack to install "
"OpenStack Clustering (Sahara)"
),
"PROMPT": (
"Should Packstack install OpenStack Clustering (Sahara)"
),
"OPTION_LIST": ["y", "n"],
"VALIDATORS": [validators.validate_options],
"DEFAULT_VALUE": "n",
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_SAHARA_INSTALL",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "os-trove-install",
"USAGE": (
"Set to 'y' if you would like Packstack to install "
"OpenStack Database (Trove)"
),
"PROMPT": (
"Should Packstack install OpenStack Database (Trove)"
),
"OPTION_LIST": ["y", "n"],
"VALIDATORS": [validators.validate_options],
"DEFAULT_VALUE": "n",
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_TROVE_INSTALL",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "os-ironic-install",
"USAGE": (
"Set to 'y' if you would like Packstack to install "
"OpenStack Bare Metal (Ironic)"
),
"PROMPT": (
"Should Packstack install OpenStack Bare Metal (Ironic)"
),
"OPTION_LIST": ["y", "n"],
"VALIDATORS": [validators.validate_options],
"DEFAULT_VALUE": "n",
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_IRONIC_INSTALL",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "os-client-install",
"USAGE": (
"Set to 'y' if you would like Packstack to install "
"the OpenStack Client packages. An admin \"rc\" file will "
"also be installed"
),
"PROMPT": "Should Packstack install OpenStack client tools",
"OPTION_LIST": ["y", "n"],
"VALIDATORS": [validators.validate_options],
"DEFAULT_VALUE": "y",
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_CLIENT_INSTALL",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "ntp-servers",
"USAGE": ("Comma separated list of NTP servers. Leave plain if "
"Packstack should not install ntpd on instances."),
"PROMPT": ("Enter a comma separated list of NTP server(s). Leave "
"plain if Packstack should not install ntpd "
"on instances."),
"OPTION_LIST": [],
"DEFAULT_VALUE": '',
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_NTP_SERVERS",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "nagios-install",
"USAGE": (
"Set to 'y' if you would like Packstack to install Nagios "
"to monitor OpenStack hosts"
),
"PROMPT": (
"Should Packstack install Nagios to monitor OpenStack "
"hosts"
),
"OPTION_LIST": ["y", "n"],
"VALIDATORS": [validators.validate_options],
"DEFAULT_VALUE": 'y',
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_NAGIOS_INSTALL",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "exclude-servers",
"USAGE": (
"Comma separated list of servers to be excluded from "
"installation in case you are running Packstack the second "
"time with the same answer file and don't want Packstack "
"to touch these servers. Leave plain if you don't need to "
"exclude any server."
),
"PROMPT": (
"Enter a comma separated list of server(s) to be excluded."
" Leave plain if you don't need to exclude any server."
),
"OPTION_LIST": [],
"DEFAULT_VALUE": '',
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": "EXCLUDE_SERVERS",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "os-debug-mode",
"USAGE": (
"Set to 'y' if you want to run OpenStack services in debug "
"mode. Otherwise set to 'n'."
),
"PROMPT": "Do you want to run OpenStack services in debug mode",
"OPTION_LIST": ["y", "n"],
"DEFAULT_VALUE": "n",
"VALIDATORS": [validators.validate_options],
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_DEBUG_MODE",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CONF_NAME": "CONFIG_CONTROLLER_HOST",
"CMD_OPTION": "os-controller-host",
"USAGE": (
"The IP address of the server on which to install OpenStack"
" services specific to controller role such as API servers,"
" Horizon, etc."
),
"PROMPT": "Enter the IP address of the controller host",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_ip,
validators.validate_ssh],
"DEFAULT_VALUE": utils.get_localhost_ip(),
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False,
"DEPRECATES": ['CONFIG_CEILOMETER_HOST',
'CONFIG_CINDER_HOST',
'CONFIG_GLANCE_HOST',
'CONFIG_HORIZON_HOST',
'CONFIG_HEAT_HOST',
'CONFIG_IRONIC_HOST',
'CONFIG_KEYSTONE_HOST',
'CONFIG_NAGIOS_HOST',
'CONFIG_NEUTRON_SERVER_HOST',
'CONFIG_NEUTRON_LBAAS_HOSTS',
'CONFIG_NOVA_API_HOST',
'CONFIG_NOVA_CERT_HOST',
'CONFIG_NOVA_VNCPROXY_HOST',
'CONFIG_NOVA_SCHED_HOST',
'CONFIG_OSCLIENT_HOST',
'CONFIG_SWIFT_PROXY_HOSTS']},
{"CONF_NAME": "CONFIG_COMPUTE_HOSTS",
"CMD_OPTION": "os-compute-hosts",
"USAGE": (
"The list of IP addresses of the server on which to install"
" the Nova compute service"
),
"PROMPT": (
"Enter list of IP addresses on which to install compute "
"service"
),
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_multi_ip,
validators.validate_multi_ssh],
"DEFAULT_VALUE": utils.get_localhost_ip(),
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False,
"DEPRECATES": ['CONFIG_NOVA_COMPUTE_HOSTS']},
{"CONF_NAME": "CONFIG_NETWORK_HOSTS",
"CMD_OPTION": "os-network-hosts",
"USAGE": ("The list of IP addresses of the server on which "
"to install the network service such as Nova "
"network or Neutron"),
"PROMPT": ("Enter list of IP addresses on which to install "
"network service"),
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_multi_ip,
validators.validate_multi_ssh],
"DEFAULT_VALUE": utils.get_localhost_ip(),
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False,
"DEPRECATES": ['CONFIG_NEUTRON_L3_HOSTS',
'CONFIG_NEUTRON_DHCP_HOSTS',
'CONFIG_NEUTRON_METADATA_HOSTS',
'CONFIG_NOVA_NETWORK_HOSTS']},
{"CMD_OPTION": "os-vmware",
"USAGE": (
"Set to 'y' if you want to use VMware vCenter as hypervisor"
" and storage. Otherwise set to 'n'."
),
"PROMPT": (
"Do you want to use VMware vCenter as hypervisor and "
"datastore"
),
"OPTION_LIST": ["y", "n"],
"DEFAULT_VALUE": "n",
"VALIDATORS": [validators.validate_options],
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_VMWARE_BACKEND",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
            {"CMD_OPTION": "unsupported",
             "USAGE": (
                 "Set to 'y' if you want to use unsupported parameters. "
                 "This should be used only if you know what you are doing. "
                 "Issues caused by using unsupported options won't be fixed "
                 "before next major release."
             ),
             "PROMPT": (
                 "Enable this at your own risk. Do you want to use "
                 "unsupported parameters"
),
"OPTION_LIST": ["y", "n"],
"DEFAULT_VALUE": "n",
"VALIDATORS": [validators.validate_options],
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_UNSUPPORTED",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
],
"VMWARE": [
{"CMD_OPTION": "vcenter-host",
"USAGE": "The IP address of the VMware vCenter server",
"PROMPT": (
"Enter the IP address of the VMware vCenter server to use "
"with Nova"
),
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_ip],
"DEFAULT_VALUE": "",
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_VCENTER_HOST",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "vcenter-username",
"USAGE": "The username to authenticate to VMware vCenter server",
"PROMPT": ("Enter the username to authenticate on VMware "
"vCenter server"),
"DEFAULT_VALUE": "",
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_VCENTER_USER",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "vcenter-password",
"USAGE": "The password to authenticate to VMware vCenter server",
"PROMPT": ("Enter the password to authenticate on VMware "
"vCenter server"),
"DEFAULT_VALUE": "",
"MASK_INPUT": True,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_VCENTER_PASSWORD",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "vcenter-cluster",
"USAGE": "The name of the vCenter cluster",
"PROMPT": "Enter the name of the vCenter datastore",
"DEFAULT_VALUE": "",
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_VCENTER_CLUSTER_NAME",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
],
"UNSUPPORTED": [
{"CONF_NAME": "CONFIG_STORAGE_HOST",
"CMD_OPTION": "os-storage-host",
"USAGE": (
"(Unsupported!) The IP address of the server on which "
"to install OpenStack services specific to storage servers "
"such as Glance and Cinder."
),
"PROMPT": "Enter the IP address of the storage host",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_ip,
validators.validate_ssh],
"DEFAULT_VALUE": utils.get_localhost_ip(),
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CONF_NAME": "CONFIG_SAHARA_HOST",
"CMD_OPTION": "os-sahara-host",
"USAGE": (
"(Unsupported!) The IP address of the server on which "
"to install OpenStack services specific to Sahara"
),
"PROMPT": "Enter the IP address of the Sahara host",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_ip,
validators.validate_ssh],
"DEFAULT_VALUE": utils.get_localhost_ip(),
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
]
}
def use_vcenter(config):
return (config['CONFIG_NOVA_INSTALL'] == 'y' and
config['CONFIG_VMWARE_BACKEND'] == 'y')
def unsupported_enabled(config):
return config['CONFIG_UNSUPPORTED'] == 'y'
groups = [
{"GROUP_NAME": "GLOBAL",
"DESCRIPTION": "Global Options",
"PRE_CONDITION": lambda x: 'yes',
"PRE_CONDITION_MATCH": "yes",
"POST_CONDITION": False,
"POST_CONDITION_MATCH": True},
{"GROUP_NAME": "VMWARE",
"DESCRIPTION": "vCenter Config Parameters",
"PRE_CONDITION": use_vcenter,
"PRE_CONDITION_MATCH": True,
"POST_CONDITION": False,
"POST_CONDITION_MATCH": True},
{"GROUP_NAME": "UNSUPPORTED",
"DESCRIPTION": "Global unsupported options",
"PRE_CONDITION": unsupported_enabled,
"PRE_CONDITION_MATCH": True,
"POST_CONDITION": False,
"POST_CONDITION_MATCH": True}
]
for group in groups:
controller.addGroup(group, params[group['GROUP_NAME']])
def initSequences(controller):
prescript_steps = [
{'title': 'Setting up ssh keys',
'functions': [install_keys]},
{'title': 'Discovering hosts\' details',
'functions': [discover]},
{'title': 'Adding pre install manifest entries',
'functions': [create_manifest]},
]
if controller.CONF['CONFIG_NTP_SERVERS']:
prescript_steps.append(
{'title': 'Installing time synchronization via NTP',
'functions': [create_ntp_manifest]})
else:
controller.MESSAGES.append('Time synchronization installation was '
'skipped. Please note that unsynchronized '
                                    'time on server instances might be a problem '
'for some OpenStack components.')
controller.addSequence("Running pre install scripts", [], [],
prescript_steps)
# -------------------------- step functions --------------------------
def install_keys_on_host(hostname, sshkeydata):
server = utils.ScriptRunner(hostname)
# TODO replace all that with ssh-copy-id
server.append("mkdir -p ~/.ssh")
server.append("chmod 500 ~/.ssh")
server.append("grep '%s' ~/.ssh/authorized_keys > /dev/null 2>&1 || "
"echo %s >> ~/.ssh/authorized_keys"
% (sshkeydata, sshkeydata))
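    # The grep guard above makes the append idempotent: if the key is already
    # present in ~/.ssh/authorized_keys the echo is skipped, so repeated
    # packstack runs do not duplicate it.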
server.append("chmod 400 ~/.ssh/authorized_keys")
server.append("restorecon -r ~/.ssh")
server.execute()
def install_keys(config, messages):
with open(config["CONFIG_SSH_KEY"]) as fp:
sshkeydata = fp.read().strip()
# If this is a --allinone install *and* we are running as root,
    # we can configure the authorized_keys file locally, avoiding problems
# if PasswordAuthentication is disabled.
if is_all_in_one(config) and os.getuid() == 0:
install_keys_on_host(None, sshkeydata)
else:
for hostname in filtered_hosts(config):
if '/' in hostname:
hostname = hostname.split('/')[0]
install_keys_on_host(hostname, sshkeydata)
def discover(config, messages):
"""
Discovers details about hosts.
"""
# TODO: Once Controller is refactored, move this function to it (facter can
# be used for that too).
details = {}
release_regexp = re.compile(r'^(?P<OS>.*) release (?P<release>[\d\.]*)')
config['HOST_LIST'] = list(filtered_hosts(config))
for host in config['HOST_LIST']:
details.setdefault(host, {})
server = utils.ScriptRunner(host)
# discover OS and release
server.append('cat /etc/redhat-release')
try:
rc, out = server.execute()
match = release_regexp.search(out)
if not match:
raise exceptions.ScriptRuntimeError()
except exceptions.ScriptRuntimeError:
details[host]['os'] = 'Unknown'
details[host]['release'] = 'Unknown'
else:
opsys = match.group('OS')
for pattern, surr in [('^Red Hat Enterprise Linux.*', 'RHEL'),
('^Fedora.*', 'Fedora'),
('^CentOS.*', 'CentOS'),
('^Scientific Linux.*', 'SL')]:
opsys = re.sub(pattern, surr, opsys)
details[host]['os'] = opsys
details[host]['release'] = match.group('release')
# Create the packstack tmp directory
server.clear()
server.append("mkdir -p %s" % basedefs.PACKSTACK_VAR_DIR)
# Separately create the tmp directory for this packstack run, this will
# fail if the directory already exists
host_dir = os.path.join(basedefs.PACKSTACK_VAR_DIR, uuid.uuid4().hex)
server.append("mkdir --mode 0700 %s" % host_dir)
for i in ('modules', 'resources'):
server.append("mkdir --mode 0700 %s" % os.path.join(host_dir, i))
server.execute()
details[host]['tmpdir'] = host_dir
config['HOST_DETAILS'] = details
def create_manifest(config, messages):
key = 'CONFIG_DEBUG_MODE'
    config[key] = (config[key] == 'y')
for hostname in filtered_hosts(config):
manifestfile = "%s_prescript.pp" % hostname
manifestdata = getManifestTemplate("prescript")
appendManifestFile(manifestfile, manifestdata)
def create_ntp_manifest(config, messages):
srvlist = [i.strip()
for i in config['CONFIG_NTP_SERVERS'].split(',')
if i.strip()]
config['CONFIG_NTP_SERVERS'] = ' '.join(srvlist)
    definition = '\n'.join(['server %s' % i for i in srvlist])
    config['CONFIG_NTP_SERVER_DEF'] = '%s\n' % definition
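    # For example (hypothetical servers), a CONFIG_NTP_SERVERS value of
    # "0.pool.ntp.org, 1.pool.ntp.org" renders CONFIG_NTP_SERVER_DEF as:
    #   server 0.pool.ntp.org
    #   server 1.pool.ntp.org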
marker = uuid.uuid4().hex[:16]
for hostname in filtered_hosts(config):
releaseos = config['HOST_DETAILS'][hostname]['os']
releasever = config['HOST_DETAILS'][hostname]['release'].split('.')[0]
# Configure chrony for Fedora or RHEL/CentOS 7
if releaseos == 'Fedora' or releasever == '7':
manifestdata = getManifestTemplate('chrony')
appendManifestFile('%s_chrony.pp' % hostname,
manifestdata,
marker=marker)
# For previous versions, configure ntpd
else:
manifestdata = getManifestTemplate('ntpd')
appendManifestFile('%s_ntpd.pp' % hostname,
manifestdata,
marker=marker)
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
demo/main.go
|
package main
import (
"context"
"log"
"os"
"github.com/google/go-github/github"
"github.com/progrium/go-githubfs"
"golang.org/x/oauth2"
)
func main() {
ctx := context.Background()
ts := oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: os.Getenv("GITHUB_ACCESS_TOKEN")},
)
tc := oauth2.NewClient(ctx, ts)
client := github.NewClient(tc)
fs, err := githubfs.NewGitHubFs(client, "progrium", "go-githubfs", "master")
if err != nil {
panic(err)
}
f, err := fs.OpenFile("test/baz2", os.O_APPEND, 0644)
if err != nil {
log.Fatal(err)
}
f.Write([]byte("Hello world....\n"))
err = f.Close()
if err != nil {
log.Fatal(err)
}
//fmt.Printf("%# v", pretty.Formatter(fs))
}
|
[
"\"GITHUB_ACCESS_TOKEN\""
] |
[] |
[
"GITHUB_ACCESS_TOKEN"
] |
[]
|
["GITHUB_ACCESS_TOKEN"]
|
go
| 1 | 0 | |
metricbeat/module/etcd/metrics/metrics_integration_test.go
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// +build integration
package metrics
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/elastic/beats/libbeat/logp"
"github.com/elastic/beats/libbeat/tests/compose"
mbtest "github.com/elastic/beats/metricbeat/mb/testing"
)
func TestFetch(t *testing.T) {
logp.TestingSetup()
compose.EnsureUp(t, "etcd")
f := mbtest.NewReportingMetricSetV2(t, getConfig())
events, errs := mbtest.ReportingFetchV2(f)
if len(errs) > 0 {
t.Fatalf("Expected 0 error, had %d. %v\n", len(errs), errs)
}
assert.NotEmpty(t, events)
t.Logf("%s/%s event: %+v", f.Module().Name(), f.Name(), events[0])
}
func getConfig() map[string]interface{} {
return map[string]interface{}{
"module": "etcd",
"metricsets": []string{"metrics"},
"hosts": []string{GetEnvHost() + ":" + GetEnvPort()},
}
}
func GetEnvHost() string {
host := os.Getenv("ETCD_HOST")
if len(host) == 0 {
host = "127.0.0.1"
}
return host
}
func GetEnvPort() string {
port := os.Getenv("ETCD_PORT")
if len(port) == 0 {
port = "2379"
}
return port
}
|
[
"\"ETCD_HOST\"",
"\"ETCD_PORT\""
] |
[] |
[
"ETCD_PORT",
"ETCD_HOST"
] |
[]
|
["ETCD_PORT", "ETCD_HOST"]
|
go
| 2 | 0 | |
sources/pil/setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
def prerelease_local_scheme(version):
"""
Return local scheme version unless building on master in CircleCI.
This function returns the local scheme version number
(e.g. 0.0.0.dev<N>+g<HASH>) unless building on CircleCI for a
pre-release in which case it ignores the hash and produces a
PEP440 compliant pre-release version number (e.g. 0.0.0.dev<N>).
"""
from setuptools_scm.version import get_local_node_and_date
if os.getenv('CIRCLE_BRANCH') in ('master', ):
return ''
else:
return get_local_node_and_date(version)
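# Illustrative behaviour of the scheme above (version numbers are hypothetical):
# on a CircleCI master build the local part is dropped, giving e.g. "1.0.1.dev3",
# while elsewhere setuptools_scm keeps its node-and-date local scheme,
# e.g. "1.0.1.dev3+g1a2b3c4.d20200101".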
setup(
name='large-image-source-pil',
use_scm_version={'root': '../..', 'local_scheme': prerelease_local_scheme},
setup_requires=['setuptools-scm'],
description='A Pillow tilesource for large_image',
long_description='See the large-image package for more details.',
author='Kitware, Inc.',
author_email='[email protected]',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
install_requires=[
'large-image>=1.0.0',
],
extras_require={
'girder': 'girder-large-image>=1.0.0',
},
license='Apache Software License 2.0',
keywords='large_image, tile source',
packages=find_packages(exclude=['test', 'test.*']),
url='https://github.com/girder/large_image',
entry_points={
'large_image.source': [
'pil = large_image_source_pil:PILFileTileSource'
],
'girder_large_image.source': [
'pil = large_image_source_pil.girder_source:PILGirderTileSource'
]
},
)
|
[] |
[] |
[
"CIRCLE_BRANCH"
] |
[]
|
["CIRCLE_BRANCH"]
|
python
| 1 | 0 | |
docker/client/client.go
|
package client
import (
"fmt"
"net/http"
"os"
"path/filepath"
"runtime"
cliconfig "github.com/docker/cli/cli/config"
dockerclient "github.com/docker/docker/client"
"github.com/docker/docker/pkg/homedir"
"github.com/docker/go-connections/sockets"
"github.com/docker/go-connections/tlsconfig"
"github.com/docker/libcompose/version"
)
const (
// DefaultAPIVersion is the default docker API version set by libcompose
DefaultAPIVersion = "v1.20"
defaultTrustKeyFile = "key.json"
defaultCaFile = "ca.pem"
defaultKeyFile = "key.pem"
defaultCertFile = "cert.pem"
)
var (
dockerCertPath = os.Getenv("DOCKER_CERT_PATH")
)
func init() {
if dockerCertPath == "" {
dockerCertPath = cliconfig.Dir()
}
}
// Options holds docker client options (host, tls, ..)
type Options struct {
TLS bool
TLSVerify bool
TLSOptions tlsconfig.Options
TrustKey string
Host string
APIVersion string
}
var singleton *dockerclient.Client
// Create creates a docker client based on the specified options.
func Create(c Options) (dockerclient.APIClient, error) {
if singleton != nil {
return singleton, nil
}
if c.Host == "" {
if os.Getenv("DOCKER_API_VERSION") == "" {
os.Setenv("DOCKER_API_VERSION", DefaultAPIVersion)
}
myclient, err := dockerclient.NewEnvClient()
if err != nil {
return nil, err
}
singleton = myclient
return myclient, nil
}
apiVersion := c.APIVersion
if apiVersion == "" {
apiVersion = DefaultAPIVersion
}
if c.TLSOptions.CAFile == "" {
c.TLSOptions.CAFile = filepath.Join(dockerCertPath, defaultCaFile)
}
if c.TLSOptions.CertFile == "" {
c.TLSOptions.CertFile = filepath.Join(dockerCertPath, defaultCertFile)
}
if c.TLSOptions.KeyFile == "" {
c.TLSOptions.KeyFile = filepath.Join(dockerCertPath, defaultKeyFile)
}
if c.TrustKey == "" {
c.TrustKey = filepath.Join(homedir.Get(), ".docker", defaultTrustKeyFile)
}
if c.TLSVerify {
c.TLS = true
}
if c.TLS {
c.TLSOptions.InsecureSkipVerify = !c.TLSVerify
}
var httpClient *http.Client
if c.TLS {
config, err := tlsconfig.Client(c.TLSOptions)
if err != nil {
return nil, err
}
tr := &http.Transport{
TLSClientConfig: config,
}
clientURL, err := dockerclient.ParseHostURL(c.Host)
if err != nil {
return nil, err
}
if err := sockets.ConfigureTransport(tr, clientURL.Scheme, clientURL.Host); err != nil {
return nil, err
}
httpClient = &http.Client{
Transport: tr,
}
}
customHeaders := map[string]string{}
customHeaders["User-Agent"] = fmt.Sprintf("Libcompose-Client/%s (%s)", version.VERSION, runtime.GOOS)
myclient, err := dockerclient.NewClient(c.Host, apiVersion, httpClient, customHeaders)
if err != nil {
return nil, err
}
singleton = myclient
return myclient, nil
}
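// Illustrative usage sketch (not part of the upstream file; the host value is
// hypothetical):
//
//	cli, err := Create(Options{
//		Host:       "tcp://10.0.0.5:2376",
//		TLSVerify:  true,
//		APIVersion: "v1.20",
//	})
//	if err != nil {
//		// handle the error
//	}
//	_ = cli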
|
[
"\"DOCKER_CERT_PATH\"",
"\"DOCKER_API_VERSION\""
] |
[] |
[
"DOCKER_API_VERSION",
"DOCKER_CERT_PATH"
] |
[]
|
["DOCKER_API_VERSION", "DOCKER_CERT_PATH"]
|
go
| 2 | 0 | |
pydub/utils.py
|
from __future__ import division
from math import log, ceil, floor
import os
import re
from subprocess import Popen, PIPE
import sys
from tempfile import TemporaryFile
from warnings import warn
import json
try:
import audioop
except ImportError:
import pyaudioop as audioop
if sys.version_info >= (3, 0):
basestring = str
FRAME_WIDTHS = {
8: 1,
16: 2,
32: 4,
}
ARRAY_TYPES = {
8: "b",
16: "h",
32: "i",
}
ARRAY_RANGES = {
8: (-0x80, 0x7f),
16: (-0x8000, 0x7fff),
32: (-0x80000000, 0x7fffffff),
}
def get_frame_width(bit_depth):
return FRAME_WIDTHS[bit_depth]
def get_array_type(bit_depth, signed=True):
t = ARRAY_TYPES[bit_depth]
if not signed:
t = t.upper()
return t
def get_min_max_value(bit_depth):
return ARRAY_RANGES[bit_depth]
def _fd_or_path_or_tempfile(fd, mode='w+b', tempfile=True):
if fd is None and tempfile:
fd = TemporaryFile(mode=mode)
if isinstance(fd, basestring):
fd = open(fd, mode=mode)
try:
if isinstance(fd, os.PathLike):
fd = open(fd, mode=mode)
except AttributeError:
# module os has no attribute PathLike, so we're on python < 3.6.
# The protocol we're trying to support doesn't exist, so just pass.
pass
return fd
def db_to_float(db, using_amplitude=True):
"""
    Converts the input db to a float, which represents the equivalent
    amplitude ratio (or power ratio if using_amplitude is False).
"""
db = float(db)
if using_amplitude:
return 10 ** (db / 20)
else: # using power
return 10 ** (db / 10)
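# For example, +6 dB of gain roughly doubles the signal:
#   db_to_float(6.0)                         # ~1.995 (amplitude ratio)
#   db_to_float(3.0, using_amplitude=False)  # ~1.995 (power ratio)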
def ratio_to_db(ratio, val2=None, using_amplitude=True):
"""
    Converts the input ratio to dB, treating it as an amplitude ratio
    (or a power ratio if using_amplitude is False).
"""
ratio = float(ratio)
# accept 2 values and use the ratio of val1 to val2
if val2 is not None:
ratio = ratio / val2
# special case for multiply-by-zero (convert to silence)
if ratio == 0:
return -float('inf')
if using_amplitude:
return 20 * log(ratio, 10)
else: # using power
return 10 * log(ratio, 10)
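# ratio_to_db is the inverse of db_to_float, e.g.:
#   ratio_to_db(2.0)       # ~6.02 dB for doubling the amplitude
#   ratio_to_db(200, 100)  # same thing, expressed as val1 / val2
#   ratio_to_db(0.0)       # -inf (silence)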
def register_pydub_effect(fn, name=None):
"""
decorator for adding pydub effects to the AudioSegment objects.
example use:
@register_pydub_effect
def normalize(audio_segment):
...
or you can specify a name:
@register_pydub_effect("normalize")
def normalize_audio_segment(audio_segment):
...
"""
if isinstance(fn, basestring):
name = fn
return lambda fn: register_pydub_effect(fn, name)
if name is None:
name = fn.__name__
from .audio_segment import AudioSegment
setattr(AudioSegment, name, fn)
return fn
def make_chunks(audio_segment, chunk_length):
"""
Breaks an AudioSegment into chunks that are <chunk_length> milliseconds
long.
if chunk_length is 50 then you'll get a list of 50 millisecond long audio
segments back (except the last one, which can be shorter)
"""
number_of_chunks = ceil(len(audio_segment) / float(chunk_length))
return [audio_segment[i * chunk_length:(i + 1) * chunk_length]
for i in range(int(number_of_chunks))]
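# Example (assumes `song` is an AudioSegment): make_chunks(song, 50) returns
# ceil(len(song) / 50) segments of 50 ms each; only the final chunk may be
# shorter.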
def which(program):
"""
Mimics behavior of UNIX which command.
"""
#Add .exe program extension for windows support
if os.name == "nt" and not program.endswith(".exe"):
program += ".exe"
envdir_list = [os.curdir] + os.environ["PATH"].split(os.pathsep)
for envdir in envdir_list:
program_path = os.path.join(envdir, program)
if os.path.isfile(program_path) and os.access(program_path, os.X_OK):
return program_path
def get_encoder_name():
"""
    Return the default encoder application for the system, either avconv or ffmpeg
"""
if which("avconv"):
return "avconv"
elif which("ffmpeg"):
return "ffmpeg"
else:
# should raise exception
warn("Couldn't find ffmpeg or avconv - defaulting to ffmpeg, but may not work", RuntimeWarning)
return "ffmpeg"
def get_player_name():
"""
    Return the default player application for the system, either avplay or ffplay
"""
if which("avplay"):
return "avplay"
elif which("ffplay"):
return "ffplay"
else:
# should raise exception
warn("Couldn't find ffplay or avplay - defaulting to ffplay, but may not work", RuntimeWarning)
return "ffplay"
def get_prober_name():
"""
    Return the probe application, either avprobe or ffprobe
"""
if which("avprobe"):
return "avprobe"
elif which("ffprobe"):
return "ffprobe"
else:
# should raise exception
warn("Couldn't find ffprobe or avprobe - defaulting to ffprobe, but may not work", RuntimeWarning)
return "ffprobe"
def fsdecode(filename):
"""Wrapper for os.fsdecode which was introduced in python 3.2 ."""
if sys.version_info >= (3, 2):
PathLikeTypes = (basestring, bytes)
if sys.version_info >= (3, 6):
PathLikeTypes += (os.PathLike,)
if isinstance(filename, PathLikeTypes):
return os.fsdecode(filename)
else:
if isinstance(filename, bytes):
return filename.decode(sys.getfilesystemencoding())
if isinstance(filename, basestring):
return filename
raise TypeError("type {0} not accepted by fsdecode".format(type(filename)))
def mediainfo_json(filepath):
"""Return json dictionary with media info(codec, duration, size, bitrate...) from filepath
"""
prober = get_prober_name()
command_args = [
"-v", "info",
"-show_format",
"-show_streams",
]
try:
command_args += [fsdecode(filepath)]
stdin_parameter = None
stdin_data = None
except TypeError:
command_args += ["-"]
stdin_parameter = PIPE
file = _fd_or_path_or_tempfile(filepath, 'rb', tempfile=False)
file.seek(0)
stdin_data = file.read()
command = [prober, '-of', 'json'] + command_args
res = Popen(command, stdin=stdin_parameter, stdout=PIPE, stderr=PIPE)
output, stderr = res.communicate(input=stdin_data)
output = output.decode("utf-8", 'ignore')
stderr = stderr.decode("utf-8", 'ignore')
info = json.loads(output)
if not info:
# If ffprobe didn't give any information, just return it
# (for example, because the file doesn't exist)
return info
# avprobe sometimes gives more information on stderr than
# on the json output. The information has to be extracted
# from lines of the format of:
# ' Stream #0:0: Audio: flac, 88200 Hz, stereo, s32 (24 bit)'
extra_info = {}
for line in stderr.split("\n"):
match = re.match(' *Stream #0[:\.]([0-9]+)(\(\w+\))?', line)
if match:
stream_id = int(match.group(1))
tokens = [x.strip()
for x in re.split('[:,]', line[match.end():]) if x]
extra_info[stream_id] = tokens
audio_streams = [x for x in info['streams'] if x['codec_type'] == 'audio']
if len(audio_streams) == 0:
return info
# We just operate on the first audio stream in case there are more
stream = audio_streams[0]
def set_property(stream, prop, value):
if prop not in stream or stream[prop] == 0:
stream[prop] = value
for token in extra_info[stream['index']]:
m = re.match('([su]([0-9]{1,2})p?) \(([0-9]{1,2}) bit\)$', token)
m2 = re.match('([su]([0-9]{1,2})p?)( \(default\))?$', token)
if m:
set_property(stream, 'sample_fmt', m.group(1))
set_property(stream, 'bits_per_sample', int(m.group(2)))
set_property(stream, 'bits_per_raw_sample', int(m.group(3)))
elif m2:
set_property(stream, 'sample_fmt', m2.group(1))
set_property(stream, 'bits_per_sample', int(m2.group(2)))
set_property(stream, 'bits_per_raw_sample', int(m2.group(2)))
elif re.match('(flt)p?( \(default\))?$', token):
set_property(stream, 'sample_fmt', token)
set_property(stream, 'bits_per_sample', 32)
set_property(stream, 'bits_per_raw_sample', 32)
elif re.match('(dbl)p?( \(default\))?$', token):
set_property(stream, 'sample_fmt', token)
set_property(stream, 'bits_per_sample', 64)
set_property(stream, 'bits_per_raw_sample', 64)
return info
def mediainfo(filepath):
"""Return dictionary with media info(codec, duration, size, bitrate...) from filepath
"""
prober = get_prober_name()
command_args = [
"-v", "quiet",
"-show_format",
"-show_streams",
filepath
]
command = [prober, '-of', 'old'] + command_args
res = Popen(command, stdout=PIPE)
output = res.communicate()[0].decode("utf-8")
if res.returncode != 0:
command = [prober] + command_args
output = Popen(command, stdout=PIPE).communicate()[0].decode("utf-8")
rgx = re.compile(r"(?:(?P<inner_dict>.*?):)?(?P<key>.*?)\=(?P<value>.*?)$")
info = {}
if sys.platform == 'win32':
output = output.replace("\r", "")
for line in output.split("\n"):
# print(line)
mobj = rgx.match(line)
if mobj:
# print(mobj.groups())
inner_dict, key, value = mobj.groups()
if inner_dict:
try:
info[inner_dict]
except KeyError:
info[inner_dict] = {}
info[inner_dict][key] = value
else:
info[key] = value
return info
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
pybuild/packages/libxslt.py
|
from ..source import GitSource
from ..package import Package
from ..patch import LocalPatch
from ..util import target_arch
class Libxslt(Package):
source = GitSource('https://github.com/QPYPI/libxslt.git', alias='libxslt', branch='qpyc-1.1.32')
patches = [
#LocalPatch('0001-Fix-libtoolize-s-issue-in-autogen.sh'),
]
def prepare(self):
pass
def build(self):
import os
ANDROID_NDK = os.getenv('ANDROID_NDK')
CLANG_FLAGS_QPY = os.getenv('CLANG_FLAGS_QPY')
LIBXML2 = os.path.join(os.getcwd(),'src/libxml2')
CC = f"{ANDROID_NDK}/toolchains/llvm/prebuilt/linux-x86_64/bin/clang {CLANG_FLAGS_QPY}"
self.system(f'./autogen.sh')
self.system(f'CC=\"{CC}\" CXX=\"{os.getenv("CXX")}\" LDFLAGS=\"-lgnustl_shared -L{ANDROID_NDK}/sources/cxx-stl/gnu-libstdc++/4.9/libs/armeabi-v7a -L{ANDROID_NDK}/toolchains/arm-linux-androideabi-4.9/prebuilt/linux-x86_64/lib/gcc/arm-linux-androideabi/4.9.x/armv7-a\" '\
f'./configure --host=arm-linux-androideabi --target=arm-linux-androideabi --without-plugins --without-debug --without-crypto --without-python --with-libxml-src={LIBXML2} --prefix={self.destdir()}')
self.system(f'make')
self.system(f'make install')
self.system(
f'if [ -e {self.destdir()}/lib/libxslt.so ] ; then mv {self.destdir()}/lib/libxslt.so {self.destdir()}/lib/libxslt.so.old; fi'
)
self.system(
f'if [ -e {self.destdir()}/lib/libexslt.so ] ; then mv {self.destdir()}/lib/libexslt.so {self.destdir()}/lib/libxeslt.so.old; fi'
)
def fresh(self):
return True
|
[] |
[] |
[
"CXX",
"CLANG_FLAGS_QPY",
"ANDROID_NDK"
] |
[]
|
["CXX", "CLANG_FLAGS_QPY", "ANDROID_NDK"]
|
python
| 3 | 0 | |
grow/commands/group.py
|
"""Base command for grow."""
from grow.deployments.destinations import local as local_destination
import click
import os
import pkg_resources
version = pkg_resources.get_distribution('grow').version
HELP_TEXT = ('Grow is a declarative file-based website generator. Read docs at '
'https://grow.dev. This is version {}.'.format(version))
# pylint: disable=unused-argument
@click.group(help=HELP_TEXT)
@click.version_option(prog_name='grow', version=version)
@click.option('--auth', help='Information used to sign in to services that'
' require authentication. --auth should be an email address.',
envvar='GROW_AUTH')
@click.option('--clear-auth', default=False, is_flag=True,
help='Clears stored auth information.')
@click.option('--auth-key-file', help='Path to a private key file used for'
' services that require authentication.', envvar='GROW_KEY_FILE')
@click.option(
'--interactive-auth', default=False, is_flag=True,
envvar='INTERACTIVE_AUTH',
help='Whether to automatically open an authorization page in your'
' default web browser for any steps that require authentication.'
' If you are running Grow on a machine with access to a web browser,'
' you may use --interactive-auth to automatically open the web'
' browser. By default, this option is turned off, requiring you to'
' manually copy and paste an authorization code.')
@click.option('--profile',
default=False, is_flag=True,
help='Show report of pod operation timing for performance analysis.')
def grow(auth, clear_auth, auth_key_file, interactive_auth, profile):
"""Grow CLI command."""
if interactive_auth not in (None, False):
os.environ['INTERACTIVE_AUTH'] = str(interactive_auth)
if auth is not None:
os.environ['AUTH_EMAIL_ADDRESS'] = str(auth)
if auth_key_file is not None:
os.environ['AUTH_KEY_FILE'] = str(auth_key_file)
if clear_auth:
os.environ['CLEAR_AUTH'] = '1'
@grow.resultcallback()
def process_subcommands(pod, profile, **_):
"""Handle flags that need to process after the sub command."""
if not pod:
return
if profile:
destination = local_destination.LocalDestination(
local_destination.Config())
destination.pod = pod
destination.export_profile_report()
|
[] |
[] |
[
"AUTH_EMAIL_ADDRESS",
"CLEAR_AUTH",
"AUTH_KEY_FILE",
"INTERACTIVE_AUTH"
] |
[]
|
["AUTH_EMAIL_ADDRESS", "CLEAR_AUTH", "AUTH_KEY_FILE", "INTERACTIVE_AUTH"]
|
python
| 4 | 0 | |
VAE_MLP/VAE_MLP_cat_model.py
|
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as f
from torch.autograd import Variable
import os
import numpy as np
from tqdm import tqdm
def reparameterize(mu, logvar):
eps = Variable(torch.randn(mu.size(0), mu.size(1))).cuda()
z = mu + eps * torch.exp(logvar / 2)
return z
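# The function above implements the reparameterization trick: z = mu + eps * sigma
# with eps ~ N(0, I) and sigma = exp(logvar / 2), which keeps the sampling step
# differentiable w.r.t. mu and logvar. For mu and logvar of shape
# (batch, latent_code_num), z has the same shape; eps is created with .cuda(),
# so a GPU is required here.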
class VAE_MLP_CAT(nn.Module):
def __init__(self, latent_code_num, hidden):
super(VAE_MLP_CAT, self).__init__()
self.encoder = nn.Sequential(
# 1, 124, 32
nn.Conv2d(1, 32, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(32),
nn.LeakyReLU(0.2, inplace=True),
# 32, 62, 16
nn.Conv2d(32, 64, kernel_size=4, stride=1, padding=1),
nn.BatchNorm2d(64),
nn.LeakyReLU(0.2, inplace=True),
# 64, 15, 15
nn.Conv2d(64, 128, kernel_size=5, stride=3, padding=1),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2, inplace=True),
# 128, 20, 5
)
self.fc11 = nn.Linear(128 * 10 * 5 * 2, latent_code_num)
self.fc12 = nn.Linear(128 * 10 * 5 * 2, latent_code_num)
self.mlp = nn.Sequential(
torch.nn.Linear(latent_code_num + 1, hidden),
torch.nn.Tanh(),
torch.nn.Linear(hidden, 1)
)
for p in self.mlp.parameters():
torch.nn.init.normal_(p, mean=0, std=0.1)
torch.nn.init.constant_(self.mlp[0].bias, val=0.)
torch.nn.init.constant_(self.mlp[2].bias, val=0.)
self.fc2 = nn.Linear(latent_code_num, 128 * 10 * 5 * 2)
self.decoder = nn.Sequential(
nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(64, 32, kernel_size=4, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(32, 1, kernel_size=6, stride=3, padding=1),
nn.Sigmoid()
)
def get_reparameterized_code(self, x):
out1, out2 = self.encoder(x), self.encoder(x) # batch_s, 8, 7, 7
mu = self.fc11(out1.view(out1.size(0), -1)) # batch_s, latent
logvar = self.fc12(out2.view(out2.size(0), -1)) # batch_s, latent
        z = reparameterize(mu, logvar)  # batch_s, latent
return z
def forward(self, x, t):
out1, out2 = self.encoder(x), self.encoder(x) # batch_s, 8, 7, 7
mu = self.fc11(out1.view(out1.size(0), -1)) # batch_s, latent
logvar = self.fc12(out2.view(out2.size(0), -1)) # batch_s, latent
pre = self.mlp(torch.cat((t, mu), dim=1))
z = reparameterize(mu, logvar) # batch_s, latent
out3 = self.fc2(z).view(z.size(0), 128, 20, 5) # batch_s, 8, 7, 7
return self.decoder(out3), mu, logvar, pre
def predict(self, x, t):
out1, out2 = self.encoder(x), self.encoder(x) # batch_s, 8, 7, 7
mu = self.fc11(out1.view(out1.size(0), -1)) # batch_s, latent
pre = self.mlp(torch.cat((t, mu), dim=1))
return pre
def get_mid(self, x):
out1, out2 = self.encoder(x), self.encoder(x)
mu = self.fc11(out1.view(out1.size(0), -1))
return mu
def decode(self, z):
out3 = self.fc2(z).view(1, 128, 20, 5)
return self.decoder(out3)
def loss_func(recon_x, x, mu, logvar, pre_, label_):
mse = torch.nn.MSELoss()
binary_cross_entropy = f.binary_cross_entropy(recon_x, x, size_average=False)
k_l_divergence = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
mse_loss = mse(pre_, label_)
return binary_cross_entropy + k_l_divergence + mse_loss
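# The objective above is the usual VAE ELBO (binary cross-entropy reconstruction
# term plus KL divergence of q(z|x) from N(0, I)) augmented with an MSE term for
# the supervised property-prediction head.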
def train_vae_mlp(latent_code_num, hidden, params, device):
    print('Training for ' + str(params['VAE_epoch_num']) + ' epochs in total')
os.environ['CUDA_VISIBLE_DEVICES'] = str(device)
thermal_conductivity_train_loader = torch.load('Data/thermal_conductivity_vae_mlp_train_loader.pkl')
heat_capacity_train_loader = torch.load('Data/heat_capacity_vae_mlp_train_loader.pkl')
heat_capacity_vae_mlp = VAE_MLP_CAT(latent_code_num, hidden).cuda()
thermal_conductivity_vae_mlp = VAE_MLP_CAT(latent_code_num, hidden).cuda()
thermal_conductivity_optimizer = optim.Adam(
thermal_conductivity_vae_mlp.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
heat_capacity_optimizer = optim.Adam(
heat_capacity_vae_mlp.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
thermal_conductivity_total_loss_list = np.ones(params['VAE_epoch_num'] + 10)
heat_capacity_total_loss_list = np.ones(params['VAE_epoch_num'] + 10)
thermal_conductivity_total_loss_list *= 5000000
heat_capacity_total_loss_list *= 5000000
thermal_conductivity_model_file_name = \
'Model_pkl/VAE_MLP_CAT_thermal_conductivity_latent_' + str(latent_code_num) + \
'_structure_' + str(hidden) + '.pkl'
heat_capacity_model_file_name = \
'Model_pkl/VAE_MLP_CAT_heat_capacity_latent_' + str(latent_code_num) + \
'_structure_' + str(hidden) + '.pkl'
for epoch in range(params['VAE_epoch_num']):
total_loss = 0
thermal_conductivity_vae_mlp.train()
for i, data in enumerate(tqdm(thermal_conductivity_train_loader, 0)):
one_hot = torch.cat((data[0], data[1]), dim=1)
one_hot = one_hot.reshape(one_hot.shape[0], 1, one_hot.shape[1], one_hot.shape[2])
one_hot = Variable(one_hot).cuda().type(torch.cuda.FloatTensor)
thermal_conductivity_optimizer.zero_grad()
t = data[2].cuda().reshape(data[2].shape[0], 1).type(torch.cuda.FloatTensor)
label = data[3].cuda().reshape(data[3].shape[0], 1).type(torch.cuda.FloatTensor)
recon_x, mu, logvar, pre = thermal_conductivity_vae_mlp.forward(one_hot, t)
recon_x = recon_x[:, :, :one_hot.shape[2], :one_hot.shape[3]]
loss = loss_func(recon_x, one_hot, mu, logvar, pre, label)
loss.backward()
total_loss += loss.data.item() / 1000
thermal_conductivity_optimizer.step()
print('====> Epoch: {} Average loss: {:.4f}'.format(
epoch, total_loss / len(thermal_conductivity_train_loader.dataset)))
thermal_conductivity_total_loss_list[epoch] = total_loss / len(thermal_conductivity_train_loader.dataset)
if np.argmin(thermal_conductivity_total_loss_list) == epoch:
            torch.save(thermal_conductivity_vae_mlp, thermal_conductivity_model_file_name)
print('best result, saving the model to ' + thermal_conductivity_model_file_name)
elif np.argmin(thermal_conductivity_total_loss_list) == epoch - 25:
print('Finish: Training process over due to useless training')
break
for epoch in range(params['VAE_epoch_num']):
total_loss = 0
heat_capacity_vae_mlp.train()
for i, data in enumerate(tqdm(heat_capacity_train_loader, 0)):
one_hot = torch.cat((data[0], data[1]), dim=1)
one_hot = one_hot.reshape(one_hot.shape[0], 1, one_hot.shape[1], one_hot.shape[2])
one_hot = Variable(one_hot).cuda().type(torch.cuda.FloatTensor)
heat_capacity_optimizer.zero_grad()
t = data[2].cuda().reshape(data[2].shape[0], 1).type(torch.cuda.FloatTensor)
recon_x, mu, logvar, pre = heat_capacity_vae_mlp.forward(one_hot, t)
recon_x = recon_x[:, :, :one_hot.shape[2], :one_hot.shape[3]]
loss = loss_func(recon_x, one_hot, mu, logvar, pre, t)
loss.backward()
total_loss += loss.data.item() / 1000
heat_capacity_optimizer.step()
print('====> Epoch: {} Average loss: {:.4f}'.format(
epoch, total_loss / len(heat_capacity_train_loader.dataset)))
heat_capacity_total_loss_list[epoch] = total_loss / len(heat_capacity_train_loader.dataset)
if np.argmin(heat_capacity_total_loss_list) == epoch:
torch.save(heat_capacity_vae_mlp, heat_capacity_model_file_name)
print('best result, saving the model to ' + heat_capacity_model_file_name)
        elif np.argmin(heat_capacity_total_loss_list) == epoch - 25:
print('Finish: Training process over due to useless training')
break
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
tests/unit/netapi/rest_tornado/test_handlers.py
|
# coding: utf-8
# Import Python libs
from __future__ import absolute_import
import json
import yaml
import os
# Import Salt Testing Libs
from salttesting.unit import skipIf
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../..')
import integration # pylint: disable=import-error
# Import Salt libs
try:
from salt.netapi.rest_tornado import saltnado
from salt.netapi.rest_tornado import saltnado_websockets
HAS_TORNADO = True
except ImportError:
HAS_TORNADO = False
import salt.auth
# Import 3rd-party libs
# pylint: disable=import-error
try:
import tornado.testing
import tornado.concurrent
from tornado.testing import AsyncHTTPTestCase, gen_test
from tornado.httpclient import HTTPRequest, HTTPError
from tornado.websocket import websocket_connect
HAS_TORNADO = True
except ImportError:
HAS_TORNADO = False
# Let's create a fake AsyncHTTPTestCase so we can properly skip the test case
class AsyncHTTPTestCase(object):
pass
import salt.ext.six as six
from salt.ext.six.moves.urllib.parse import urlencode, urlparse # pylint: disable=no-name-in-module
# pylint: enable=import-error
from salttesting.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
@skipIf(HAS_TORNADO is False, 'The tornado package needs to be installed') # pylint: disable=W0223
class SaltnadoTestCase(integration.ModuleCase, AsyncHTTPTestCase):
'''
Mixin to hold some shared things
'''
content_type_map = {'json': 'application/json',
'json-utf8': 'application/json; charset=utf-8',
'yaml': 'application/x-yaml',
'text': 'text/plain',
'form': 'application/x-www-form-urlencoded',
'xml': 'application/xml',
'real-accept-header-json': 'application/json, text/javascript, */*; q=0.01',
'real-accept-header-yaml': 'application/x-yaml, text/yaml, */*; q=0.01'}
auth_creds = (
('username', 'saltdev_api'),
('password', 'saltdev'),
('eauth', 'auto'))
@property
def auth_creds_dict(self):
return dict(self.auth_creds)
@property
def opts(self):
return self.get_config('client_config', from_scratch=True)
@property
def mod_opts(self):
return self.get_config('minion', from_scratch=True)
@property
def auth(self):
if not hasattr(self, '__auth'):
self.__auth = salt.auth.LoadAuth(self.opts)
return self.__auth
@property
def token(self):
''' Mint and return a valid token for auth_creds '''
return self.auth.mk_token(self.auth_creds_dict)
def setUp(self):
# FIXME
        # The try/except here and in tearDown is a temporary fix, pending the release of a
# new salt version, later than 08.22.16
try:
super(SaltnadoTestCase, self).setUp()
except (NotImplementedError, AttributeError):
pass
self.async_timeout_prev = os.environ.pop('ASYNC_TEST_TIMEOUT', None)
os.environ['ASYNC_TEST_TIMEOUT'] = str(30)
def tearDown(self):
try:
super(SaltnadoTestCase, self).tearDown()
except AttributeError:
pass
if self.async_timeout_prev is None:
os.environ.pop('ASYNC_TEST_TIMEOUT', None)
else:
os.environ['ASYNC_TEST_TIMEOUT'] = self.async_timeout_prev
def build_tornado_app(self, urls):
application = tornado.web.Application(urls, debug=True)
application.auth = self.auth
application.opts = self.opts
application.mod_opts = self.mod_opts
return application
class TestBaseSaltAPIHandler(SaltnadoTestCase):
def get_app(self):
class StubHandler(saltnado.BaseSaltAPIHandler): # pylint: disable=W0223
def get(self, *args, **kwargs):
return self.echo_stuff()
def post(self):
return self.echo_stuff()
def echo_stuff(self):
ret_dict = {'foo': 'bar'}
attrs = ('token',
'start',
'connected',
'lowstate',
)
for attr in attrs:
ret_dict[attr] = getattr(self, attr)
self.write(self.serialize(ret_dict))
urls = [('/', StubHandler),
('/(.*)', StubHandler)]
return self.build_tornado_app(urls)
def test_accept_content_type(self):
'''
Test the base handler's accept picking
'''
# send NO accept header, should come back with json
response = self.fetch('/')
self.assertEqual(response.headers['Content-Type'], self.content_type_map['json'])
self.assertEqual(type(json.loads(response.body)), dict)
# Request application/json
response = self.fetch('/', headers={'Accept': self.content_type_map['json']})
self.assertEqual(response.headers['Content-Type'], self.content_type_map['json'])
self.assertEqual(type(json.loads(response.body)), dict)
# Request application/x-yaml
response = self.fetch('/', headers={'Accept': self.content_type_map['yaml']})
self.assertEqual(response.headers['Content-Type'], self.content_type_map['yaml'])
self.assertEqual(type(yaml.load(response.body)), dict)
# Request not supported content-type
response = self.fetch('/', headers={'Accept': self.content_type_map['xml']})
self.assertEqual(response.code, 406)
# Request some JSON with a browser like Accept
accept_header = self.content_type_map['real-accept-header-json']
response = self.fetch('/', headers={'Accept': accept_header})
self.assertEqual(response.headers['Content-Type'], self.content_type_map['json'])
self.assertEqual(type(json.loads(response.body)), dict)
# Request some YAML with a browser like Accept
accept_header = self.content_type_map['real-accept-header-yaml']
response = self.fetch('/', headers={'Accept': accept_header})
self.assertEqual(response.headers['Content-Type'], self.content_type_map['yaml'])
self.assertEqual(type(yaml.load(response.body)), dict)
def test_token(self):
'''
Test that the token is returned correctly
'''
token = json.loads(self.fetch('/').body)['token']
self.assertIs(token, None)
# send a token as a header
response = self.fetch('/', headers={saltnado.AUTH_TOKEN_HEADER: 'foo'})
token = json.loads(response.body)['token']
self.assertEqual(token, 'foo')
# send a token as a cookie
response = self.fetch('/', headers={'Cookie': '{0}=foo'.format(saltnado.AUTH_COOKIE_NAME)})
token = json.loads(response.body)['token']
self.assertEqual(token, 'foo')
# send both, make sure its the header
response = self.fetch('/', headers={saltnado.AUTH_TOKEN_HEADER: 'foo',
'Cookie': '{0}=bar'.format(saltnado.AUTH_COOKIE_NAME)})
token = json.loads(response.body)['token']
self.assertEqual(token, 'foo')
def test_deserialize(self):
'''
Send various encoded forms of lowstates (and bad ones) to make sure we
handle deserialization correctly
'''
valid_lowstate = [{
"client": "local",
"tgt": "*",
"fun": "test.fib",
"arg": ["10"]
},
{
"client": "runner",
"fun": "jobs.lookup_jid",
"jid": "20130603122505459265"
}]
# send as JSON
response = self.fetch('/',
method='POST',
body=json.dumps(valid_lowstate),
headers={'Content-Type': self.content_type_map['json']})
self.assertEqual(valid_lowstate, json.loads(response.body)['lowstate'])
# send yaml as json (should break)
response = self.fetch('/',
method='POST',
body=yaml.dump(valid_lowstate),
headers={'Content-Type': self.content_type_map['json']})
self.assertEqual(response.code, 400)
# send as yaml
response = self.fetch('/',
method='POST',
body=yaml.dump(valid_lowstate),
headers={'Content-Type': self.content_type_map['yaml']})
self.assertEqual(valid_lowstate, json.loads(response.body)['lowstate'])
# send json as yaml (works since yaml is a superset of json)
response = self.fetch('/',
method='POST',
body=json.dumps(valid_lowstate),
headers={'Content-Type': self.content_type_map['yaml']})
self.assertEqual(valid_lowstate, json.loads(response.body)['lowstate'])
# send json as text/plain
response = self.fetch('/',
method='POST',
body=json.dumps(valid_lowstate),
headers={'Content-Type': self.content_type_map['text']})
self.assertEqual(valid_lowstate, json.loads(response.body)['lowstate'])
# send form-urlencoded
form_lowstate = (
('client', 'local'),
('tgt', '*'),
('fun', 'test.fib'),
('arg', '10'),
('arg', 'foo'),
)
response = self.fetch('/',
method='POST',
body=urlencode(form_lowstate),
headers={'Content-Type': self.content_type_map['form']})
returned_lowstate = json.loads(response.body)['lowstate']
self.assertEqual(len(returned_lowstate), 1)
returned_lowstate = returned_lowstate[0]
self.assertEqual(returned_lowstate['client'], 'local')
self.assertEqual(returned_lowstate['tgt'], '*')
self.assertEqual(returned_lowstate['fun'], 'test.fib')
self.assertEqual(returned_lowstate['arg'], ['10', 'foo'])
# Send json with utf8 charset
response = self.fetch('/',
method='POST',
body=json.dumps(valid_lowstate),
headers={'Content-Type': self.content_type_map['json-utf8']})
self.assertEqual(valid_lowstate, json.loads(response.body)['lowstate'])
def test_get_lowstate(self):
'''
Test transformations low data of the function _get_lowstate
'''
valid_lowstate = [{
"client": "local",
"tgt": "*",
"fun": "test.fib",
"arg": ["10"]
}]
# Case 1. dictionary type of lowstate
request_lowstate = {
"client": "local",
"tgt": "*",
"fun": "test.fib",
"arg": ["10"]
}
response = self.fetch('/',
method='POST',
body=json.dumps(request_lowstate),
headers={'Content-Type': self.content_type_map['json']})
self.assertEqual(valid_lowstate, json.loads(response.body)['lowstate'])
# Case 2. string type of arg
request_lowstate = [{
"client": "local",
"tgt": "*",
"fun": "test.fib",
"arg": "10"
}]
response = self.fetch('/',
method='POST',
body=json.dumps(request_lowstate),
headers={'Content-Type': self.content_type_map['json']})
self.assertEqual(valid_lowstate, json.loads(response.body)['lowstate'])
# Case 3. Combine Case 1 and Case 2.
request_lowstate = {
"client": "local",
"tgt": "*",
"fun": "test.fib",
"arg": "10"
}
# send as json
response = self.fetch('/',
method='POST',
body=json.dumps(request_lowstate),
headers={'Content-Type': self.content_type_map['json']})
self.assertEqual(valid_lowstate, json.loads(response.body)['lowstate'])
# send as yaml
response = self.fetch('/',
method='POST',
body=yaml.dump(request_lowstate),
headers={'Content-Type': self.content_type_map['yaml']})
self.assertEqual(valid_lowstate, json.loads(response.body)['lowstate'])
# send as plain text
response = self.fetch('/',
method='POST',
body=json.dumps(request_lowstate),
headers={'Content-Type': self.content_type_map['text']})
self.assertEqual(valid_lowstate, json.loads(response.body)['lowstate'])
# send as form-urlencoded
request_form_lowstate = (
('client', 'local'),
('tgt', '*'),
('fun', 'test.fib'),
('arg', '10'),
)
response = self.fetch('/',
method='POST',
body=urlencode(request_form_lowstate),
headers={'Content-Type': self.content_type_map['form']})
self.assertEqual(valid_lowstate, json.loads(response.body)['lowstate'])
def test_cors_origin_wildcard(self):
'''
Check that endpoints returns Access-Control-Allow-Origin
'''
self._app.mod_opts['cors_origin'] = '*'
headers = self.fetch('/').headers
self.assertEqual(headers["Access-Control-Allow-Origin"], "*")
def test_cors_origin_single(self):
'''
Check that endpoints returns the Access-Control-Allow-Origin when
only one origins is set
'''
self._app.mod_opts['cors_origin'] = 'http://example.foo'
# Example.foo is an authorized origin
headers = self.fetch('/', headers={'Origin': 'http://example.foo'}).headers
self.assertEqual(headers["Access-Control-Allow-Origin"], "http://example.foo")
# Example2.foo is not an authorized origin
headers = self.fetch('/', headers={'Origin': 'http://example2.foo'}).headers
self.assertEqual(headers.get("Access-Control-Allow-Origin"), None)
def test_cors_origin_multiple(self):
'''
Check that endpoints returns the Access-Control-Allow-Origin when
multiple origins are set
'''
self._app.mod_opts['cors_origin'] = ['http://example.foo', 'http://foo.example']
# Example.foo is an authorized origin
headers = self.fetch('/', headers={'Origin': 'http://example.foo'}).headers
self.assertEqual(headers["Access-Control-Allow-Origin"], "http://example.foo")
# Example2.foo is not an authorized origin
headers = self.fetch('/', headers={'Origin': 'http://example2.foo'}).headers
self.assertEqual(headers.get("Access-Control-Allow-Origin"), None)
def test_cors_preflight_request(self):
'''
Check that preflight request contains right headers
'''
self._app.mod_opts['cors_origin'] = '*'
request_headers = 'X-Auth-Token, accept, content-type'
preflight_headers = {'Access-Control-Request-Headers': request_headers,
'Access-Control-Request-Method': 'GET'}
response = self.fetch('/', method='OPTIONS', headers=preflight_headers)
headers = response.headers
self.assertEqual(response.code, 204)
self.assertEqual(headers['Access-Control-Allow-Headers'], request_headers)
self.assertEqual(headers['Access-Control-Expose-Headers'], 'X-Auth-Token')
self.assertEqual(headers['Access-Control-Allow-Methods'], 'OPTIONS, GET, POST')
self.assertEqual(response.code, 204)
def test_cors_origin_url_with_arguments(self):
'''
Check that preflight requests works with url with components
like jobs or minions endpoints.
'''
self._app.mod_opts['cors_origin'] = '*'
request_headers = 'X-Auth-Token, accept, content-type'
preflight_headers = {'Access-Control-Request-Headers': request_headers,
'Access-Control-Request-Method': 'GET'}
response = self.fetch('/1234567890', method='OPTIONS',
headers=preflight_headers)
headers = response.headers
self.assertEqual(response.code, 204)
self.assertEqual(headers["Access-Control-Allow-Origin"], "*")
@skipIf(NO_MOCK, NO_MOCK_REASON)
class TestWebhookSaltHandler(SaltnadoTestCase):
def get_app(self):
urls = [
(r'/hook(/.*)?', saltnado.WebhookSaltAPIHandler),
]
return self.build_tornado_app(urls)
@patch('salt.utils.event.get_event')
def test_hook_can_handle_get_parameters(self, get_event):
self._app.mod_opts['webhook_disable_auth'] = True
event = MagicMock()
event.fire_event.return_value = True
get_event.return_value = event
response = self.fetch('/hook/my_service/?param=1¶m=2',
body=json.dumps({}),
method='POST',
headers={'Content-Type': self.content_type_map['json']})
self.assertEqual(response.code, 200, response.body)
host = urlparse(response.effective_url).netloc
event.fire_event.assert_called_once_with(
{'headers': {'Content-Length': '2',
'Connection': 'close',
'Content-Type': 'application/json',
'Host': host,
'Accept-Encoding': 'gzip'},
'post': {},
'get': {'param': ['1', '2']}
},
'salt/netapi/hook/my_service/',
)
class TestSaltAuthHandler(SaltnadoTestCase):
def get_app(self):
urls = [('/login', saltnado.SaltAuthHandler)]
return self.build_tornado_app(urls)
def test_get(self):
'''
We don't allow gets, so assert we get 401s
'''
response = self.fetch('/login')
self.assertEqual(response.code, 401)
def test_login(self):
'''
Test valid logins
'''
# Test in form encoded
response = self.fetch('/login',
method='POST',
body=urlencode(self.auth_creds),
headers={'Content-Type': self.content_type_map['form']})
self.assertEqual(response.code, 200)
response_obj = json.loads(response.body)['return'][0]
self.assertEqual(response_obj['perms'], self.opts['external_auth']['auto'][self.auth_creds_dict['username']])
        self.assertIn('token', response_obj)  # TODO: verify that it's valid?
self.assertEqual(response_obj['user'], self.auth_creds_dict['username'])
self.assertEqual(response_obj['eauth'], self.auth_creds_dict['eauth'])
# Test in JSON
response = self.fetch('/login',
method='POST',
body=json.dumps(self.auth_creds_dict),
headers={'Content-Type': self.content_type_map['json']})
self.assertEqual(response.code, 200)
response_obj = json.loads(response.body)['return'][0]
self.assertEqual(response_obj['perms'], self.opts['external_auth']['auto'][self.auth_creds_dict['username']])
        self.assertIn('token', response_obj)  # TODO: verify that it's valid?
self.assertEqual(response_obj['user'], self.auth_creds_dict['username'])
self.assertEqual(response_obj['eauth'], self.auth_creds_dict['eauth'])
# Test in YAML
response = self.fetch('/login',
method='POST',
body=yaml.dump(self.auth_creds_dict),
headers={'Content-Type': self.content_type_map['yaml']})
self.assertEqual(response.code, 200)
response_obj = json.loads(response.body)['return'][0]
self.assertEqual(response_obj['perms'], self.opts['external_auth']['auto'][self.auth_creds_dict['username']])
        self.assertIn('token', response_obj)  # TODO: verify that it's valid?
self.assertEqual(response_obj['user'], self.auth_creds_dict['username'])
self.assertEqual(response_obj['eauth'], self.auth_creds_dict['eauth'])
def test_login_missing_password(self):
'''
        Test logins with missing passwords
'''
bad_creds = []
for key, val in six.iteritems(self.auth_creds_dict):
if key == 'password':
continue
bad_creds.append((key, val))
response = self.fetch('/login',
method='POST',
body=urlencode(bad_creds),
headers={'Content-Type': self.content_type_map['form']})
self.assertEqual(response.code, 400)
def test_login_bad_creds(self):
'''
        Test logins with bad credentials (invalid username)
'''
bad_creds = []
for key, val in six.iteritems(self.auth_creds_dict):
if key == 'username':
val = val + 'foo'
bad_creds.append((key, val))
response = self.fetch('/login',
method='POST',
body=urlencode(bad_creds),
headers={'Content-Type': self.content_type_map['form']})
self.assertEqual(response.code, 401)
def test_login_invalid_data_structure(self):
'''
Test logins with either list or string JSON payload
'''
response = self.fetch('/login',
method='POST',
body=json.dumps(self.auth_creds),
headers={'Content-Type': self.content_type_map['form']})
self.assertEqual(response.code, 400)
response = self.fetch('/login',
method='POST',
body=json.dumps(42),
headers={'Content-Type': self.content_type_map['form']})
self.assertEqual(response.code, 400)
response = self.fetch('/login',
method='POST',
body=json.dumps('mystring42'),
headers={'Content-Type': self.content_type_map['form']})
self.assertEqual(response.code, 400)
@skipIf(HAS_TORNADO is False, 'The tornado package needs to be installed') # pylint: disable=W0223
class TestWebsocketSaltAPIHandler(SaltnadoTestCase):
def get_app(self):
urls = [
('/login', saltnado.SaltAuthHandler),
(r"/hook/([0-9A-Fa-f]{32})", saltnado_websockets.AllEventsHandler)]
application = self.build_tornado_app(urls)
return application
@gen_test
def test_websocket_handler_upgrade_to_websocket(self):
response = yield self.http_client.fetch(self.get_url('/login'),
method='POST',
body=urlencode(self.auth_creds),
headers={'Content-Type': self.content_type_map['form']})
token = json.loads(response.body)['return'][0]['token']
url = 'ws://127.0.0.1:{0}/hook/{1}'.format(self.get_http_port(), token)
request = HTTPRequest(url, headers={'Origin': 'http://example.com',
'Host': 'example.com'})
ws = yield websocket_connect(request)
ws.write_message('websocket client ready')
ws.close()
@gen_test
def test_websocket_handler_bad_token(self):
"""
        A bad token should return a 401 during a websocket connect
"""
token = 'A'*32
url = 'ws://127.0.0.1:{0}/hook/{1}'.format(self.get_http_port(), token)
request = HTTPRequest(url, headers={'Origin': 'http://example.com',
'Host': 'example.com'})
try:
ws = yield websocket_connect(request)
except HTTPError as error:
self.assertEqual(error.code, 401)
@gen_test
def test_websocket_handler_cors_origin_wildcard(self):
self._app.mod_opts['cors_origin'] = '*'
response = yield self.http_client.fetch(self.get_url('/login'),
method='POST',
body=urlencode(self.auth_creds),
headers={'Content-Type': self.content_type_map['form']})
token = json.loads(response.body)['return'][0]['token']
url = 'ws://127.0.0.1:{0}/hook/{1}'.format(self.get_http_port(), token)
request = HTTPRequest(url, headers={'Origin': 'http://foo.bar',
'Host': 'example.com'})
ws = yield websocket_connect(request)
ws.write_message('websocket client ready')
ws.close()
@gen_test
def test_cors_origin_single(self):
self._app.mod_opts['cors_origin'] = 'http://example.com'
response = yield self.http_client.fetch(self.get_url('/login'),
method='POST',
body=urlencode(self.auth_creds),
headers={'Content-Type': self.content_type_map['form']})
token = json.loads(response.body)['return'][0]['token']
url = 'ws://127.0.0.1:{0}/hook/{1}'.format(self.get_http_port(), token)
        # Example.com should work
request = HTTPRequest(url, headers={'Origin': 'http://example.com',
'Host': 'example.com'})
ws = yield websocket_connect(request)
ws.write_message('websocket client ready')
ws.close()
# But foo.bar not
request = HTTPRequest(url, headers={'Origin': 'http://foo.bar',
'Host': 'example.com'})
try:
ws = yield websocket_connect(request)
except HTTPError as error:
self.assertEqual(error.code, 403)
@gen_test
def test_cors_origin_multiple(self):
self._app.mod_opts['cors_origin'] = ['http://example.com', 'http://foo.bar']
response = yield self.http_client.fetch(self.get_url('/login'),
method='POST',
body=urlencode(self.auth_creds),
headers={'Content-Type': self.content_type_map['form']})
token = json.loads(response.body)['return'][0]['token']
url = 'ws://127.0.0.1:{0}/hook/{1}'.format(self.get_http_port(), token)
        # Example.com should work
request = HTTPRequest(url, headers={'Origin': 'http://example.com',
'Host': 'example.com'})
ws = yield websocket_connect(request)
ws.write_message('websocket client ready')
ws.close()
# Foo.bar too
request = HTTPRequest(url, headers={'Origin': 'http://foo.bar',
'Host': 'example.com'})
ws = yield websocket_connect(request)
ws.write_message('websocket client ready')
ws.close()
if __name__ == '__main__':
from integration import run_tests # pylint: disable=import-error
run_tests(TestBaseSaltAPIHandler, TestSaltAuthHandler, needs_daemon=False)
|
[] |
[] |
[
"ASYNC_TEST_TIMEOUT"
] |
[]
|
["ASYNC_TEST_TIMEOUT"]
|
python
| 1 | 0 | |
scripts/gen_test_makefile.py
|
#!/usr/bin/python3
###############################################################################
#
# Copyright (c) 2015-2020, Intel Corporation
# Copyright (c) 2019-2020, University of Utah
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
# This scripts creates Test_Makefile, basing on configuration file
###############################################################################
import argparse
import collections
import enum
import logging
import os
import sys
import re
import common
Test_Makefile_name = "Test_Makefile"
license_file_name = "LICENSE.txt"
check_isa_file_name = "check_isa.cpp"
default_test_sets_file_name = "test_sets.txt"
default_config_file = "test_sets.txt"
comp_specs_line = "Compiler specs:"
spec_list_len = 5
test_sets_line = "Testing sets:"
set_list_len = 5
stats_capt_opt_line = "Options for statistics' capture:"
stats_capt_opt_list_len = 2
###############################################################################
# Section for Test_Makefile parameters
class MakefileVariable:
"""A special class, which should link together name and value of parameters"""
def __init__(self, name, value):
self.name = name
self.value = value
# I can't use build-in dictionary, because variables should be ordered
Makefile_variable_list = []
cxx_flags = MakefileVariable("CXXFLAGS", "")
Makefile_variable_list.append(cxx_flags)
ld_flags = MakefileVariable("LDFLAGS", "")
Makefile_variable_list.append(ld_flags)
std_flags = MakefileVariable("STDFLAGS", "-std=")
Makefile_variable_list.append(std_flags)
# File extension will be set later to match selected language standard in adjust_sources_to_standard()
sources = MakefileVariable("SOURCES", "driver func")
Makefile_variable_list.append(sources)
headers = MakefileVariable("HEADERS", "init.h")
Makefile_variable_list.append(headers)
executable = MakefileVariable("EXECUTABLE", "out")
Makefile_variable_list.append(executable)
# Makefile_variable_list.append(Makefile_variable("",""))
stat_options = MakefileVariable("STATFLAGS", "")
Makefile_variable_list.append(stat_options)
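# With the defaults above, the "common variables" step of gen_makefile() below
# emits lines of the form (the exact standard and file extension depend on set_standard()):
#   CXXFLAGS=
#   LDFLAGS=
#   STDFLAGS=-std=<selected standard>
#   SOURCES=driver.<ext> func.<ext>
#   HEADERS=init.h
#   EXECUTABLE=out
#   STATFLAGS=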
###############################################################################
# Section for language standards
@enum.unique
class StdID(enum.IntEnum):
# Better to use enum.auto, but it is available only since python3.6
C99 = 0
C11 = 1
MAX_C_ID = 2
CXX98 = 3
CXX03 = 4
CXX11 = 5
CXX14 = 6
CXX17 = 7
MAX_CXX_ID = 8
def is_c (self):
return StdID.C99.value <= self.value < StdID.MAX_C_ID.value
def is_cxx (self):
return StdID.CXX98.value <= self.value < StdID.MAX_CXX_ID.value
''' Enum doesn't allow '++' in names, so we need this function. '''
@staticmethod
def get_pretty_std_name (std_id):
if std_id.is_c():
return std_id.name.replace("C", "c")
if std_id.is_cxx():
return std_id.name.replace("CXX", "c++")
''' Easy way to convert string to StdID '''
StrToStdId = collections.OrderedDict()
for i in StdID:
if not i.name.startswith("MAX"):
StrToStdId[StdID.get_pretty_std_name(i)] = i
selected_standard = None
def get_file_ext():
if selected_standard.is_c():
return ".c"
if selected_standard.is_cxx():
return ".cpp"
return None
def adjust_sources_to_standard():
#sources.value = re.sub("\s+|$", get_file_ext() + " ", sources.value)
new_sources = ""
for source in sources.value.split():
if source == "func" and common.selected_gen_std == common.GenStdID.ISPC:
new_sources += source + ".ispc"
else:
new_sources += source + get_file_ext()
new_sources += " "
sources.value = new_sources.strip()
def set_standard (std_str):
global selected_standard
selected_standard = StrToStdId[std_str]
std_flags.value += StdID.get_pretty_std_name(selected_standard)
adjust_sources_to_standard()
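# Illustration (not executed here): after set_standard("c++11") the helpers above
# leave the module state as
#   selected_standard == StdID.CXX11
#   std_flags.value   == "-std=c++11"
#   get_file_ext()    == ".cpp"
#   sources.value     == "driver.cpp func.cpp"   (assuming the non-ISPC generator standard)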
def get_standard ():
global selected_standard
return StdID.get_pretty_std_name(selected_standard)
def check_if_std_defined ():
if selected_standard is None or \
selected_standard == StdID.MAX_C_ID or \
selected_standard == StdID.MAX_CXX_ID:
common.print_and_exit("Language standard wasn't selected!")
###############################################################################
# Section for sde
class SdeTarget (object):
all_sde_targets = []
def __init__(self, name, enum_value):
self.name = name
self.enum_value = enum_value
SdeTarget.all_sde_targets.append(self)
SdeArch = dict()
# This list should be ordered!
SdeArch["p4"] = SdeTarget("p4" , 0)
SdeArch["p4p"] = SdeTarget("p4p", 1)
SdeArch["mrm"] = SdeTarget("mrm", 2)
SdeArch["pnr"] = SdeTarget("pnr", 3)
SdeArch["nhm"] = SdeTarget("nhm", 4)
SdeArch["wsm"] = SdeTarget("wsm", 5)
SdeArch["snb"] = SdeTarget("snb", 6)
SdeArch["ivb"] = SdeTarget("ivb", 7)
SdeArch["hsw"] = SdeTarget("hsw", 8)
SdeArch["bdw"] = SdeTarget("bdw", 9)
SdeArch["skx"] = SdeTarget("skx", 10)
SdeArch["icx"] = SdeTarget("icx", 11)
SdeArch["tgl"] = SdeTarget("tgl", 12)
SdeArch["knl"] = SdeTarget("knl", 13)
SdeArch[""] = SdeTarget("", 14) # It is a fake target and it should always be the last
def define_sde_arch(native, target):
if target == SdeArch["skx"] and native != SdeArch["skx"]:
return SdeArch["skx"].name
if target == SdeArch["icx"] and native != SdeArch["icx"]:
return SdeArch["icx"].name
if target == SdeArch["tgl"] and native != SdeArch["tgl"]:
return SdeArch["tgl"].name
if target == SdeArch["knl"] and native != SdeArch["knl"]:
return SdeArch["knl"].name
if native.enum_value < target.enum_value:
return target.name
return ""
###############################################################################
# Section for targets
class CompilerSpecs (object):
all_comp_specs = dict()
def __init__(self, name, cxx_exec_name, c_exec_name, common_args, arch_prefix):
self.name = name
self.comp_cxx_name = cxx_exec_name
self.comp_c_name = c_exec_name
self.common_args = common_args
self.arch_prefix = arch_prefix
self.version = "unknown"
CompilerSpecs.all_comp_specs[name] = self
def set_version(self, version):
self.version = version
class Arch (object):
def __init__(self, comp_name, sde_arch):
self.comp_name = comp_name
self.sde_arch = sde_arch
class CompilerTarget (object):
all_targets = []
def __init__(self, name, specs, target_args, arch):
self.name = name
self.specs = specs
self.args = specs.common_args + " " + target_args
self.arch = arch
CompilerTarget.all_targets.append(self)
class StatisticsOptions (object):
all_stats_options = dict()
def __init__(self, spec, options):
self.options = options
StatisticsOptions.all_stats_options[spec.name] = self
@staticmethod
def get_options(spec):
try:
return StatisticsOptions.all_stats_options[spec.name].options
except KeyError:
common.print_and_exit("Can't find key!")
###############################################################################
# Section for config parser
def skip_line(line):
return line.startswith("#") or re.match(r'^\s*$', line)
def check_config_list(config_list, fixed_len, message):
common.log_msg(logging.DEBUG, "Adding config list: " + str(config_list))
if len(config_list) < fixed_len:
common.print_and_exit(message + str(config_list))
config_list = [x.strip() for x in config_list]
return config_list
def add_specs(spec_list):
spec_list = check_config_list(spec_list, spec_list_len, "Error in spec string, check it: ")
try:
CompilerSpecs(spec_list[0], spec_list[1], spec_list[2], spec_list[3], spec_list[4])
common.log_msg(logging.DEBUG, "Finished adding compiler spec")
except KeyError:
common.print_and_exit("Can't find key!")
def add_sets(set_list):
set_list = check_config_list(set_list, set_list_len, "Error in set string, check it: ")
try:
CompilerTarget(set_list[0], CompilerSpecs.all_comp_specs[set_list[1]], set_list[2],
Arch(set_list[3], SdeArch[set_list[4]]))
common.log_msg(logging.DEBUG, "Finished adding testing set")
except KeyError:
common.print_and_exit("Can't find key!")
def add_stats_options(stats_opt_list):
stats_opt_list = check_config_list(stats_opt_list, stats_capt_opt_list_len,
"Error in stats options string, check it: ")
try:
StatisticsOptions(CompilerSpecs.all_comp_specs[stats_opt_list[0]], stats_opt_list[1])
common.log_msg(logging.DEBUG, "Finished adding stats option string")
except KeyError:
common.print_and_exit("Can't find key!")
def read_compiler_specs(config_iter, function, next_section_name=""):
for config_line in config_iter:
if skip_line(config_line):
continue
if next_section_name != "" and config_line.startswith(next_section_name):
return
specs = config_line.split("|")
function(specs)
def parse_config(file_name):
# Before parsing, clean old data
CompilerSpecs.all_comp_specs = dict()
CompilerTarget.all_targets = []
# Parse
config_file = common.check_and_open_file(file_name, "r")
config = config_file.read().splitlines()
config_file.close()
if not any(s.startswith(comp_specs_line) for s in config) or not any(s.startswith(test_sets_line) for s in config):
common.print_and_exit("Invalid config file! Check it!")
config_iter = iter(config)
for config_line in config_iter:
if skip_line(config_line):
continue
if config_line.startswith(comp_specs_line):
read_compiler_specs(config_iter, add_specs, test_sets_line)
read_compiler_specs(config_iter, add_sets, stats_capt_opt_line)
read_compiler_specs(config_iter, add_stats_options)
###############################################################################
def detect_native_arch():
check_isa_file = os.path.abspath(common.yarpgen_scripts + os.sep + check_isa_file_name)
check_isa_binary = os.path.abspath(common.yarpgen_scripts + os.sep + check_isa_file_name.replace(".cpp", ""))
sys_compiler = ""
for key in CompilerSpecs.all_comp_specs:
exec_name = CompilerSpecs.all_comp_specs[key].comp_cxx_name
if common.if_exec_exist(exec_name):
sys_compiler = exec_name
break
if sys_compiler == "":
common.print_and_exit("Can't find any compiler")
if not common.if_exec_exist(check_isa_binary):
if not os.path.exists(check_isa_file):
common.print_and_exit("Can't find " + check_isa_file)
ret_code, output, err_output, time_expired, elapsed_time = \
common.run_cmd([sys_compiler, check_isa_file, "-o", check_isa_binary], None)
if ret_code != 0:
common.print_and_exit("Can't compile " + check_isa_file + ": " + str(err_output, "utf-8"))
ret_code, output, err_output, time_expired, elapsed_time = common.run_cmd([check_isa_binary], None)
if ret_code != 0:
common.print_and_exit("Error while executing " + check_isa_binary)
native_arch_str = str(output, "utf-8").split()[0]
for sde_target in SdeTarget.all_sde_targets:
if sde_target.name == native_arch_str:
return sde_target
common.print_and_exit("Can't detect system ISA")
def gen_makefile(out_file_name, force, config_file, only_target=None, inject_blame_opt=None,
creduce_file=None, stat_targets=None):
# The caller may have prepared the test specs and targets already, in which case we don't need to parse the config file
check_if_std_defined()
if config_file is not None:
parse_config(config_file)
output = ""
if stat_targets is not None:
stat_targets = list(set(stat_targets))
# 1. License
license_file = common.check_and_open_file(os.path.abspath(common.yarpgen_scripts + os.sep + ".." + os.sep + license_file_name), "r")
for license_str in license_file:
output += "#" + license_str
license_file.close()
output += "###############################################################################\n"
output += "#This file was generated automatically.\n"
output += "#If you want to make a permanent changes, you should edit gen_test_makefile.py\n"
output += "###############################################################################\n\n"
# 2. Define common variables
for makefile_variable in Makefile_variable_list:
test_pwd = ""
if creduce_file and makefile_variable.name == "CXXFLAGS":
test_pwd = " -I$(TEST_PWD)"
output += makefile_variable.name + "=" + makefile_variable.value + test_pwd + "\n"
output += "\n"
# 3. Define build targets
for target in CompilerTarget.all_targets:
if only_target is not None and only_target.name != target.name:
continue
compiler_name = None
if selected_standard.is_c():
compiler_name = target.specs.comp_c_name
if selected_standard.is_cxx():
compiler_name = target.specs.comp_cxx_name
output += target.name + ": " + "COMPILER=" + compiler_name + "\n"
optflags_str = target.name + ": " + "OPTFLAGS=" + target.args
if target.arch.comp_name != "":
optflags_str += " " + target.specs.arch_prefix + target.arch.comp_name
optflags_str += "\n"
output += optflags_str
# For performance reasons, the driver should always be compiled with -O0
output += re.sub(r"-O\d", "-O0", (optflags_str.replace("OPTFLAGS", "DRIVER_OPTFLAGS")))
if inject_blame_opt is not None:
output += target.name + ": " + "BLAMEOPTS=" + inject_blame_opt + "\n"
#TODO: one day we can decide to use gcc also.
if stat_targets is not None:
for stat_target in stat_targets:
if target.name == stat_target:
output += target.name + ": " + stat_options.name + "=" + \
StatisticsOptions.get_options(target.specs) + "\n"
stat_targets.remove(stat_target)
output += target.name + ": " + "EXECUTABLE=" + target.name + "_" + executable.value + "\n"
output += target.name + ": " + "$(addprefix " + target.name + "_,"
if common.selected_gen_std != common.GenStdID.ISPC:
output += "$(SOURCES:" + get_file_ext() + "=.o))\n"
else:
output += "$(patsubst %.ispc,%.o," + "$(SOURCES:" + get_file_ext() + "=.o))" + ")\n"
output += "\t" + "$(COMPILER) $(LDFLAGS) $(STDFLAGS) $(OPTFLAGS) -o $(EXECUTABLE) $^\n\n"
if stat_targets is not None and len(stat_targets) != 0:
common.log_msg(logging.WARNING, "Can't find relevant stat_targets: " + str(stat_targets), forced_duplication=True)
# 4. Force make to rebuild everything
# TODO: replace with PHONY
output += "FORCE:\n\n"
for source in sources.value.split():
source_prefix = ""
force_str = " FORCE\n"
if creduce_file and creduce_file != source:
source_prefix = "$(TEST_PWD)/"
force_str = "\n"
source_name = source.split(".")[0]
output += "%" + source_name + ".o: " + source_prefix + source + force_str
# For performance reasons, the driver should always be compiled with -O0
optflags_name = "$(OPTFLAGS)" if source_name != "driver" else "$(DRIVER_OPTFLAGS)"
output += "\t" + "$(COMPILER) $(CXXFLAGS) $(STDFLAGS) " + optflags_name + " -o $@ -c $<"
if source_name == "func":
output += " $(STATFLAGS) "
if inject_blame_opt is not None:
output += " $(BLAMEOPTS) "
output += "\n\n"
output += "clean:\n"
output += "\trm *.o *_$(EXECUTABLE)\n\n"
# 5. Define run targets
native_arch = detect_native_arch()
for target in CompilerTarget.all_targets:
if only_target is not None and only_target.name != target.name:
continue
output += "run_" + target.name + ": " + target.name + "_" + executable.value + "\n"
output += "\t@"
required_sde_arch = define_sde_arch(native_arch, target.arch.sde_arch)
if required_sde_arch != "":
output += "sde -" + required_sde_arch + " -- "
output += "." + os.sep + target.name + "_" + executable.value + "\n\n"
out_file = None
if not os.path.isfile(out_file_name):
out_file = open(out_file_name, "w")
else:
if force:
out_file = open(out_file_name, "w")
else:
common.print_and_exit("File already exists. Use -f if you want to rewrite it.")
out_file.write(output)
out_file.close()
###############################################################################
if __name__ == '__main__':
if os.environ.get("YARPGEN_HOME") is None:
sys.stderr.write("\nWarning: please set YARPGEN_HOME environment variable to point to yarpgen's directory,"
"using " + common.yarpgen_home + " for now\n")
description = 'Generator of Test_Makefiles.'
parser = argparse.ArgumentParser(description=description, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--std', dest="std_str", default="c++", type=str,
help='Language standard. Possible variants are ' + str(list(common.StrToGenStdId))[1:-1])
parser.add_argument("--config-file", dest="config_file",
default=os.path.join(common.yarpgen_scripts, default_test_sets_file_name), type=str,
help="Configuration file for testing")
parser.add_argument("-o", "--output", dest="out_file", default=Test_Makefile_name, type=str,
help="Output file")
parser.add_argument("-f", "--force", dest="force", default=False, action="store_true",
help="Rewrite output file")
parser.add_argument("-v", "--verbose", dest="verbose", default=False, action="store_true",
help="Increase output verbosity")
parser.add_argument("--log-file", dest="log_file", type=str,
help="Logfile")
parser.add_argument("--creduce-file", dest="creduce_file", default=None, type=str,
help="Source file to reduce")
parser.add_argument("--collect-stat", dest="collect_stat", default="", type=str,
help="List of testing sets for statistics collection")
args = parser.parse_args()
log_level = logging.DEBUG if args.verbose else logging.INFO
common.setup_logger(args.log_file, log_level)
common.check_python_version()
common.set_gen_standard(args.std_str)
set_standard(StdID.get_pretty_std_name(StdID.CXX11))
gen_makefile(os.path.abspath(args.out_file), args.force, args.config_file, creduce_file=args.creduce_file,
stat_targets=args.collect_stat.split())
|
[] |
[] |
[
"YARPGEN_HOME"
] |
[]
|
["YARPGEN_HOME"]
|
python
| 1 | 0 | |
docs/source/conf.py
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# import m2r
import builtins
import glob
import inspect
import os
import re
import shutil
import sys
import pt_lightning_sphinx_theme
from sphinx.ext import apidoc
PATH_HERE = os.path.abspath(os.path.dirname(__file__))
PATH_ROOT = os.path.join(PATH_HERE, '..', '..')
sys.path.insert(0, os.path.abspath(PATH_ROOT))
builtins.__LIGHTNING_BOLT_SETUP__: bool = True
SPHINX_MOCK_REQUIREMENTS = int(os.environ.get('SPHINX_MOCK_REQUIREMENTS', True))
import pl_bolts # noqa: E402
# -- Project information -----------------------------------------------------
# this name shall match the project name in Github as it is used for linking to code
project = 'PyTorch-Lightning-Bolts'
copyright = pl_bolts.__copyright__
author = pl_bolts.__author__
# The short X.Y version
version = pl_bolts.__version__
# The full version, including alpha/beta/rc tags
release = pl_bolts.__version__
# Options for the linkcode extension
# ----------------------------------
github_user = 'PyTorchLightning'
github_repo = project
# -- Project documents -------------------------------------------------------
# export the README
with open(os.path.join(PATH_ROOT, 'README.md'), 'r') as fp:
readme = fp.read()
# TODO: temp fix removing SVG badges and GIF, because PDF cannot show them
readme = re.sub(r'(\[!\[.*\))', '', readme)
readme = re.sub(r'(!\[.*.gif\))', '', readme)
for dir_name in (os.path.basename(p) for p in glob.glob(os.path.join(PATH_ROOT, '*')) if os.path.isdir(p)):
readme = readme.replace('](%s/' % dir_name, '](%s/%s/' % (PATH_ROOT, dir_name))
with open('readme.md', 'w') as fp:
fp.write(readme)
# copy all documents from GH templates like contribution guide
for md in glob.glob(os.path.join(PATH_ROOT, '.github', '*.md')):
shutil.copy(md, os.path.join(PATH_HERE, os.path.basename(md)))
# export the changelog
with open(os.path.join(PATH_ROOT, 'CHANGELOG.md'), 'r') as fp:
chlog_lines = fp.readlines()
# enrich short subsub-titles to be unique
chlog_ver = ''
for i, ln in enumerate(chlog_lines):
if ln.startswith('## '):
chlog_ver = ln[2:].split('-')[0].strip()
elif ln.startswith('### '):
ln = ln.replace('###', f'### {chlog_ver} -')
chlog_lines[i] = ln
with open(os.path.join(PATH_HERE, 'CHANGELOG.md'), 'w') as fp:
fp.writelines(chlog_lines)
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '2.4'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
# 'sphinxcontrib.mockautodoc', # raises error: directive 'automodule' is already registered ...
# 'sphinxcontrib.fulltoc', # breaks pytorch-theme with unexpected kw argument 'titles_only'
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.linkcode',
'sphinx.ext.autosummary',
'sphinx.ext.napoleon',
'sphinx.ext.imgmath',
'recommonmark',
'sphinx.ext.autosectionlabel',
# 'm2r',
# 'nbsphinx', # it seems some sphinx issue
'sphinx_autodoc_typehints',
'sphinx_copybutton',
'sphinx_paramlinks',
'sphinx_togglebutton',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# https://berkeley-stat159-f17.github.io/stat159-f17/lectures/14-sphinx..html#conf.py-(cont.)
# https://stackoverflow.com/questions/38526888/embed-ipython-notebook-in-sphinx-document
# I execute the notebooks manually in advance. If notebooks test the code,
# they should be run at build time.
nbsphinx_execute = 'never'
nbsphinx_allow_errors = True
nbsphinx_requirejs_path = ''
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = {
'.rst': 'restructuredtext',
'.txt': 'markdown',
'.md': 'markdown',
'.ipynb': 'nbsphinx',
}
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
'api/pl_bolts.rst',
'api/modules.rst',
'api/pl_bolts.submit.rst',
'api/pl_bolts.utils.*',
'PULL_REQUEST_TEMPLATE.md',
]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pt_lightning_sphinx_theme'
html_theme_path = [pt_lightning_sphinx_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'pytorch_project': pl_bolts.__homepage__,
'canonical_url': pl_bolts.__homepage__,
'collapse_navigation': False,
'display_version': True,
'logo_only': False,
}
html_logo = '_images/logos/bolts_logo.png'
html_favicon = '_images/logos/lightning_icon.svg'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_images', '_templates', '_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = project + '-doc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, project + '.tex', project + ' Documentation', author, 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, project, project + ' Documentation', [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [(
master_doc,
project,
project + ' Documentation',
author,
project,
'The lightweight PyTorch wrapper for ML researchers. Scale your models. Write less boilerplate.',
'Miscellaneous',
)]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'pytorch_lightning': ('https://pytorch-lightning.readthedocs.io/en/stable/', None),
'python': ('https://docs.python.org/3', None),
'torch': ('https://pytorch.org/docs/stable/', None),
'numpy': ('https://numpy.org/doc/stable/', None),
'PIL': ('https://pillow.readthedocs.io/en/stable/', None),
}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# https://github.com/rtfd/readthedocs.org/issues/1139
# I use sphinx-apidoc to auto-generate API documentation for my project.
# Right now I have to commit these auto-generated files to my repository
# so that RTD can build them into HTML docs. It'd be cool if RTD could run
# sphinx-apidoc for me, since it's easy to forget to regen API docs
# and commit them to my repo after making changes to my code.
# packages for which sphinx-apidoc should generate the docs (.rst files)
PACKAGES = [
pl_bolts.__name__,
]
apidoc_output_folder = os.path.join(PATH_HERE, 'api')
def run_apidoc(_):
sys.path.insert(0, apidoc_output_folder)
# delete api-doc files before generating them
if os.path.exists(apidoc_output_folder):
shutil.rmtree(apidoc_output_folder)
for pkg in PACKAGES:
argv = [
'-e',
'-o',
apidoc_output_folder,
os.path.join(PATH_ROOT, pkg),
'**/test_*',
'--force',
'--private',
'--module-first',
]
apidoc.main(argv)
def setup(app):
# this is for hiding doctest decoration,
# see: http://z4r.github.io/python/2011/12/02/hides-the-prompts-and-output/
app.add_javascript('copybutton.js')
app.connect('builder-inited', run_apidoc)
# copy all notebooks to local folder
path_nbs = os.path.join(PATH_HERE, 'notebooks')
if not os.path.isdir(path_nbs):
os.mkdir(path_nbs)
for path_ipynb in glob.glob(os.path.join(PATH_ROOT, 'notebooks', '*.ipynb')):
path_ipynb2 = os.path.join(path_nbs, os.path.basename(path_ipynb))
shutil.copy(path_ipynb, path_ipynb2)
# Ignoring Third-party packages
# https://stackoverflow.com/questions/15889621/sphinx-how-to-exclude-imports-in-automodule
def package_list_from_file(file):
mocked_packages = []
with open(file, 'r') as fp:
for ln in fp.readlines():
found = [ln.index(ch) for ch in list(',=<>#') if ch in ln]
pkg = ln[:min(found)] if found else ln
if pkg.strip():
mocked_packages.append(pkg.strip())
return mocked_packages
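# For example (illustrative requirement lines, not the project's actual pins):
#   "torch>=1.6"                  -> "torch"
#   "scikit-learn==0.23 # note"   -> "scikit-learn"
# i.e. everything after the first of ",=<>#" is dropped before mocking.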
# define mapping from PyPI names to python imports
PACKAGE_MAPPING = {
'pytorch-lightning': 'pytorch_lightning',
'scikit-learn': 'sklearn',
'Pillow': 'PIL',
'opencv-python': 'cv2',
}
MOCK_PACKAGES = []
if SPHINX_MOCK_REQUIREMENTS:
# mock also base packages when we are on RTD since we don't install them there
MOCK_PACKAGES += package_list_from_file(os.path.join(PATH_ROOT, 'requirements.txt'))
MOCK_PACKAGES += package_list_from_file(os.path.join(PATH_ROOT, 'requirements', 'models.txt'))
MOCK_PACKAGES += package_list_from_file(os.path.join(PATH_ROOT, 'requirements', 'loggers.txt'))
# replace PyPI packages by importing ones
MOCK_PACKAGES = [PACKAGE_MAPPING.get(pkg, pkg) for pkg in MOCK_PACKAGES]
autodoc_mock_imports = MOCK_PACKAGES
# for mod_name in MOCK_REQUIRE_PACKAGES:
# sys.modules[mod_name] = mock.Mock()
# Resolve function
# This function is used to populate the (source) links in the API
def linkcode_resolve(domain, info):
def find_source():
# try to find the file and line number, based on code from numpy:
# https://github.com/numpy/numpy/blob/master/doc/source/conf.py#L286
obj = sys.modules[info['module']]
for part in info['fullname'].split('.'):
obj = getattr(obj, part)
fname = inspect.getsourcefile(obj)
# https://github.com/rtfd/readthedocs.org/issues/5735
if any([s in fname for s in ('readthedocs', 'rtfd', 'checkouts')]):
# /home/docs/checkouts/readthedocs.org/user_builds/pytorch_lightning/checkouts/
# devel/pytorch_lightning/utilities/cls_experiment.py#L26-L176
path_top = os.path.abspath(os.path.join('..', '..', '..'))
fname = os.path.relpath(fname, start=path_top)
else:
# Local build, imitate master
fname = 'master/' + os.path.relpath(fname, start=os.path.abspath('..'))
source, lineno = inspect.getsourcelines(obj)
return fname, lineno, lineno + len(source) - 1
if domain != 'py' or not info['module']:
return None
try:
filename = '%s#L%d-L%d' % find_source()
except Exception:
filename = info['module'].replace('.', '/') + '.py'
# import subprocess
# tag = subprocess.Popen(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE,
# universal_newlines=True).communicate()[0][:-1]
branch = filename.split('/')[0]
# do mapping from latest tags to master
branch = {'latest': 'master', 'stable': 'master'}.get(branch, branch)
filename = '/'.join([branch] + filename.split('/')[1:])
return "https://github.com/%s/%s/blob/%s" \
% (github_user, github_repo, filename)
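# Sketch of a resulting link for a local build (the module path is hypothetical):
# if find_source() yields ('master/pl_bolts/models/foo.py', 10, 42), the function returns
# "https://github.com/PyTorchLightning/PyTorch-Lightning-Bolts/blob/master/pl_bolts/models/foo.py#L10-L42"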
autodoc_member_order = 'groupwise'
autoclass_content = 'both'
# the options are fixed and will be soon in release,
# see https://github.com/sphinx-doc/sphinx/issues/5459
autodoc_default_options = {
'members': True,
'methods': True,
'special-members': '__call__',
'exclude-members': '_abc_impl',
'show-inheritance': True,
'noindex': True,
}
# Sphinx will add “permalinks” for each heading and description environment as paragraph signs that
# become visible when the mouse hovers over them.
# This value determines the text for the permalink; it defaults to "¶". Set it to None or the empty
# string to disable permalinks.
# https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-html_add_permalinks
html_add_permalinks = "¶"
# True to prefix each section label with the name of the document it is in, followed by a colon.
# For example, index:Introduction for a section called Introduction that appears in document index.rst.
# Useful for avoiding ambiguity when the same section heading appears in different documents.
# http://www.sphinx-doc.org/en/master/usage/extensions/autosectionlabel.html
autosectionlabel_prefix_document = True
# only run doctests marked with a ".. doctest::" directive
doctest_test_doctest_blocks = ''
doctest_global_setup = """
import importlib
import os
import torch
import pytorch_lightning as pl
from pytorch_lightning import Trainer, LightningModule
"""
coverage_skip_undoc_in_source = True
|
[] |
[] |
[
"SPHINX_MOCK_REQUIREMENTS"
] |
[]
|
["SPHINX_MOCK_REQUIREMENTS"]
|
python
| 1 | 0 | |
internal/compute/replace_command_test.go
|
package compute
import (
"context"
"os"
"regexp"
"testing"
"github.com/hexops/autogold"
"github.com/sourcegraph/sourcegraph/internal/comby"
)
func Test_replace(t *testing.T) {
test := func(input string, cmd *Replace) string {
result, err := replace(context.Background(), []byte(input), cmd.SearchPattern, cmd.ReplacePattern)
if err != nil {
return err.Error()
}
return result.Value
}
autogold.Want(
"regexp search replace",
"needs a bit more queryrunner").
Equal(t, test("needs more queryrunner", &Replace{
SearchPattern: &Regexp{Value: regexp.MustCompile(`more (\w+)`)},
ReplacePattern: "a bit more $1",
}))
// If we are not on CI, skip the test if comby is not installed.
if os.Getenv("CI") == "" && !comby.Exists() {
t.Skip("comby is not installed on the PATH. Try running 'bash <(curl -sL get.comby.dev)'.")
}
autogold.Want(
"structural search replace",
"foo(baz, bar)").
Equal(t, test("foo(bar, baz)", &Replace{
SearchPattern: &Comby{Value: `foo(:[x], :[y])`},
ReplacePattern: "foo(:[y], :[x])",
}))
}
|
[
"\"CI\""
] |
[] |
[
"CI"
] |
[]
|
["CI"]
|
go
| 1 | 0 | |
example_project/create_superuser.py
|
#!/usr/bin/env python
"""
Copyright 2017 Peter Urda
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import django
if __name__ == '__main__':
# Configure Django before importing models
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example_project.settings")
django.setup()
# Import required models for this script
from django.contrib.auth.models import User
username = 'admin'
password = 'admin123'
if not User.objects.filter(username=username).first():
new_user = User.objects.create_superuser(
username=username,
email='[email protected]',
password=password,
)
new_user.first_name = 'Admin'
new_user.last_name = 'User'
new_user.save()
print(
"Created superuser '{username}' with password '{password}'".format(
username=username,
password=password,
)
)
else:
print("User '{}' already exists, doing nothing.".format(username))
sys.exit(0)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
app/redis.go
|
package app
import (
"os"
"time"
"github.com/garyburd/redigo/redis"
)
func (app *App) reconnectRedisIfNeeeded() {
res, _ := app.RedisConn.Do("PING")
if pong, ok := res.([]byte); !ok || string(pong) != "PONG" {
err := app.setupRedis()
if err != nil {
panic(err)
}
}
}
func (app *App) setupRedis() error {
connectTimeout := 1 * time.Second
readTimeout := 1 * time.Second
writeTimeout := 1 * time.Second
if url := os.Getenv("REDIS_URL"); url != "" {
conn, err := redis.DialURL(url,
redis.DialConnectTimeout(connectTimeout),
redis.DialReadTimeout(readTimeout),
redis.DialWriteTimeout(writeTimeout))
if err != nil {
return err
}
app.RedisConn = conn
return nil
}
conn, err := redis.Dial("tcp", ":6379",
redis.DialConnectTimeout(connectTimeout),
redis.DialReadTimeout(readTimeout),
redis.DialWriteTimeout(writeTimeout))
if err != nil {
return err
}
app.RedisConn = conn
return nil
}
|
[
"\"REDIS_URL\""
] |
[] |
[
"REDIS_URL"
] |
[]
|
["REDIS_URL"]
|
go
| 1 | 0 | |
app/server/controller/handler/handlers.go
|
/*
Copyright 2019 LitmusChaos Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package handler
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"strings"
"github.com/gorilla/mux"
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v3"
"github.com/litmuschaos/charthub.litmuschaos.io/app/server/pkg/analytics"
"github.com/litmuschaos/charthub.litmuschaos.io/app/server/pkg/community"
)
// ChaosChartPath refers the location of the freshly updated repository
var ChaosChartPath = os.Getenv("GOPATH") + "/src/github.com/litmuschaos/charthub.litmuschaos.io/app/client/public/chaos-charts/"
var githubData = os.Getenv("GOPATH") + "/src/github.com/litmuschaos/charthub.litmuschaos.io/app/client/public/githubData/"
/* pathParser takes the relative path <path>, forms the system path <fileLookedPath>,
and returns the contents of that file
*/
func pathParser(path string) ([]byte, error) {
var fileLookedPath = ChaosChartPath + path
fileContent, err := ioutil.ReadFile(fileLookedPath)
if err != nil {
return nil, fmt.Errorf("unable to read file, error: %v", err)
}
return fileContent, nil
}
//GetIconHandler takes the experiment group and icon file required and returns the specific icon file
func GetIconHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
img, err := os.Open(ChaosChartPath + vars["version"]+"/charts/" + vars["expGroup"] + "/icons/" + vars["iconFile"])
responseStatusCode := 200
if err != nil {
responseStatusCode = 500
log.Error(err)
fmt.Fprint(w, "icon cannot be fetched, err : "+err.Error())
}
defer img.Close()
w.Header().Set("Access-Control-Allow-Origin", "*")
w.WriteHeader(responseStatusCode)
w.Header().Set("Content-Type", "image/png") // <-- set the content-type header
io.Copy(w, img)
}
// FileHandler extracts the file path from the "file" query parameter of the respective URLs
func FileHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
filePath, ok := r.URL.Query()["file"]
if !ok || len(filePath[0]) < 1 {
return
}
fileContent, err := pathParser(vars["version"] + "/" + string(filePath[0]))
if err != nil {
log.Error(err)
fmt.Fprint(w, "file content parsing error, err : "+err.Error())
}
(w).Header().Set("Access-Control-Allow-Origin", "*")
fmt.Fprint(w, string(fileContent))
}
// GetAnalyticsData gets the data from GA instance
func GetAnalyticsData(w http.ResponseWriter, r *http.Request) {
out, err := json.Marshal(analytics.GAResponseJSONObject)
responseStatusCode := 200
if err != nil {
log.Error(err)
responseStatusCode = 500
fmt.Fprint(w, "unable to get analytics data, err : "+err.Error())
}
writeHeaders(&w, responseStatusCode)
(w).Header().Set("Access-Control-Allow-Origin", "*")
w.Write(out)
}
// GetChart is used to create YAML objects from the experiments' directories of the requested chart
func GetChart(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
filePath := ChaosChartPath + vars["version"] + "/charts/" + vars["chartId"]
chart, err := getYAMLFileContent(filePath)
responseStatusCode := 200
if err != nil {
responseStatusCode = 500
fmt.Fprint(w, "chart retrieval error, err : "+err.Error())
}
response, err := json.Marshal(chart)
if err != nil {
responseStatusCode = 500
fmt.Fprint(w, "chart marshalling error, err : "+err.Error())
}
writeHeaders(&w, responseStatusCode)
(w).Header().Set("Access-Control-Allow-Origin", "*")
fmt.Fprint(w, string(response))
}
func readCSVFile(path string) (Chart, error) {
var chart Chart
csvFile, err := ioutil.ReadFile(path)
if err != nil {
return chart, fmt.Errorf("unable to read file, err: %+v", err)
}
err = yaml.Unmarshal([]byte(csvFile), &chart)
return chart, err
}
func readPackageFile(path string) (PackageInformation, error) {
var packageInfo PackageInformation
packageFile, err := ioutil.ReadFile(path)
if err != nil {
return packageInfo, fmt.Errorf("unable to read package file, err: %+v", err)
}
err = yaml.Unmarshal([]byte(packageFile), &packageInfo)
log.Printf("package info %+v", packageInfo)
return packageInfo, err
}
func readExperimentFile(path string) (Chart, error) {
var experiment Chart
experimentFile, err := ioutil.ReadFile(path)
if err != nil {
return experiment, fmt.Errorf("unable to read experiment file, err: %+v", err)
}
if yaml.Unmarshal([]byte(experimentFile), &experiment) != nil {
return experiment, err
}
return experiment, nil
}
func getYAMLFileContent(fileLocation string) (Chart, error) {
chartPathSplitted := strings.Split(fileLocation, "/")
CSVFilePath := fileLocation + "/" + chartPathSplitted[len(chartPathSplitted)-1] + ".chartserviceversion.yaml"
packageFilePath := fileLocation + "/" + chartPathSplitted[len(chartPathSplitted)-1] + ".package.yaml"
chart, err := readCSVFile(CSVFilePath)
if err != nil {
return chart, err
}
packageInfo, err := readPackageFile(packageFilePath)
if err != nil {
return chart, err
}
chart.PackageInfo = packageInfo
for _, exp := range packageInfo.Experiments {
experimentFilePath := fileLocation + "/" + exp.Name + "/" + exp.Name + ".chartserviceversion.yaml"
experiment, err := readExperimentFile(experimentFilePath)
if err != nil {
log.Error(err)
}
chart.Experiments = append(chart.Experiments, experiment)
}
return chart, nil
}
// GetCharts is used to create a list of YAML objects from the charts' directories
func GetCharts(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
files, err := filepath.Glob(ChaosChartPath + vars["version"] + "/charts/*")
if err != nil {
log.Error(err)
}
var charts []Chart
for _, fileName := range files {
chart, err := getYAMLFileContent(fileName)
if err != nil {
log.Error(err)
fmt.Fprint(w, "file content yaml conversion error, err : "+err.Error())
}
charts = append(charts, chart)
}
response, err := json.Marshal(charts)
if err != nil {
fmt.Fprint(w, "chart marshalling error, err : "+err.Error())
}
writeHeaders(&w, 200)
(w).Header().Set("Access-Control-Allow-Origin", "*")
fmt.Fprint(w, string(response))
}
// GetChartVersion will return the available version of chaos-chart
func GetChartVersion(w http.ResponseWriter, r *http.Request) {
version, err := ioutil.ReadFile("/tmp/version.json")
if err != nil {
log.Error(err)
return
}
writeHeaders(&w, 200)
(w).Header().Set("Access-Control-Allow-Origin", "*")
fmt.Fprint(w, string(version))
}
func writeHeaders(w *http.ResponseWriter, statusCode int) {
(*w).Header().Set("Content-Type", "application/json; charset=UTF-8")
(*w).Header().Set("Access-Control-Allow-Origin", "*")
(*w).Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
(*w).Header().Set("Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization")
(*w).WriteHeader(statusCode)
}
// GetGithubRepoData will return the github repo data for litmus
func GetGithubRepoData(w http.ResponseWriter, r *http.Request) {
response, err := ioutil.ReadFile(githubData + "githubRepoData.json")
responseStatusCode := 200
if err != nil {
responseStatusCode = 500
fmt.Errorf("unable to read file, error: %v", err)
}
writeHeaders(&w, responseStatusCode)
fmt.Fprint(w, string(response))
}
// GetCommunityAnalyticsData returns all the analytics data related to the community
func GetCommunityAnalyticsData(w http.ResponseWriter, r *http.Request) {
data, err := community.GetAnalytics()
responseStatusCode := 200
if err != nil {
log.Error(err)
responseStatusCode = 500
fmt.Fprint(w, "unable to get community analytics data, err : "+err.Error())
}
writeHeaders(&w, responseStatusCode)
(w).Header().Set("Access-Control-Allow-Origin", "*")
w.Write(data)
}
|
[
"\"GOPATH\"",
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
api/api/apis/search.py
|
# -*- coding: utf-8 -*-
"""
@author: Chris Lucas
"""
import os
import json
from flask_restplus import Namespace, Resource, fields, reqparse
from werkzeug.exceptions import NotFound
from psycopg2.extras import RealDictCursor
from core.constants import DEMO_FIND_SPOTS
from core.db import get_db
api = Namespace(
'Search',
description='Search the database.'
)
geometry_fields = api.model('geometry', {
'type': fields.String(),
'coordinates': fields.List(fields.Float),
})
geometry_nested = fields.Nested(geometry_fields)
geometry_wildcard = fields.Wildcard(geometry_nested)
search_result = api.model('Search Result', {
'find_spot_id': fields.Integer(description='The ID of the find spot.'),
'geometry': geometry_wildcard,
'shortend_description': fields.String(description='A shortend description of the find spot.')
})
parser = reqparse.RequestParser()
parser.add_argument('query', type=str, help='The search query.')
@api.route('/')
class Search(Resource):
@api.expect(parser)
@api.marshal_with(search_result)
def get(self):
"""
List all find spots in database with a match with the search query, ranked by computed relevancy.
"""
args = parser.parse_args()
db = get_db()
cursor = db.cursor(cursor_factory=RealDictCursor)
search_query = args.query.strip()
find_spot_match = None
if search_query.isdecimal():
query = """
SELECT fs."find_spot_ID" find_spot_id,
ST_AsGeoJSON(fsp.geom) geometry,
LEFT(fs.description, 50) shortend_description
FROM seslr.find_spot fs JOIN seslr.find_spot_points fsp ON fs."find_spot_ID" = fsp.find_spot_id
WHERE fs."find_spot_ID" = %(search_query)s
"""
cursor.execute(query, {'search_query': search_query})
find_spot_match = cursor.fetchone()
query = """
WITH search_results AS (
SELECT fs.find_spot_id,
ts_rank(fs.description_tokens, websearch_to_tsquery(%(search_query)s)) AS rank1,
ts_rank(fs.type_tokens, websearch_to_tsquery(%(search_query)s)) AS rank2,
ts_rank(fs.toponym_tokens, websearch_to_tsquery(%(search_query)s)) AS rank3,
ts_rank(f.description_tokens, websearch_to_tsquery(%(search_query)s)) AS rank4,
ts_rank(f.features_tokens, websearch_to_tsquery(%(search_query)s)) AS rank5,
ts_rank(f.features_architecture_tokens, websearch_to_tsquery(%(search_query)s)) AS rank6
FROM seslr.find_spot_search_vectors fs JOIN seslr.find_search_vectors f ON fs.find_spot_id = f.find_spot_id
WHERE fs.description_tokens @@ websearch_to_tsquery(%(search_query)s)
OR fs.type_tokens @@ websearch_to_tsquery(%(search_query)s)
OR fs.toponym_tokens @@ websearch_to_tsquery(%(search_query)s)
OR f.description_tokens @@ websearch_to_tsquery(%(search_query)s)
OR f.features_tokens @@ websearch_to_tsquery(%(search_query)s)
OR f.features_architecture_tokens @@ websearch_to_tsquery(%(search_query)s)
)
SELECT sr.find_spot_id,
ST_AsGeoJSON(fsp.geom) geometry,
LEFT(fs.description, 50) shortend_description,
coalesce(sr.rank1, 0) + coalesce(sr.rank2, 0) + coalesce(sr.rank3, 0) + coalesce(sr.rank4, 0) + coalesce(sr.rank5, 0) + coalesce(sr.rank6, 0) AS rank
FROM search_results sr JOIN seslr.find_spot fs ON sr.find_spot_id = fs."find_spot_ID" JOIN seslr.find_spot_points fsp ON sr.find_spot_id = fsp.find_spot_id
ORDER BY rank DESC
"""
cursor.execute(query, {'search_query': search_query})
results = cursor.fetchall()
cursor.close()
if not results and find_spot_match is None:
raise NotFound('No matches found in database.')
else:
if find_spot_match is not None:
results.insert(0, find_spot_match)
if os.environ['SESLR_APP_MODE'] == 'demo':
results = [r for r in results if r['find_spot_id'] in DEMO_FIND_SPOTS]
for r in results:
r['geometry'] = json.loads(r['geometry'])
return results
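# Example (hypothetical mount point and query, depending on how the namespace is registered):
#   GET /search/?query=harbour
# returns a relevance-ranked list of {find_spot_id, geometry, shortend_description}
# objects, or 404 (NotFound) when nothing in the database matches.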
|
[] |
[] |
[
"SESLR_APP_MODE"
] |
[]
|
["SESLR_APP_MODE"]
|
python
| 1 | 0 | |
integration/traffic/livetraffic/trafficproxy/csv_string_test.go
|
package trafficproxy
import "testing"
func TestFlowCSVString(t *testing.T) {
cases := []struct {
f Flow
csvString string
humanFriendlyCSVString string
}{
{
Flow{WayID: 829733412, Speed: 20.280001, TrafficLevel: TrafficLevel_FREE_FLOW, Timestamp: 1579419488000},
"829733412,20.280001,7,1579419488000",
"829733412,20.280001,FREE_FLOW,1579419488000",
},
{
Flow{WayID: -129639168, Speed: 31.389999, TrafficLevel: TrafficLevel_FREE_FLOW, Timestamp: 1579419488000},
"-129639168,31.389999,7,1579419488000",
"-129639168,31.389999,FREE_FLOW,1579419488000",
},
}
for _, c := range cases {
cs := c.f.CSVString()
if cs != c.csvString {
t.Errorf("flow: %v, expect csv string %s, but got %s", c.f, c.csvString, cs)
}
hs := c.f.HumanFriendlyCSVString()
if hs != c.humanFriendlyCSVString {
t.Errorf("flow: %v, expect human friendly csv string %s, but got %s", c.f, c.humanFriendlyCSVString, hs)
}
}
}
func TestIncidentCSVString(t *testing.T) {
cases := []struct {
incident Incident
s string
humanFriendlyCSVString string
}{
{
Incident{
IncidentID: "TTI-f47b8dba-59a3-372d-9cec-549eb252e2d5-TTR46312939215361-1",
AffectedWayIDs: []int64{100663296, -1204020275, 100663296, -1204020274, 100663296, -916744017, 100663296, -1204020245, 100663296, -1194204646, 100663296, -1204394608, 100663296, -1194204647, 100663296, -129639168, 100663296, -1194204645},
IncidentType: IncidentType_MISCELLANEOUS,
IncidentSeverity: IncidentSeverity_CRITICAL,
IncidentLocation: &Location{Lat: 44.181220, Lon: -117.135840},
Description: "Construction on I-84 EB near MP 359, Drive with caution.",
FirstCrossStreet: "",
SecondCrossStreet: "",
StreetName: "I-84 E",
EventCode: 500,
AlertCEventQuantifier: 0,
IsBlocking: false,
Timestamp: 1579419488000,
},
"TTI-f47b8dba-59a3-372d-9cec-549eb252e2d5-TTR46312939215361-1,\"100663296,-1204020275,100663296,-1204020274,100663296,-916744017,100663296,-1204020245,100663296,-1194204646,100663296,-1204394608,100663296,-1194204647,100663296,-129639168,100663296,-1194204645\",5,1,44.181220,-117.135840,\"Construction on I-84 EB near MP 359, Drive with caution.\",,,I-84 E,500,0,0,1579419488000",
"TTI-f47b8dba-59a3-372d-9cec-549eb252e2d5-TTR46312939215361-1,\"100663296,-1204020275,100663296,-1204020274,100663296,-916744017,100663296,-1204020245,100663296,-1194204646,100663296,-1204394608,100663296,-1194204647,100663296,-129639168,100663296,-1194204645\",MISCELLANEOUS,CRITICAL,44.181220,-117.135840,\"Construction on I-84 EB near MP 359, Drive with caution.\",,,I-84 E,500,0,false,1579419488000",
},
}
for _, c := range cases {
s := c.incident.CSVString()
if s != c.s {
t.Errorf("incident: %v, expect csv string %s, but got %s", c.incident, c.s, s)
}
}
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
src/main/java/com/emeraldquest/emeraldquest/EmeraldQuest.java
|
package com.emeraldquest.emeraldquest;
import java.io.*;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.net.ProtocolException;
import java.net.URL;
import java.text.ParseException;
import java.util.*;
import com.emeraldquest.emeraldquest.commands.*;
import org.bukkit.*;
import org.bukkit.World.Environment;
import org.bukkit.command.Command;
import org.bukkit.command.CommandSender;
import org.bukkit.entity.*;
import org.bukkit.event.server.ServerListPingEvent;
import org.bukkit.inventory.ItemStack;
import org.bukkit.inventory.meta.ItemMeta;
import org.bukkit.metadata.FixedMetadataValue;
import org.bukkit.plugin.java.JavaPlugin;
import org.bukkit.potion.PotionEffect;
import org.bukkit.potion.PotionEffectType;
import org.bukkit.scheduler.BukkitScheduler;
import org.bukkit.scoreboard.*;
import com.google.gson.Gson;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.JedisPoolConfig;
import javax.net.ssl.HttpsURLConnection;
/**
* Created by explodi on 11/1/15.
*/
public class EmeraldQuest extends JavaPlugin {
// TODO: remove env variables not being used anymore
// Connecting to REDIS
// Links to the administration account via Environment Variables
public final static String EMERALDQUEST_ENV = System.getenv("EMERALDQUEST_ENV") != null ? System.getenv("EMERALDQUEST_ENV") : "development";
public final static UUID ADMIN_UUID = System.getenv("ADMIN_UUID") != null ? UUID.fromString(System.getenv("ADMIN_UUID")) : null;
public final static Integer DENOMINATION_FACTOR = System.getenv("DENOMINATION_FACTOR") != null ? Integer.parseInt(System.getenv("DENOMINATION_FACTOR")) : 1;
//these make it so land price/max and min loot can be set in env
public final static Integer LAND_PRICE = System.getenv("LAND_PRICE") != null ? Integer.parseInt(System.getenv("LAND_PRICE")) : 15;
public final static Integer MIN_LOOT = System.getenv("MIN_LOOT") != null ? Integer.parseInt(System.getenv("MIN_LOOT")) : 5;
public final static Integer MAX_LOOT = System.getenv("MAX_LOOT") != null ? Integer.parseInt(System.getenv("MAX_LOOT")) : LAND_PRICE;
public final static Integer LOOTIPLIER = System.getenv("LOOTIPLIER") != null ? Integer.parseInt(System.getenv("LOOTIPLIER")) : 5;
public final static String DENOMINATION_NAME = System.getenv("DENOMINATION_NAME") != null ? System.getenv("DENOMINATION_NAME") : "Emeralds";
public final static String SET_PvP = System.getenv("SET_PvP") != null ? System.getenv("SET_PvP") : "false";
public final static int MAX_STOCK = 100;
public final static String SERVER_NAME = System.getenv("SERVER_NAME") != null ? System.getenv("SERVER_NAME") : "EmeraldQuest";
// Support for statsd is optional but really cool
public final static String STATSD_HOST = System.getenv("STATSD_HOST") != null ? System.getenv("STATSD_HOST") : null;
public final static String STATSD_PREFIX = System.getenv("STATSD_PREFIX") != null ? System.getenv("STATSD_PREFIX") : "emerladquest";
public final static String STATSD_PORT = System.getenv("STATSD_PORT") != null ? System.getenv("STATSD_PORT") : "8125";
// Support for mixpanel analytics
public final static String MIXPANEL_TOKEN = System.getenv("MIXPANEL_TOKEN") != null ? System.getenv("MIXPANEL_TOKEN") : null;
// Support for slack bot
// REDIS: Look for Environment variables on hostname and port, otherwise defaults to localhost:6379
public final static String REDIS_HOST = System.getenv("REDIS_1_PORT_6379_TCP_ADDR") != null ? System.getenv("REDIS_1_PORT_6379_TCP_ADDR") : "localhost";
public final static Integer REDIS_PORT = System.getenv("REDIS_1_PORT_6379_TCP_PORT") != null ? Integer.parseInt(System.getenv("REDIS_1_PORT_6379_TCP_PORT")) : 6379;
public final static Jedis REDIS = new Jedis(REDIS_HOST, REDIS_PORT);
// FAILS
// public final static JedisPool REDIS_POOL = new JedisPool(new JedisPoolConfig(), REDIS_HOST, REDIS_PORT);
// Minimum transaction by default is 2000 bits
// utilities: distance and rand
public static int distance(Location location1, Location location2) {
return (int) location1.distance(location2);
}
public static int rand(int min, int max) {
return min + (int) (Math.random() * ((max - min) + 1));
}
public Player last_loot_player;
public boolean spookyMode = false;
public boolean rate_limit = false;
// caches are used to reduce the number of calls to redis, storing some chunk information in memory
public HashMap<String, Boolean> land_unclaimed_cache = new HashMap();
public HashMap<String, String> land_owner_cache = new HashMap();
public HashMap<String, String> land_permission_cache = new HashMap();
public HashMap<String, String> land_name_cache = new HashMap();
public ArrayList<ItemStack> books = new ArrayList<ItemStack>();
// when true, server is closed for maintenance and not allowing players to join in.
public boolean maintenance_mode = false;
private Map<String, CommandAction> commands;
private Map<String, CommandAction> modCommands;
private Player[] moderators;
@Override
public void onEnable() {
log("EmeraldQuest starting");
REDIS.set("STARTUP", "1");
REDIS.expire("STARTUP", 300);
if (ADMIN_UUID == null) {
log("Warning: You haven't designated a super admin. Launch with ADMIN_UUID env variable to set.");
}
// registers listener classes
getServer().getPluginManager().registerEvents(new ChatEvents(this), this);
getServer().getPluginManager().registerEvents(new BlockEvents(this), this);
getServer().getPluginManager().registerEvents(new EntityEvents(this), this);
getServer().getPluginManager().registerEvents(new InventoryEvents(this), this);
getServer().getPluginManager().registerEvents(new SignEvents(this), this);
getServer().getPluginManager().registerEvents(new ServerEvents(this), this);
// player does not lose inventory on death
Bukkit.dispatchCommand(Bukkit.getConsoleSender(), "gamerule keepInventory on");
// loads config file. If it doesn't exist, creates it.
getDataFolder().mkdir();
if (!new java.io.File(getDataFolder(), "config.yml").exists()) {
saveDefaultConfig();
}
// sets the redis save intervals
REDIS.configSet("SAVE", "900 1 300 10 60 10000");
// Removes all entities on server restart. This is a workaround for when large numbers of entities crash the server. With the release of Minecraft 1.11 and "max entity cramming" this will be unnecessary.
// removeAllEntities();
//killAllVillagers();
// creates scheduled timers (update balances, etc)
createScheduledTimers();
commands = new HashMap<String, CommandAction>();
commands.put("land", new LandCommand(this));
commands.put("clan", new ClanCommand());
commands.put("transfer", new TransferCommand(this));
commands.put("send", new SendCommand(this));
commands.put("profession", new ProfessionCommand(this));
commands.put("spawn", new SpawnCommand(this));
modCommands = new HashMap<String, CommandAction>();
modCommands.put("butcher", new ButcherCommand());
modCommands.put("killAllVillagers", new KillAllVillagersCommand(this));
modCommands.put("crashTest", new CrashtestCommand(this));
modCommands.put("mod", new ModCommand());
modCommands.put("ban", new BanCommand());
modCommands.put("permban", new PermbanCommand());
modCommands.put("unban", new UnbanCommand());
modCommands.put("banlist", new BanlistCommand());
modCommands.put("spectate", new SpectateCommand(this));
modCommands.put("emergencystop", new EmergencystopCommand());
// TODO: Remove this command after migrate.
modCommands.put("migrateclans", new MigrateClansCommand());
}
public void updateScoreboard(final Player player) throws ParseException, org.json.simple.parser.ParseException, IOException {
User user = new User(player);
ScoreboardManager scoreboardManager;
Scoreboard playSBoard;
Objective playSBoardObj;
scoreboardManager = Bukkit.getScoreboardManager();
playSBoard = scoreboardManager.getNewScoreboard();
playSBoardObj = playSBoard.registerNewObjective("wallet", "dummy");
playSBoardObj.setDisplaySlot(DisplaySlot.SIDEBAR);
playSBoardObj.setDisplayName(ChatColor.GREEN + ChatColor.BOLD.toString() + "Emerald" + ChatColor.GOLD + ChatColor.BOLD.toString() + "Quest");
Score score = playSBoardObj.getScore(ChatColor.GREEN + EmeraldQuest.DENOMINATION_NAME);
int EmAmount = countEmeralds(player);
score.setScore(EmAmount);
player.setScoreboard(playSBoard);
}
public void teleportToSpawn(Player player) {
if (!player.hasMetadata("teleporting")) {
player.sendMessage(ChatColor.GREEN + "Teleporting to spawn...");
player.setMetadata("teleporting", new FixedMetadataValue(this, true));
World world = Bukkit.getWorld("world");
Location location = world.getSpawnLocation();
location.setX(5);
location.setY(74);
location.setZ(0);
final Location spawn = location;
Chunk c = spawn.getChunk();
if (!c.isLoaded()) {
c.load();
}
EmeraldQuest plugin = this;
BukkitScheduler scheduler = Bukkit.getServer().getScheduler();
scheduler.scheduleSyncDelayedTask(this, new Runnable() {
public void run() {
player.teleport(spawn);
player.removeMetadata("teleporting", plugin);
}
}, 60L);
}
}
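// Registers the repeating tasks: scoreboard refresh every 120 ticks (~6s), a currently disabled villager
// spawn task every 72000 ticks (~1h), seasonal events every 1200 ticks (~60s) and a rate-limit reset every 100 ticks (~5s).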
public void createScheduledTimers() {
BukkitScheduler scheduler = Bukkit.getServer().getScheduler();
scheduler.scheduleSyncRepeatingTask(this, new Runnable() {
@Override
public void run() {
for (Player player : Bukkit.getServer().getOnlinePlayers()) {
User user = null;
try {
// user.createScoreBoard();
updateScoreboard(player);
} catch (ParseException e) {
e.printStackTrace();
} catch (org.json.simple.parser.ParseException e) {
e.printStackTrace();
} catch (IOException e) {
// TODO: Handle rate limiting
}
}
}
}, 0, 120L);
scheduler.scheduleSyncRepeatingTask(this, new Runnable() {
@Override
public void run() {
// A villager is born
// World world=Bukkit.getWorld("world");
// world.spawnEntity(world.getHighestBlockAt(world.getSpawnLocation()).getLocation(), EntityType.VILLAGER);
}
}, 0, 72000L);
scheduler.scheduleSyncRepeatingTask(this, new Runnable() {
@Override
public void run() {
run_season_events();
}
}, 0, 1200L);
scheduler.scheduleSyncRepeatingTask(this, new Runnable() {
@Override
public void run() {
reset_rate_limits();
}
}, 0, 100L);
}
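// During October (Calendar.MONTH == 9, zero-based) the overworld is kept at night with storms disabled ("spooky mode").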
public void run_season_events() {
java.util.Date date = new Date();
Calendar cal = Calendar.getInstance();
cal.setTime(date);
int month = cal.get(Calendar.MONTH);
if (month == 9) {
World world = this.getServer().getWorld("world");
world.setTime(20000);
world.setStorm(false);
spookyMode = true;
} else {
spookyMode = false;
}
}
public void removeAllEntities() {
World w = Bukkit.getWorld("world");
List<Entity> entities = w.getEntities();
int entitiesremoved = 0;
for (Entity entity : entities) {
entity.remove();
entitiesremoved = entitiesremoved + 1;
}
System.out.println("Killed " + entitiesremoved + " entities");
}
public void killAllVillagers() {
World w = Bukkit.getWorld("world");
List<Entity> entities = w.getEntities();
int villagerskilled = 0;
for (Entity entity : entities) {
if ((entity instanceof Villager)) {
villagerskilled = villagerskilled + 1;
((Villager) entity).remove();
}
}
w = Bukkit.getWorld("world_nether");
entities = w.getEntities();
for (Entity entity : entities) {
if ((entity instanceof Villager)) {
villagerskilled = villagerskilled + 1;
((Villager) entity).remove();
}
}
System.out.println("Killed " + villagerskilled + " villagers");
}
public void log(String msg) {
Bukkit.getLogger().info(msg);
}
public void success(Player recipient, String msg) {
recipient.sendMessage(ChatColor.GREEN + msg);
}
public void error(Player recipient, String msg) {
recipient.sendMessage(ChatColor.RED + msg);
}
public int getLevel(int exp) {
return (int) Math.floor(Math.sqrt(exp / (float) 256));
}
public int getExpForLevel(int level) {
return (int) Math.pow(level, 2) * 256;
}
public float getExpProgress(int exp) {
int level = getLevel(exp);
int nextlevel = getExpForLevel(level + 1);
int prevlevel = 0;
if (level > 0) {
prevlevel = getExpForLevel(level);
}
float progress = ((exp - prevlevel) / (float) (nextlevel - prevlevel));
return progress;
}
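// Example: with 1024 raw exp, getLevel returns floor(sqrt(1024 / 256)) = 2; the next level starts at
// getExpForLevel(3) = 9 * 256 = 2304, so getExpProgress reports (1024 - 1024) / (2304 - 1024) = 0.0.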
public void setTotalExperience(Player player) {
int rawxp = 0;
if (EmeraldQuest.REDIS.exists("experience.raw." + player.getUniqueId().toString())) {
rawxp = Integer.parseInt(EmeraldQuest.REDIS.get("experience.raw." + player.getUniqueId().toString()));
}
// a lower factor (256 in getLevel/getExpForLevel) makes levels easier to gain; increase it for the opposite effect
int level = getLevel(rawxp);
float progress = getExpProgress(rawxp);
player.setLevel(level);
player.setExp(progress);
setPlayerMaxHealth(player);
}
public void setPlayerMaxHealth(Player player) {
// base health is 8, plus 1 for every 2 levels (moderators with ModFlag enabled start from 20); capped at 40 below
int health = 8 + (player.getLevel() / 2);
if (isModerator(player) && "true".equals(EmeraldQuest.REDIS.get("ModFlag " + player.getUniqueId().toString()))) {
health = 20 + (player.getLevel() / 2);
}
if (health > 40) health = 40;
// player.addPotionEffect(new PotionEffect(PotionEffectType.SPEED, Integer.MAX_VALUE, player.getLevel(), true));
player.setMaxHealth(health);
}
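// Handles land claims: validates the requested name, charges LAND_PRICE (x4 in the nether), and stores the
// owner UUID and name in Redis keyed by chunk coordinates; also supports "abandon", "transfer <player>" and renaming.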
public void claimLand(final String name, Chunk chunk, final Player player) throws ParseException, org.json.simple.parser.ParseException, IOException {
// check that land actually has a name
String chunkname = "";
if (player.getWorld().getName().equals("world")) {
chunkname = "chunk";
} else if (player.getWorld().getName().equals("world_nether")) {
chunkname = "netherchunk";
} //gets which chunks for which world @bitcoinjake09
final int x = chunk.getX();
final int z = chunk.getZ();
System.out.println("[claim] " + player.getDisplayName() + " wants to claim a plot in " + player.getWorld().getName() + x + "," + z + " with name " + name);
if (!name.isEmpty()) {
// check that the desired name contains only letters, numbers, spaces and underscores
boolean hasNonAlpha = name.matches("^.*[^a-zA-Z0-9 _].*$");
if (!hasNonAlpha) {
// 16 characters max
if (name.length() <= 21) {
if (name.equalsIgnoreCase("the wilderness")) {
player.sendMessage(ChatColor.RED + "You cannot name your land that.");
return;
}
if (REDIS.get(chunkname + "" + x + "," + z + "owner") == null) {
User user = new User(player);
player.sendMessage(ChatColor.YELLOW + "Claiming land...");
BukkitScheduler scheduler = Bukkit.getServer().getScheduler();
EmeraldQuest emeraldQuest = this;
scheduler.runTask(this, new Runnable() {
@Override
public void run() {
// charge the player and record the claim on the main thread
try {
//if ((player.getUniqueId().toString().equals(EmeraldQuest.ADMIN_UUID.toString()))||((removeEmeralds(player,(EmeraldQuest.LAND_PRICE / 100))) == true)) {
String chunkname = "";
int landiplier = 1;
if (player.getWorld().getName().equals("world")) {
chunkname = "chunk";
} else if (player.getWorld().getName().equals("world_nether")) {
chunkname = "netherchunk";
landiplier = 4;
} //gets which chunks for which world @bitcoinjake09
if (((removeEmeralds(player, (LAND_PRICE * landiplier))) == true)) {
EmeraldQuest.REDIS.set(chunkname + "" + x + "," + z + "owner", player.getUniqueId().toString());
EmeraldQuest.REDIS.set(chunkname + "" + x + "," + z + "name", name);
land_owner_cache = new HashMap();
land_name_cache = new HashMap();
land_unclaimed_cache = new HashMap();
player.sendMessage(ChatColor.GREEN + "Congratulations! You're now the owner of " + name + "!");
player.sendMessage(ChatColor.YELLOW + "Price was " + (LAND_PRICE * landiplier) + " Emeralds");
} else {
//int balance = new User(player).wallet.balance();
if (countEmeralds(player) < (LAND_PRICE * landiplier)) {
player.sendMessage(ChatColor.RED + "You don't have enough money! You need " + ChatColor.BOLD + Math.ceil(((LAND_PRICE * landiplier) - countEmeralds(player))) + ChatColor.RED + " more emeralds.");
} else {
player.sendMessage(ChatColor.RED + "Claim payment failed. Please try again later.");
}
}
} catch (Exception e) {
e.printStackTrace();
}
}
});
} else if (REDIS.get(chunkname + "" + x + "," + z + "owner").equals(player.getUniqueId().toString()) || isModerator(player)) {
if (name.equals("abandon")) {
// Abandon land
EmeraldQuest.REDIS.del(chunkname + "" + x + "," + z + "owner");
EmeraldQuest.REDIS.del(chunkname + "" + x + "," + z + "name");
EmeraldQuest.REDIS.del(chunkname + "" + x + "," + z + "permissions");
} else if (name.startsWith("transfer ") && name.length() > 1) {
// If the name starts with "transfer " and has at least one more character,
// transfer land
final String newOwner = name.substring(9);
player.sendMessage(ChatColor.YELLOW + "Transfering land to " + newOwner + "...");
if (REDIS.exists("uuid:" + newOwner)) {
String newOwnerUUID = REDIS.get("uuid:" + newOwner);
EmeraldQuest.REDIS.set(chunkname + "" + x + "," + z + "owner", newOwnerUUID);
player.sendMessage(ChatColor.GREEN + "This land now belongs to " + newOwner);
} else {
player.sendMessage(ChatColor.RED + "Could not find " + newOwner + ". Did you misspell their name?");
}
} else if (EmeraldQuest.REDIS.get(chunkname + "" + x + "," + z + "name").equals(name)) {
player.sendMessage(ChatColor.RED + "You already own this land!");
} else {
// Rename land
player.sendMessage(ChatColor.GREEN + "You renamed this land to " + name + ".");
EmeraldQuest.REDIS.set(chunkname + "" + x + "," + z + "name", name);
}
}
} else {
player.sendMessage(ChatColor.RED + "Your land name must be 16 characters max");
}
} else {
player.sendMessage(ChatColor.RED + "Your land name must contain only letters and numbers");
}
} else {
player.sendMessage(ChatColor.RED + "Your land must have a name");
}
}
public boolean isOwner(Location location, Player player) {
String key = "";
if (player.getWorld().getName().equals("world")) {
key = "chunk" + location.getChunk().getX() + "," + location.getChunk().getZ() + "owner";
if (land_owner_cache.containsKey(key)) {
if (land_owner_cache.get(key).equals(player.getUniqueId().toString())) {
return true;
} else {
return false;
}
} else if (REDIS.get(key).equals(player.getUniqueId().toString())) {
// player is the owner of the chunk
return true;
}
} else if (player.getWorld().getName().equals("world_nether")) {
key = "netherchunk" + location.getChunk().getX() + "," + location.getChunk().getZ() + "owner";
if (land_owner_cache.containsKey(key)) {
if (land_owner_cache.get(key).equals(player.getUniqueId().toString())) {
return true;
} else {
return false;
}
} else if (REDIS.get(key).equals(player.getUniqueId().toString())) {
// player is the owner of the chunk
return true;
}
} else {
return false;
}
return false;
}
public boolean canBuild(Location location, Player player) {
// returns true if player has permission to build in location
// TODO: Find out how are we gonna deal with clans and locations, and how/if they are gonna share land resources
try {
if (isModerator(player)) {
if (EmeraldQuest.REDIS.get("ModFlag " + player.getUniqueId().toString()).equals("true")) {
return true;
}
}//end mod
} catch (NullPointerException nullPointer) {
//System.out.println("modflag: "+nullPointer);
}
if (location.getWorld().getEnvironment().equals(Environment.THE_END)) {
// building is not allowed in The End
return false;
} else if (landIsClaimed(location)) {
if (isOwner(location, player)) {
return true;
} else if (landPermissionCode(location).equals("p")) {
return true;
} else if (landPermissionCode(location).equals("pv")) {
return true;// add land permission pv for public Pvp by @bitcoinjake09
} else if (landPermissionCode(location).equals("c") == true) {
if (player.getWorld().getName().equals("world")) {
String owner_uuid = REDIS.get("chunk" + location.getChunk().getX() + "," + location.getChunk().getZ() + "owner");
System.out.println(owner_uuid);
String owner_clan = REDIS.get("clan:" + owner_uuid);
System.out.println(owner_clan);
String player_clan = REDIS.get("clan:" + player.getUniqueId().toString());
System.out.println(player_clan);
if (owner_clan.equals(player_clan)) {
return true;
} else {
return false;
}
}//end world lol @bitcoinjake09
else if (player.getWorld().getName().equals("world_nether")) {
String owner_uuid = REDIS.get("netherchunk" + location.getChunk().getX() + "," + location.getChunk().getZ() + "owner");
System.out.println(owner_uuid);
String owner_clan = REDIS.get("clan:" + owner_uuid);
System.out.println(owner_clan);
String player_clan = REDIS.get("clan:" + player.getUniqueId().toString());
System.out.println(player_clan);
if (owner_clan.equals(player_clan)) {
return true;
} else {
return false;
}
}//world_nether @bitcoinjake09
} else {
return false;
}
} else {
return true;
}
return true;
}
public String landPermissionCode(Location location) {
// permission codes:
// p = public
// c = clan
// v = PvP(private cant build) by @bitcoinjake09
// pv= public PvP(can build) by @bitcoinjake09
// n = no permissions (private)
// added netherchunks @bitcoinjake09
String key = "";
if (location.getWorld().getName().equals("world")) {
key = "chunk" + location.getChunk().getX() + "," + location.getChunk().getZ() + "permissions";
if (land_permission_cache.containsKey(key)) {
return land_permission_cache.get(key);
} else if (REDIS.exists(key)) {
String code = REDIS.get(key);
land_permission_cache.put(key, code);
return code;
}
} else if (location.getWorld().getName().equals("world_nether")) {
key = "netherchunk" + location.getChunk().getX() + "," + location.getChunk().getZ() + "permissions";
if (land_permission_cache.containsKey(key)) {
return land_permission_cache.get(key);
} else if (REDIS.exists(key)) {
String code = REDIS.get(key);
land_permission_cache.put(key, code);
return code;
}
} else {
return "n";
}
return "n";
}
public boolean createNewArea(Location location, Player owner, String name, int size) {
// write the new area to REDIS
JsonObject areaJSON = new JsonObject();
areaJSON.addProperty("size", size);
areaJSON.addProperty("owner", owner.getUniqueId().toString());
areaJSON.addProperty("name", name);
areaJSON.addProperty("x", location.getX());
areaJSON.addProperty("z", location.getZ());
areaJSON.addProperty("uuid", UUID.randomUUID().toString());
REDIS.lpush("areas", areaJSON.toString());
// TODO: Check if redis actually appended the area to list and return the success of the operation
return true;
}
public boolean isModerator(Player player) {
if (REDIS.sismember("moderators", player.getUniqueId().toString())) {
return true;
} else if (ADMIN_UUID != null && player.getUniqueId().toString().equals(ADMIN_UUID.toString())) {
return true;
}
return false;
}
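// Chunk ownership lookups below are cached in land_owner_cache / land_unclaimed_cache so block events
// don't hit Redis every time; claimLand resets these caches when a new claim succeeds.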
public boolean landIsClaimed(Location location) {
String key = "";
if (location.getWorld().getName().equals("world")) {
key = "chunk" + location.getChunk().getX() + "," + location.getChunk().getZ() + "owner";
if (land_unclaimed_cache.containsKey(key)) {
return false;
} else if (land_owner_cache.containsKey(key)) {
return true;
} else {
if (REDIS.exists(key) == true) {
land_owner_cache.put(key, REDIS.get(key));
return true;
} else {
land_unclaimed_cache.put(key, true);
return false;
}
}
}//end world lmao @bitcoinjake09
else if (location.getWorld().getName().equals("world_nether")) {
key = "netherchunk" + location.getChunk().getX() + "," + location.getChunk().getZ() + "owner";
if (land_unclaimed_cache.containsKey(key)) {
return false;
} else if (land_owner_cache.containsKey(key)) {
return true;
} else {
if (REDIS.exists(key) == true) {
land_owner_cache.put(key, REDIS.get(key));
return true;
} else {
land_unclaimed_cache.put(key, true);
return false;
}
}
}//end nether
else {
land_unclaimed_cache.put(key, true);
return false;
}
}
@Override
public boolean onCommand(CommandSender sender, Command cmd, String label, String[] args) {
// we don't allow server commands (yet?)
if (sender instanceof Player) {
final Player player = (Player) sender;
// PLAYER COMMANDS
for (Map.Entry<String, CommandAction> entry : commands.entrySet()) {
if (cmd.getName().equalsIgnoreCase(entry.getKey())) {
entry.getValue().run(sender, cmd, label, args, player);
}
}
// MODERATOR COMMANDS
for (Map.Entry<String, CommandAction> entry : modCommands.entrySet()) {
if (cmd.getName().equalsIgnoreCase(entry.getKey())) {
if (isModerator(player)) {
entry.getValue().run(sender, cmd, label, args, player);
} else {
sender.sendMessage("You don't have enough permissions to execute this command!");
}
}
}
}
return true;
}
public void crashtest() {
this.setEnabled(false);
}
public void reset_rate_limits() {
rate_limit = false;
}
// the isPvP function by @bitcoinjake09
public boolean isPvP(Location location) {
if ((landPermissionCode(location).equals("v") == true) || (landPermissionCode(location).equals("pv") == true))
if (SET_PvP.equals("true")) {
return true;
}// returns true. it is a pvp or public pvp and if SET_PvP is true
return false;//not pvp
}
// end isPvP by @bitcoinjake09
public static int countEmeralds(Player player) {
ItemStack[] items = player.getInventory().getContents();
int amount = 0;
for (int i = 0; i < player.getInventory().getSize(); i++) {
ItemStack TempStack = items[i];
if ((TempStack != null) && (TempStack.getType() != Material.AIR)) {
if (TempStack.getType().toString() == "EMERALD_BLOCK") {
amount += (TempStack.getAmount() * 9);
} else if (TempStack.getType().toString() == "EMERALD") {
amount += TempStack.getAmount();
}
}
}
return amount;
}//end count emerald in player inventory by @bitcoinjake09
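// Deducts `amount` emeralds from the player's inventory, breaking emerald blocks (worth 9 emeralds each)
// into singles and returning any change; returns true if the full amount was removed.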
public boolean removeEmeralds(Player player, int amount) {
int EmCount = countEmeralds(player);
int LessEmCount = countEmeralds(player) - amount;
double TempAmount = (double) amount;
int EmsBack = 0;
ItemStack[] items = player.getInventory().getContents();
if (countEmeralds(player) >= amount) {
while (TempAmount > 0) {
for (int i = 0; i < player.getInventory().getSize(); i++) {
ItemStack TempStack = items[i];
if ((TempStack != null) && (TempStack.getType() != Material.AIR)) {
if ((TempStack.getType().toString() == "EMERALD_BLOCK") && (TempAmount >= 9)) {
player.getInventory().removeItem(new ItemStack(Material.EMERALD_BLOCK, 1));
TempAmount = TempAmount - 9;
}
if ((TempStack.getType().toString() == "EMERALD_BLOCK") && (TempAmount < 9)) {
player.getInventory().removeItem(new ItemStack(Material.EMERALD_BLOCK, 1));
EmsBack = (9 - (int) TempAmount); //if 8, ems back = 1
TempAmount = TempAmount - TempAmount;
if (EmsBack > 0) {
player.getInventory().addItem(new ItemStack(Material.EMERALD, EmsBack));
}
}
if ((TempStack.getType().toString() == "EMERALD") && (TempAmount >= 1)) {
player.getInventory().removeItem(new ItemStack(Material.EMERALD, 1));
TempAmount = TempAmount - 1;
}
}//end if != Material.AIR
}// end for loop
}//end while loop
}//end (EmCount>=amount)
EmCount = countEmeralds(player);
if ((EmCount == LessEmCount) || (TempAmount == 0))
return true;
return false;
}//end of remove emeralds
//start addemeralds to inventory
public boolean addEmeralds(Player player, int amount) {
int EmCount = countEmeralds(player);
int moreEmCount = countEmeralds(player) + amount;
double bits = (double) amount;
double TempAmount = (double) amount;
int EmsBack = 0;
while (TempAmount > 0) {
if (TempAmount >= 9) {
TempAmount = TempAmount - 9;
player.getInventory().addItem(new ItemStack(Material.EMERALD_BLOCK, 1));
} else {
TempAmount = TempAmount - 1;
player.getInventory().addItem(new ItemStack(Material.EMERALD, 1));
}
EmCount = countEmeralds(player);
if (EmCount == moreEmCount)
return true;
}//end while loop
return (countEmeralds(player) == moreEmCount);
}
}
|
[
"\"EMERALDQUEST_ENV\"",
"\"EMERALDQUEST_ENV\"",
"\"ADMIN_UUID\"",
"\"ADMIN_UUID\"",
"\"DENOMINATION_FACTOR\"",
"\"DENOMINATION_FACTOR\"",
"\"LAND_PRICE\"",
"\"LAND_PRICE\"",
"\"MIN_LOOT\"",
"\"MIN_LOOT\"",
"\"MAX_LOOT\"",
"\"MAX_LOOT\"",
"\"LOOTIPLIER\"",
"\"LOOTIPLIER\"",
"\"DENOMINATION_NAME\"",
"\"DENOMINATION_NAME\"",
"\"SET_PvP\"",
"\"SET_PvP\"",
"\"SERVER_NAME\"",
"\"SERVER_NAME\"",
"\"STATSD_HOST\"",
"\"STATSD_HOST\"",
"\"STATSD_PREFIX\"",
"\"STATSD_PREFIX\"",
"\"STATSD_PORT\"",
"\"STATSD_PORT\"",
"\"MIXPANEL_TOKEN\"",
"\"MIXPANEL_TOKEN\"",
"\"REDIS_1_PORT_6379_TCP_ADDR\"",
"\"REDIS_1_PORT_6379_TCP_ADDR\"",
"\"REDIS_1_PORT_6379_TCP_PORT\"",
"\"REDIS_1_PORT_6379_TCP_PORT\""
] |
[] |
[
"LAND_PRICE",
"EMERALDQUEST_ENV",
"SET_PvP",
"DENOMINATION_NAME",
"DENOMINATION_FACTOR",
"ADMIN_UUID",
"MIXPANEL_TOKEN",
"REDIS_1_PORT_6379_TCP_ADDR",
"REDIS_1_PORT_6379_TCP_PORT",
"SERVER_NAME",
"MIN_LOOT",
"LOOTIPLIER",
"STATSD_PREFIX",
"STATSD_HOST",
"MAX_LOOT",
"STATSD_PORT"
] |
[]
|
["LAND_PRICE", "EMERALDQUEST_ENV", "SET_PvP", "DENOMINATION_NAME", "DENOMINATION_FACTOR", "ADMIN_UUID", "MIXPANEL_TOKEN", "REDIS_1_PORT_6379_TCP_ADDR", "REDIS_1_PORT_6379_TCP_PORT", "SERVER_NAME", "MIN_LOOT", "LOOTIPLIER", "STATSD_PREFIX", "STATSD_HOST", "MAX_LOOT", "STATSD_PORT"]
|
java
| 16 | 0 | |
parent/restcache/test-server/resttest/resttest/wsgi.py
|
"""
WSGI config for resttest project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "resttest.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "resttest.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
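# A minimal, hypothetical sketch of such a wrapper (the HealthCheckMiddleware name and
# /healthz path are illustrative only, not part of this project):
#
# class HealthCheckMiddleware:
#     def __init__(self, app):
#         self.app = app
#
#     def __call__(self, environ, start_response):
#         # Answer health probes directly, delegate everything else to Django.
#         if environ.get("PATH_INFO") == "/healthz":
#             start_response("200 OK", [("Content-Type", "text/plain")])
#             return [b"ok"]
#         return self.app(environ, start_response)
#
# application = HealthCheckMiddleware(application)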
|
[] |
[] |
[
"DJANGO_SETTINGS_MODULE"
] |
[]
|
["DJANGO_SETTINGS_MODULE"]
|
python
| 1 | 0 | |
cmd/minio.go
|
package cmd
import (
"bytes"
"context"
"fmt"
"os"
"path"
"strings"
"time"
kubeflowv1 "github.com/StatCan/profiles-controller/pkg/apis/kubeflow/v1"
"github.com/StatCan/profiles-controller/pkg/controllers/profiles"
kubeflowclientset "github.com/StatCan/profiles-controller/pkg/generated/clientset/versioned"
kubeflowinformers "github.com/StatCan/profiles-controller/pkg/generated/informers/externalversions"
"github.com/StatCan/profiles-controller/pkg/signals"
vault "github.com/hashicorp/vault/api"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/spf13/cobra"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog"
)
func logWarnings(warnings []string) {
if len(warnings) > 0 {
for _, warning := range warnings {
klog.Warning(warning)
}
}
}
// Conf for MinIO
type Conf struct {
AccessKeyID string
Endpoint string
SecretAccessKey string
UseSSL bool
}
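// getMinIOConfig reads the secret at "<instance>/config" in Vault; the keys below (accessKeyId, endpoint,
// secretAccessKey, useSSL) are each optional and default to zero values when absent.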
func getMinIOConfig(vc *vault.Client, instance string) (*Conf, error) {
data, err := vc.Logical().Read(path.Join(instance, "config"))
if data != nil {
logWarnings(data.Warnings)
}
if err != nil {
return nil, err
}
config := Conf{}
if val, ok := data.Data["accessKeyId"]; ok {
config.AccessKeyID = val.(string)
}
if val, ok := data.Data["endpoint"]; ok {
config.Endpoint = val.(string)
}
if val, ok := data.Data["secretAccessKey"]; ok {
config.SecretAccessKey = val.(string)
}
if val, ok := data.Data["useSSL"]; ok {
config.UseSSL = val.(bool)
}
return &config, nil
}
// createBucketsForProfile creates the profile's buckets in the MinIO instances.
func createBucketsForProfile(client *minio.Client, instance string, profileName string) error {
for _, bucket := range []string{profileName, "shared"} {
exists, err := client.BucketExists(context.Background(), bucket)
if err != nil {
fmt.Printf("error checking bucket %q in instance %q\n", bucket, instance)
return err
}
if !exists {
fmt.Printf("making bucket %q in instance %q\n", bucket, instance)
err = client.MakeBucket(context.Background(), bucket, minio.MakeBucketOptions{})
if err != nil {
return err
}
} else {
fmt.Printf("bucket %q in instance %q already exists\n", bucket, instance)
}
}
// Make shared folder
_, err := client.PutObject(context.Background(), "shared", path.Join(profileName, ".hold"), bytes.NewReader([]byte{}), 0, minio.PutObjectOptions{})
if err != nil {
fmt.Printf("Failed to create shared file for %q in instance %q already exists\n", profileName, instance)
return err
}
return nil
}
var bucketsCmd = &cobra.Command{
Use: "buckets",
Short: "Configure MinIO buckets",
Long: `Configure MinIO buckets for new Profiles.
`,
Run: func(cmd *cobra.Command, args []string) {
// Setup signals so we can shutdown cleanly
stopCh := signals.SetupSignalHandler()
minioInstances := os.Getenv("MINIO_INSTANCES")
minioInstancesArray := strings.Split(minioInstances, ",")
// Vault
var err error
var vc *vault.Client
if os.Getenv("VAULT_AGENT_ADDR") != "" {
vc, err = vault.NewClient(&vault.Config{
AgentAddress: os.Getenv("VAULT_AGENT_ADDR"),
})
} else {
// Use the default env vars
vc, err = vault.NewClient(&vault.Config{})
}
if err != nil {
klog.Fatalf("Error initializing Vault client: %s", err)
}
// Create Kubernetes config
cfg, err := clientcmd.BuildConfigFromFlags(apiserver, kubeconfig)
if err != nil {
klog.Fatalf("error building kubeconfig: %v", err)
}
kubeflowClient, err := kubeflowclientset.NewForConfig(cfg)
if err != nil {
klog.Fatalf("error building Kubeflow client: %v", err)
}
// Setup informers
kubeflowInformerFactory := kubeflowinformers.NewSharedInformerFactory(kubeflowClient, time.Minute*5)
// Setup controller
controller := profiles.NewController(
kubeflowInformerFactory.Kubeflow().V1().Profiles(),
func(profile *kubeflowv1.Profile) error {
for _, instance := range minioInstancesArray {
conf, err := getMinIOConfig(vc, instance)
if err != nil {
klog.Warningf("error getting config for instance %s: %v", instance, err)
return err
}
client, err := minio.New(conf.Endpoint, &minio.Options{
Creds: credentials.NewStaticV4(conf.AccessKeyID, conf.SecretAccessKey, ""),
Secure: conf.UseSSL,
})
if err != nil {
klog.Warningf("error creating MinIO client for instance %s: %v", instance, err)
return err
}
err = createBucketsForProfile(client, instance, profile.Name)
if err != nil {
klog.Warningf("error making buckets for profile %s instance %s: %v", profile.Name, instance, err)
// return err
}
}
return nil
},
)
// Start informers
kubeflowInformerFactory.Start(stopCh)
// Run the controller
if err = controller.Run(2, stopCh); err != nil {
klog.Fatalf("error running controller: %v", err)
}
},
}
func init() {
rootCmd.AddCommand(bucketsCmd)
}
|
[
"\"MINIO_INSTANCES\"",
"\"VAULT_AGENT_ADDR\"",
"\"VAULT_AGENT_ADDR\""
] |
[] |
[
"VAULT_AGENT_ADDR",
"MINIO_INSTANCES"
] |
[]
|
["VAULT_AGENT_ADDR", "MINIO_INSTANCES"]
|
go
| 2 | 0 | |
src/mainStart.py
|
import logging
import RPi.GPIO as GPIO
__author__ = 'renderle'
logging.basicConfig(filename='/home/renderle/start.log', level=logging.DEBUG, format='%(asctime)s %(message)s')
print("script mainStart.py started")
GPIO.setmode(GPIO.BOARD)
GPIO.setup(11, GPIO.OUT)
logging.info("Start WP")
GPIO.output(11, GPIO.HIGH)
logging.info("")
print("script mainStart.py stopped - good bye")
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
src/python/tests/core/bot/tasks/fuzz_task_test.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""fuzz_task tests."""
# pylint: disable=protected-access
from builtins import object
from builtins import range
import datetime
import mock
import os
import parameterized
import shutil
import tempfile
import time
import unittest
from pyfakefs import fake_filesystem_unittest
import six
from base import utils
from bot import testcase_manager
from bot.fuzzers import engine
from bot.fuzzers.libFuzzer import engine as libfuzzer_engine
from bot.tasks import fuzz_task
from bot.untrusted_runner import file_host
from build_management import build_manager
from chrome import crash_uploader
from crash_analysis.stack_parsing import stack_analyzer
from datastore import data_handler
from datastore import data_types
from datastore import ndb
from google_cloud_utils import big_query
from metrics import monitor
from metrics import monitoring_metrics
from system import environment
from tests.test_libs import helpers
from tests.test_libs import test_utils
from tests.test_libs import untrusted_runner_helpers
class TrackFuzzerRunResultTest(unittest.TestCase):
"""Test _track_fuzzer_run_result."""
def setUp(self):
monitor.metrics_store().reset_for_testing()
def test_fuzzer_run_result(self):
"""Ensure _track_fuzzer_run_result set the right metrics."""
fuzz_task._track_fuzzer_run_result('name', 10, 100, 2)
fuzz_task._track_fuzzer_run_result('name', 100, 200, 2)
fuzz_task._track_fuzzer_run_result('name', 1000, 2000, 2)
fuzz_task._track_fuzzer_run_result('name', 1000, 500, 0)
fuzz_task._track_fuzzer_run_result('name', 0, 1000, -1)
fuzz_task._track_fuzzer_run_result('name', 0, 0, 2)
self.assertEqual(
4,
monitoring_metrics.FUZZER_RETURN_CODE_COUNT.get({
'fuzzer': 'name',
'return_code': 2
}))
self.assertEqual(
1,
monitoring_metrics.FUZZER_RETURN_CODE_COUNT.get({
'fuzzer': 'name',
'return_code': 0
}))
self.assertEqual(
1,
monitoring_metrics.FUZZER_RETURN_CODE_COUNT.get({
'fuzzer': 'name',
'return_code': -1
}))
testcase_count_ratio = (
monitoring_metrics.FUZZER_TESTCASE_COUNT_RATIO.get({
'fuzzer': 'name'
}))
self.assertEqual(3.1, testcase_count_ratio.sum)
self.assertEqual(5, testcase_count_ratio.count)
expected_buckets = [0 for _ in range(22)]
expected_buckets[1] = 1
expected_buckets[3] = 1
expected_buckets[11] = 2
expected_buckets[21] = 1
self.assertListEqual(expected_buckets, testcase_count_ratio.buckets)
class TrackBuildRunResultTest(unittest.TestCase):
"""Test _track_build_run_result."""
def setUp(self):
monitor.metrics_store().reset_for_testing()
def test_build_run_result(self):
"""Ensure _track_build_run_result set the right metrics."""
fuzz_task._track_build_run_result('name', 10000, True)
fuzz_task._track_build_run_result('name', 10001, True)
fuzz_task._track_build_run_result('name', 10002, False)
self.assertEqual(
2,
monitoring_metrics.JOB_BAD_BUILD_COUNT.get({
'job': 'name',
'bad_build': True
}))
self.assertEqual(
1,
monitoring_metrics.JOB_BAD_BUILD_COUNT.get({
'job': 'name',
'bad_build': False
}))
class TrackTestcaseRunResultTest(unittest.TestCase):
"""Test _track_testcase_run_result."""
def setUp(self):
monitor.metrics_store().reset_for_testing()
def test_testcase_run_result(self):
"""Ensure _track_testcase_run_result sets the right metrics."""
fuzz_task._track_testcase_run_result('fuzzer', 'job', 2, 5)
fuzz_task._track_testcase_run_result('fuzzer', 'job', 5, 10)
self.assertEqual(7,
monitoring_metrics.JOB_NEW_CRASH_COUNT.get({
'job': 'job'
}))
self.assertEqual(
15, monitoring_metrics.JOB_KNOWN_CRASH_COUNT.get({
'job': 'job'
}))
self.assertEqual(
7, monitoring_metrics.FUZZER_NEW_CRASH_COUNT.get({
'fuzzer': 'fuzzer'
}))
self.assertEqual(
15, monitoring_metrics.FUZZER_KNOWN_CRASH_COUNT.get({
'fuzzer': 'fuzzer'
}))
class TruncateFuzzerOutputTest(unittest.TestCase):
"""Truncate fuzzer output tests."""
def test_no_truncation(self):
"""No truncation."""
self.assertEqual('aaaa', fuzz_task.truncate_fuzzer_output('aaaa', 10))
def test_truncation(self):
"""Truncate."""
self.assertEqual(
'123456\n...truncated...\n54321',
fuzz_task.truncate_fuzzer_output(
'123456xxxxxxxxxxxxxxxxxxxxxxxxxxx54321', 28))
def test_error(self):
"""Error if limit is too low."""
with self.assertRaises(AssertionError):
self.assertEqual(
'', fuzz_task.truncate_fuzzer_output('123456xxxxxx54321', 10))
class TrackFuzzTimeTest(unittest.TestCase):
"""Test _TrackFuzzTime."""
def setUp(self):
monitor.metrics_store().reset_for_testing()
def _test(self, timeout):
"""Test helper."""
time_module = helpers.MockTime()
with fuzz_task._TrackFuzzTime('fuzzer', 'job', time_module) as tracker:
time_module.advance(5)
tracker.timeout = timeout
fuzzer_total_time = monitoring_metrics.FUZZER_TOTAL_FUZZ_TIME.get({
'fuzzer': 'fuzzer',
'timeout': timeout
})
self.assertEqual(5, fuzzer_total_time)
def test_success(self):
"""Test report metrics."""
self._test(False)
def test_timeout(self):
"""Test timeout."""
self._test(True)
class GetFuzzerMetadataFromOutputTest(unittest.TestCase):
"""Test get_fuzzer_metadata_from_output."""
def test_no_metadata(self):
"""Tests no metadata in output."""
data = 'abc\ndef\n123123'
self.assertDictEqual(fuzz_task.get_fuzzer_metadata_from_output(data), {})
data = ''
self.assertDictEqual(fuzz_task.get_fuzzer_metadata_from_output(data), {})
def test_metadata(self):
"""Tests parsing of metadata."""
data = ('abc\n'
'def\n'
'metadata:invalid: invalid\n'
'metadat::invalid: invalid\n'
'metadata::foo: bar\n'
'123123\n'
'metadata::blah: 1\n'
'metadata::test:abcd\n'
'metadata::test2: def\n')
self.assertDictEqual(
fuzz_task.get_fuzzer_metadata_from_output(data), {
'blah': '1',
'test': 'abcd',
'test2': 'def',
'foo': 'bar'
})
class GetRegressionTest(unittest.TestCase):
"""Test get_regression."""
def setUp(self):
helpers.patch(self, ['build_management.build_manager.is_custom_binary'])
def test_one_time_crasher(self):
"""Test when one_time_crasher_flag is True."""
self.mock.is_custom_binary.return_value = False
self.assertEqual('NA', fuzz_task.get_regression(True))
def test_custom_binary(self):
"""Test for custom binary."""
self.mock.is_custom_binary.return_value = True
self.assertEqual('NA', fuzz_task.get_regression(False))
def test_reproducible_non_custom_binary(self):
"""Test for reproducible non-custom binary."""
self.mock.is_custom_binary.return_value = False
self.assertEqual('', fuzz_task.get_regression(False))
class GetFixedOrMinimizedKeyTest(unittest.TestCase):
"""Test get_fixed_or_minimized_key."""
def test_one_time_crasher(self):
"""Test when one_time_crasher_flag is True."""
self.assertEqual('NA', fuzz_task.get_fixed_or_minimized_key(True))
def test_reproducible(self):
"""Test for reproducible."""
self.assertEqual('', fuzz_task.get_fixed_or_minimized_key(False))
class CrashInitTest(fake_filesystem_unittest.TestCase):
"""Test Crash.__init__."""
def setUp(self):
helpers.patch(self, [
'chrome.crash_uploader.FileMetadataInfo',
'bot.tasks.setup.archive_testcase_and_dependencies_in_gcs',
'crash_analysis.stack_parsing.stack_analyzer.get_crash_data',
'bot.testcase_manager.get_additional_command_line_flags',
'bot.testcase_manager.get_command_line_for_application',
'base.utils.get_crash_stacktrace_output',
'crash_analysis.crash_analyzer.ignore_stacktrace',
'crash_analysis.crash_analyzer.is_security_issue',
])
helpers.patch_environ(self)
test_utils.set_up_pyfakefs(self)
self.mock.get_command_line_for_application.return_value = 'cmd'
dummy_state = stack_analyzer.StackAnalyzerState()
dummy_state.crash_type = 'type'
dummy_state.crash_address = 'address'
dummy_state.crash_state = 'state'
dummy_state.crash_stacktrace = 'orig_trace'
dummy_state.frames = ['frame 1', 'frame 2']
self.mock.get_crash_data.return_value = dummy_state
self.mock.get_crash_stacktrace_output.return_value = 'trace'
self.mock.archive_testcase_and_dependencies_in_gcs.return_value = (
'fuzzed_key', True, 'absolute_path', 'archive_filename')
environment.set_value('FILTER_FUNCTIONAL_BUGS', False)
with open('/stack_file_path', 'w') as f:
f.write('unsym')
def test_error(self):
"""Test failing to reading stacktrace file."""
crash = fuzz_task.Crash.from_testcase_manager_crash(
testcase_manager.Crash('dir/path-http-name', 123, 11, ['res'], 'ges',
'/no_stack_file'))
self.assertIsNone(crash)
def _test_crash(self, should_be_ignored, security_flag):
"""Test crash."""
self.mock.get_command_line_for_application.reset_mock()
self.mock.get_crash_data.reset_mock()
self.mock.get_crash_stacktrace_output.reset_mock()
self.mock.is_security_issue.reset_mock()
self.mock.ignore_stacktrace.reset_mock()
self.mock.is_security_issue.return_value = security_flag
self.mock.ignore_stacktrace.return_value = should_be_ignored
crash = fuzz_task.Crash.from_testcase_manager_crash(
testcase_manager.Crash('dir/path-http-name', 123, 11, ['res'], 'ges',
'/stack_file_path'))
self.assertEqual('dir/path-http-name', crash.file_path)
self.assertEqual(123, crash.crash_time)
self.assertEqual(11, crash.return_code)
self.assertListEqual(['res'], crash.resource_list)
self.assertEqual('ges', crash.gestures)
self.assertEqual('path-http-name', crash.filename)
self.assertTrue(crash.http_flag)
self.assertEqual('cmd', crash.application_command_line)
self.mock.get_command_line_for_application.assert_called_once_with(
'dir/path-http-name', needs_http=True)
self.assertEqual('unsym', crash.unsymbolized_crash_stacktrace)
self.assertEqual('type', crash.crash_type)
self.assertEqual('address', crash.crash_address)
self.assertEqual('state', crash.crash_state)
self.assertListEqual(['frame 1', 'frame 2'], crash.crash_frames)
self.mock.get_crash_data.assert_called_once_with('unsym')
self.assertEqual('trace', crash.crash_stacktrace)
self.mock.get_crash_stacktrace_output.assert_called_once_with(
'cmd', 'orig_trace', 'unsym')
self.assertEqual(security_flag, crash.security_flag)
self.mock.is_security_issue.assert_called_once_with('unsym', 'type',
'address')
self.assertEqual('type,state,%s' % security_flag, crash.key)
self.assertEqual(should_be_ignored, crash.should_be_ignored)
self.mock.ignore_stacktrace.assert_called_once_with('orig_trace')
self.assertFalse(hasattr(crash, 'fuzzed_key'))
return crash
def _test_validity_and_get_functional_crash(self):
"""Test validity of different crashes and return functional crash."""
security_crash = self._test_crash(
should_be_ignored=False, security_flag=True)
self.assertIsNone(security_crash.get_error())
self.assertTrue(security_crash.is_valid())
ignored_crash = self._test_crash(should_be_ignored=True, security_flag=True)
self.assertIn('False crash', ignored_crash.get_error())
self.assertFalse(ignored_crash.is_valid())
functional_crash = self._test_crash(
should_be_ignored=False, security_flag=False)
return functional_crash
def test_valid_functional_bug(self):
"""Test valid because of functional bug."""
functional_crash = self._test_validity_and_get_functional_crash()
self.assertIsNone(functional_crash.get_error())
self.assertTrue(functional_crash.is_valid())
def test_invalid_functional_bug(self):
"""Test invalid because of functional bug."""
environment.set_value('FILTER_FUNCTIONAL_BUGS', True)
functional_crash = self._test_validity_and_get_functional_crash()
self.assertIn('Functional crash', functional_crash.get_error())
self.assertFalse(functional_crash.is_valid())
def test_hydrate_fuzzed_key(self):
"""Test hydrating fuzzed_key."""
crash = self._test_crash(should_be_ignored=False, security_flag=True)
self.assertFalse(crash.is_archived())
self.assertIsNone(crash.get_error())
self.assertTrue(crash.is_valid())
crash.archive_testcase_in_blobstore()
self.assertTrue(crash.is_archived())
self.assertIsNone(crash.get_error())
self.assertTrue(crash.is_valid())
self.assertEqual('fuzzed_key', crash.fuzzed_key)
self.assertTrue(crash.archived)
self.assertEqual('absolute_path', crash.absolute_path)
self.assertEqual('archive_filename', crash.archive_filename)
def test_hydrate_fuzzed_key_failure(self):
"""Test fail to hydrate fuzzed_key."""
self.mock.archive_testcase_and_dependencies_in_gcs.return_value = (None,
False,
None,
None)
crash = self._test_crash(should_be_ignored=False, security_flag=True)
self.assertFalse(crash.is_archived())
self.assertIsNone(crash.get_error())
self.assertTrue(crash.is_valid())
crash.archive_testcase_in_blobstore()
self.assertTrue(crash.is_archived())
self.assertIn('Unable to store testcase in blobstore', crash.get_error())
self.assertFalse(crash.is_valid())
self.assertIsNone(crash.fuzzed_key)
self.assertFalse(crash.archived)
self.assertIsNone(crash.absolute_path)
self.assertIsNone(crash.archive_filename)
def test_args_from_testcase_manager(self):
"""Test args from testcase_manager.Crash."""
testcase_manager_crash = testcase_manager.Crash('path', 0, 0, [], [],
'/stack_file_path')
self.mock.get_additional_command_line_flags.return_value = 'minimized'
environment.set_value('APP_ARGS', 'app')
crash = fuzz_task.Crash.from_testcase_manager_crash(testcase_manager_crash)
self.assertEqual('app minimized', crash.arguments)
class CrashGroupTest(unittest.TestCase):
"""Test CrashGroup."""
def setUp(self):
helpers.patch(self, [
'bot.tasks.fuzz_task.find_main_crash',
'datastore.data_handler.find_testcase',
'datastore.data_handler.get_project_name',
])
self.mock.get_project_name.return_value = 'some_project'
self.crashes = [self._make_crash('g1'), self._make_crash('g2')]
self.context = mock.MagicMock(
test_timeout=99, fuzzer_name='test', fuzz_target=None)
self.reproducible_testcase = self._make_testcase(
project_name='some_project',
bug_information='',
one_time_crasher_flag=False)
self.unreproducible_testcase = self._make_testcase(
project_name='some_project',
bug_information='',
one_time_crasher_flag=True)
def _make_crash(self, gestures):
crash = mock.MagicMock(
crash_type='type',
crash_state='state',
security_flag=True,
file_path='file_path',
http_flag=True,
gestures=gestures)
return crash
def _make_testcase(self,
project_name,
bug_information,
one_time_crasher_flag,
timestamp=datetime.datetime.now()):
"""Make testcase."""
testcase = data_types.Testcase()
testcase.timestamp = timestamp
testcase.one_time_crasher_flag = one_time_crasher_flag
testcase.bug_information = bug_information
testcase.project_name = project_name
return testcase
def test_no_existing_testcase(self):
"""is_new=True and should_create_testcase=True when there's no existing
testcase."""
self.mock.find_testcase.return_value = None
self.mock.find_main_crash.return_value = self.crashes[0], True
group = fuzz_task.CrashGroup(self.crashes, self.context)
self.assertTrue(group.should_create_testcase())
self.mock.find_main_crash.assert_called_once_with(
self.crashes, 'test', 'test', self.context.test_timeout)
self.assertIsNone(group.existing_testcase)
self.assertEqual(self.crashes[0], group.main_crash)
self.assertTrue(group.is_new())
def test_has_existing_reproducible_testcase(self):
"""should_create_testcase=False when there's an existing reproducible
testcase."""
self.mock.find_testcase.return_value = self.reproducible_testcase
self.mock.find_main_crash.return_value = (self.crashes[0], True)
group = fuzz_task.CrashGroup(self.crashes, self.context)
self.assertEqual(self.crashes[0].gestures, group.main_crash.gestures)
self.mock.find_main_crash.assert_called_once_with(
self.crashes, 'test', 'test', self.context.test_timeout)
self.assertFalse(group.is_new())
self.assertFalse(group.should_create_testcase())
self.assertTrue(group.has_existing_reproducible_testcase())
def test_reproducible_crash(self):
"""should_create_testcase=True when the group is reproducible."""
self.mock.find_testcase.return_value = self.unreproducible_testcase
self.mock.find_main_crash.return_value = (self.crashes[0], False)
group = fuzz_task.CrashGroup(self.crashes, self.context)
self.assertEqual(self.crashes[0].gestures, group.main_crash.gestures)
self.mock.find_main_crash.assert_called_once_with(
self.crashes, 'test', 'test', self.context.test_timeout)
self.assertFalse(group.is_new())
self.assertTrue(group.should_create_testcase())
self.assertFalse(group.has_existing_reproducible_testcase())
self.assertFalse(group.one_time_crasher_flag)
def test_has_existing_unreproducible_testcase(self):
"""should_create_testcase=False when the unreproducible testcase already
exists."""
self.mock.find_testcase.return_value = self.unreproducible_testcase
self.mock.find_main_crash.return_value = (self.crashes[0], True)
group = fuzz_task.CrashGroup(self.crashes, self.context)
self.assertFalse(group.should_create_testcase())
self.assertEqual(self.crashes[0].gestures, group.main_crash.gestures)
self.mock.find_main_crash.assert_called_once_with(
self.crashes, 'test', 'test', self.context.test_timeout)
self.assertFalse(group.is_new())
self.assertFalse(group.has_existing_reproducible_testcase())
self.assertTrue(group.one_time_crasher_flag)
class FindMainCrashTest(unittest.TestCase):
"""Test find_main_crash."""
def setUp(self):
helpers.patch(self, [
'bot.testcase_manager.test_for_reproducibility',
])
self.crashes = [
self._make_crash('g1'),
self._make_crash('g2'),
self._make_crash('g3'),
self._make_crash('g4')
]
self.reproducible_crashes = []
# pylint: disable=unused-argument
def test_for_repro(fuzzer_name,
full_fuzzer_name,
file_path,
state,
security_flag,
test_timeout,
http_flag,
gestures,
arguments=None):
"""Mock test_for_reproducibility."""
for c in self.reproducible_crashes:
if c.gestures == gestures:
return True
return False
self.mock.test_for_reproducibility.side_effect = test_for_repro
def _make_crash(self, gestures):
crash = mock.MagicMock(
file_path='file_path',
crash_state='state',
security_flag=True,
test_timeout=999,
gestures=gestures)
return crash
def test_reproducible_crash(self):
"""Find that the 2nd crash is reproducible."""
for c in self.crashes:
c.is_valid.return_value = True
self.crashes[0].is_valid.return_value = False
self.reproducible_crashes = [self.crashes[2]]
self.assertEqual((self.crashes[2], False),
fuzz_task.find_main_crash(self.crashes, 'test', 'test',
99))
self.crashes[0].archive_testcase_in_blobstore.assert_called_once_with()
self.crashes[1].archive_testcase_in_blobstore.assert_called_once_with()
self.crashes[2].archive_testcase_in_blobstore.assert_called_once_with()
self.crashes[3].archive_testcase_in_blobstore.assert_not_called()
# Calls for self.crashes[1] and self.crashes[2].
self.assertEqual(2, self.mock.test_for_reproducibility.call_count)
def test_unreproducible_crash(self):
"""No reproducible crash. Find the first valid one."""
for c in self.crashes:
c.is_valid.return_value = True
self.crashes[0].is_valid.return_value = False
self.reproducible_crashes = []
self.assertEqual((self.crashes[1], True),
fuzz_task.find_main_crash(self.crashes, 'test', 'test',
99))
for c in self.crashes:
c.archive_testcase_in_blobstore.assert_called_once_with()
# Calls for every crash except self.crashes[0] because it's invalid.
self.assertEqual(
len(self.crashes) - 1, self.mock.test_for_reproducibility.call_count)
def test_no_valid_crash(self):
"""No valid crash."""
for c in self.crashes:
c.is_valid.return_value = False
self.reproducible_crashes = []
self.assertEqual((None, None),
fuzz_task.find_main_crash(self.crashes, 'test', 'test',
99))
for c in self.crashes:
c.archive_testcase_in_blobstore.assert_called_once_with()
self.assertEqual(0, self.mock.test_for_reproducibility.call_count)
@test_utils.with_cloud_emulators('datastore')
class ProcessCrashesTest(fake_filesystem_unittest.TestCase):
"""Test process_crashes."""
def setUp(self):
helpers.patch(self, [
'chrome.crash_uploader.get_symbolized_stack_bytes',
'bot.tasks.task_creation.create_tasks',
'bot.tasks.setup.archive_testcase_and_dependencies_in_gcs',
'crash_analysis.stack_parsing.stack_analyzer.get_crash_data',
'build_management.revisions.get_real_revision',
'bot.testcase_manager.get_command_line_for_application',
'bot.testcase_manager.test_for_reproducibility',
'base.utils.get_crash_stacktrace_output',
'crash_analysis.crash_analyzer.ignore_stacktrace',
'crash_analysis.crash_analyzer.is_security_issue',
'datastore.data_handler.get_issue_tracker_name',
'datastore.data_handler.get_project_name',
'google.appengine.api.app_identity.get_application_id',
'google_cloud_utils.big_query.Client.insert',
'google_cloud_utils.big_query.get_api_client', 'time.sleep', 'time.time'
])
test_utils.set_up_pyfakefs(self)
self.mock.time.return_value = 987
self.mock.get_issue_tracker_name.return_value = 'some_issue_tracker'
self.mock.get_project_name.return_value = 'some_project'
self.mock.archive_testcase_and_dependencies_in_gcs.return_value = (
'fuzzed_key', True, 'absolute_path', 'archive_filename')
def _make_crash(self, trace, state='state'):
"""Make crash."""
self.mock.get_real_revision.return_value = 'this.is.fake.ver'
self.mock.get_command_line_for_application.return_value = 'cmd'
dummy_state = stack_analyzer.StackAnalyzerState()
dummy_state.crash_type = 'type'
dummy_state.crash_address = 'address'
dummy_state.crash_state = state
dummy_state.crash_stacktrace = 'orig_trace'
dummy_state.crash_frames = ['frame 1', 'frame 2']
self.mock.get_crash_data.return_value = dummy_state
self.mock.get_symbolized_stack_bytes.return_value = 'f00df00d'
self.mock.get_crash_stacktrace_output.return_value = trace
self.mock.is_security_issue.return_value = True
self.mock.ignore_stacktrace.return_value = False
with open('/stack_file_path', 'w') as f:
f.write('unsym')
crash = fuzz_task.Crash.from_testcase_manager_crash(
testcase_manager.Crash('dir/path-http-name', 123, 11, ['res'], ['ges'],
'/stack_file_path'))
return crash
def test_existing_unreproducible_testcase(self):
"""Test existing unreproducible testcase."""
crashes = [self._make_crash('c1'), self._make_crash('c2')]
self.mock.test_for_reproducibility.return_value = False
existing_testcase = data_types.Testcase()
existing_testcase.crash_stacktrace = 'existing'
existing_testcase.crash_type = crashes[0].crash_type
existing_testcase.crash_state = crashes[0].crash_state
existing_testcase.security_flag = crashes[0].security_flag
existing_testcase.one_time_crasher_flag = True
existing_testcase.job_type = 'existing_job'
existing_testcase.timestamp = datetime.datetime.now()
existing_testcase.project_name = 'some_project'
existing_testcase.put()
variant = data_types.TestcaseVariant()
variant.status = data_types.TestcaseVariantStatus.UNREPRODUCIBLE
variant.job_type = 'job'
variant.testcase_id = existing_testcase.key.id()
variant.put()
new_crash_count, known_crash_count, groups = fuzz_task.process_crashes(
crashes=crashes,
context=fuzz_task.Context(
project_name='some_project',
bot_name='bot',
job_type='job',
fuzz_target=data_types.FuzzTarget(engine='engine', binary='binary'),
redzone=111,
disable_ubsan=True,
platform_id='platform',
crash_revision=1234,
fuzzer_name='fuzzer',
window_argument='win_args',
fuzzer_metadata={},
testcases_metadata={},
timeout_multiplier=1,
test_timeout=2,
thread_wait_timeout=3,
data_directory='/data'))
self.assertEqual(0, new_crash_count)
self.assertEqual(2, known_crash_count)
self.assertEqual(1, len(groups))
self.assertEqual(2, len(groups[0].crashes))
self.assertFalse(groups[0].is_new())
self.assertEqual(crashes[0].crash_type, groups[0].main_crash.crash_type)
self.assertEqual(crashes[0].crash_state, groups[0].main_crash.crash_state)
self.assertEqual(crashes[0].security_flag,
groups[0].main_crash.security_flag)
testcases = list(data_types.Testcase.query())
self.assertEqual(1, len(testcases))
self.assertEqual('existing', testcases[0].crash_stacktrace)
variant = data_handler.get_testcase_variant(existing_testcase.key.id(),
'job')
self.assertEqual(data_types.TestcaseVariantStatus.FLAKY, variant.status)
self.assertEqual('fuzzed_key', variant.reproducer_key)
self.assertEqual(1234, variant.revision)
self.assertEqual('type', variant.crash_type)
self.assertEqual('state', variant.crash_state)
self.assertEqual(True, variant.security_flag)
self.assertEqual(True, variant.is_similar)
@parameterized.parameterized.expand(['some_project', 'chromium'])
def test_create_many_groups(self, project_name):
"""Test creating many groups."""
self.mock.get_project_name.return_value = project_name
self.mock.insert.return_value = {'insertErrors': [{'index': 0}]}
# TODO(metzman): Add a separate test for strategies.
r2_stacktrace = ('r2\ncf::fuzzing_strategies: value_profile\n')
crashes = [
self._make_crash('r1', state='reproducible1'),
self._make_crash(r2_stacktrace, state='reproducible1'),
self._make_crash('r3', state='reproducible1'),
self._make_crash('r4', state='reproducible2'),
self._make_crash('u1', state='unreproducible1'),
self._make_crash('u2', state='unreproducible2'),
self._make_crash('u3', state='unreproducible2'),
self._make_crash('u4', state='unreproducible3')
]
self.mock.test_for_reproducibility.side_effect = [
False, # For r1. It returns False. So, r1 is demoted.
True, # For r2. It returns True. So, r2 becomes primary for its group.
True, # For r4.
False, # For u1.
False, # For u2.
False, # For u3.
False
] # For u4.
new_crash_count, known_crash_count, groups = fuzz_task.process_crashes(
crashes=crashes,
context=fuzz_task.Context(
project_name=project_name,
bot_name='bot',
job_type='job',
fuzz_target=data_types.FuzzTarget(engine='engine', binary='binary'),
redzone=111,
disable_ubsan=False,
platform_id='platform',
crash_revision=1234,
fuzzer_name='fuzzer',
window_argument='win_args',
fuzzer_metadata={},
testcases_metadata={},
timeout_multiplier=1,
test_timeout=2,
thread_wait_timeout=3,
data_directory='/data'))
self.assertEqual(5, new_crash_count)
self.assertEqual(3, known_crash_count)
self.assertEqual(5, len(groups))
self.assertEqual([
'reproducible1', 'reproducible2', 'unreproducible1', 'unreproducible2',
'unreproducible3'
], [group.main_crash.crash_state for group in groups])
self.assertEqual([True, True, True, True, True],
[group.is_new() for group in groups])
self.assertEqual([3, 1, 1, 2, 1], [len(group.crashes) for group in groups])
testcases = list(data_types.Testcase.query())
self.assertEqual(5, len(testcases))
self.assertSetEqual(
set([r2_stacktrace, 'r4', 'u1', 'u2', 'u4']),
set(t.crash_stacktrace for t in testcases))
self.assertSetEqual(
set([
'{"fuzzing_strategies": ["value_profile"]}', None, None, None, None
]), set(t.additional_metadata for t in testcases))
# r2 is a reproducible crash, so r3 doesn't invoke
# archive_testcase_in_blobstore. Therefore, archive_testcase_in_blobstore
# is called `len(crashes) - 1` times.
self.assertEqual(
len(crashes) - 1,
self.mock.archive_testcase_and_dependencies_in_gcs.call_count)
# Check only the desired testcases were saved.
actual_crash_infos = [group.main_crash.crash_info for group in groups]
if project_name != 'chromium':
expected_crash_infos = [None] * len(actual_crash_infos)
else:
expected_saved_crash_info = crash_uploader.CrashReportInfo(
product='Chrome_' + environment.platform().lower().capitalize(),
version='this.is.fake.ver',
serialized_crash_stack_frames='f00df00d')
expected_crash_infos = [
expected_saved_crash_info, # r2 is main crash for group r1,r2,r3
expected_saved_crash_info, # r4 is main crash for its own group
None, # u1 is not reproducible
None, # u2, u3 are not reproducible
None, # u4 is not reproducible
]
self.assertEqual(len(expected_crash_infos), len(actual_crash_infos))
for expected, actual in zip(expected_crash_infos, actual_crash_infos):
if not expected:
self.assertIsNone(actual)
continue
self.assertEqual(expected.product, actual.product)
self.assertEqual(expected.version, actual.version)
self.assertEqual(expected.serialized_crash_stack_frames,
actual.serialized_crash_stack_frames)
def _make_big_query_json(crash, reproducible_flag, new_flag, testcase_id):
return {
'crash_type': crash.crash_type,
'crash_state': crash.crash_state,
'created_at': 987,
'platform': 'platform',
'crash_time_in_ms': int(crash.crash_time * 1000),
'parent_fuzzer_name': 'engine',
'fuzzer_name': 'engine_binary',
'job_type': 'job',
'security_flag': crash.security_flag,
'reproducible_flag': reproducible_flag,
'revision': '1234',
'new_flag': new_flag,
'project': project_name,
'testcase_id': testcase_id
}
def _get_testcase_id(crash):
rows = list(
data_types.Testcase.query(
data_types.Testcase.crash_type == crash.crash_type,
data_types.Testcase.crash_state == crash.crash_state,
data_types.Testcase.security_flag == crash.security_flag))
if not rows:
return None
return str(rows[0].key.id())
# Calls to write 5 groups of crashes to BigQuery.
self.assertEqual(5, self.mock.insert.call_count)
self.mock.insert.assert_has_calls([
mock.call(mock.ANY, [
big_query.Insert(
_make_big_query_json(crashes[0], True, False, None),
'%s:bot:987:0' % crashes[0].key),
big_query.Insert(
_make_big_query_json(crashes[1], True, True,
_get_testcase_id(crashes[1])),
'%s:bot:987:1' % crashes[0].key),
big_query.Insert(
_make_big_query_json(crashes[2], True, False, None),
'%s:bot:987:2' % crashes[0].key)
]),
mock.call(mock.ANY, [
big_query.Insert(
_make_big_query_json(crashes[3], True, True,
_get_testcase_id(crashes[3])),
'%s:bot:987:0' % crashes[3].key)
]),
mock.call(mock.ANY, [
big_query.Insert(
_make_big_query_json(crashes[4], False, True,
_get_testcase_id(crashes[4])),
'%s:bot:987:0' % crashes[4].key)
]),
mock.call(mock.ANY, [
big_query.Insert(
_make_big_query_json(crashes[5], False, True,
_get_testcase_id(crashes[5])),
'%s:bot:987:0' % crashes[5].key),
big_query.Insert(
_make_big_query_json(crashes[6], False, False, None),
'%s:bot:987:1' % crashes[5].key)
]),
mock.call(mock.ANY, [
big_query.Insert(
_make_big_query_json(crashes[7], False, True,
_get_testcase_id(crashes[7])),
'%s:bot:987:0' % crashes[7].key)
]),
])
class WriteCrashToBigQueryTest(unittest.TestCase):
"""Test write_crash_to_big_query."""
def setUp(self):
self.client = mock.Mock(spec_set=big_query.Client)
helpers.patch(self, [
'system.environment.get_value',
'datastore.data_handler.get_project_name',
'google_cloud_utils.big_query.Client',
'time.time',
])
monitor.metrics_store().reset_for_testing()
self.mock.get_project_name.return_value = 'some_project'
self.mock.get_value.return_value = 'bot'
self.mock.Client.return_value = self.client
self.mock.time.return_value = 99
self.crashes = [
self._make_crash('c1'),
self._make_crash('c2'),
self._make_crash('c3')
]
newly_created_testcase = mock.MagicMock()
newly_created_testcase.key.id.return_value = 't'
self.group = mock.MagicMock(
crashes=self.crashes,
main_crash=self.crashes[0],
one_time_crasher_flag=False,
newly_created_testcase=newly_created_testcase)
self.group.is_new.return_value = True
def _create_context(self, job_type, platform_id):
return fuzz_task.Context(
project_name='some_project',
bot_name='bot',
job_type=job_type,
fuzz_target=data_types.FuzzTarget(engine='engine', binary='binary'),
redzone=32,
disable_ubsan=False,
platform_id=platform_id,
crash_revision=1234,
fuzzer_name='engine',
window_argument='windows_args',
fuzzer_metadata={},
testcases_metadata={},
timeout_multiplier=1.0,
test_timeout=5,
thread_wait_timeout=6,
data_directory='data')
def _make_crash(self, state):
crash = mock.Mock(
crash_type='type',
crash_state=state,
crash_time=111,
security_flag=True,
key='key')
return crash
def _json(self, job, platform, state, new_flag, testcase_id):
return {
'crash_type': 'type',
'crash_state': state,
'created_at': 99,
'platform': platform,
'crash_time_in_ms': 111000,
'parent_fuzzer_name': 'engine',
'fuzzer_name': 'engine_binary',
'job_type': job,
'security_flag': True,
'reproducible_flag': True,
'revision': '1234',
'new_flag': new_flag,
'project': 'some_project',
'testcase_id': testcase_id
}
def test_all_succeed(self):
"""Test writing succeeds."""
self.client.insert.return_value = {}
context = self._create_context('job', 'linux')
fuzz_task.write_crashes_to_big_query(self.group, context)
success_count = monitoring_metrics.BIG_QUERY_WRITE_COUNT.get({
'success': True
})
failure_count = monitoring_metrics.BIG_QUERY_WRITE_COUNT.get({
'success': False
})
self.assertEqual(3, success_count)
self.assertEqual(0, failure_count)
self.mock.Client.assert_called_once_with(
dataset_id='main', table_id='crashes$19700101')
self.client.insert.assert_called_once_with([
big_query.Insert(
self._json('job', 'linux', 'c1', True, 't'), 'key:bot:99:0'),
big_query.Insert(
self._json('job', 'linux', 'c2', False, None), 'key:bot:99:1'),
big_query.Insert(
self._json('job', 'linux', 'c3', False, None), 'key:bot:99:2')
])
def test_succeed(self):
"""Test writing succeeds."""
self.client.insert.return_value = {'insertErrors': [{'index': 1}]}
context = self._create_context('job', 'linux')
fuzz_task.write_crashes_to_big_query(self.group, context)
success_count = monitoring_metrics.BIG_QUERY_WRITE_COUNT.get({
'success': True
})
failure_count = monitoring_metrics.BIG_QUERY_WRITE_COUNT.get({
'success': False
})
self.assertEqual(2, success_count)
self.assertEqual(1, failure_count)
self.mock.Client.assert_called_once_with(
dataset_id='main', table_id='crashes$19700101')
self.client.insert.assert_called_once_with([
big_query.Insert(
self._json('job', 'linux', 'c1', True, 't'), 'key:bot:99:0'),
big_query.Insert(
self._json('job', 'linux', 'c2', False, None), 'key:bot:99:1'),
big_query.Insert(
self._json('job', 'linux', 'c3', False, None), 'key:bot:99:2')
])
def test_chromeos_platform(self):
"""Test ChromeOS platform is written in stats."""
self.client.insert.return_value = {'insertErrors': [{'index': 1}]}
context = self._create_context('job_chromeos', 'linux')
fuzz_task.write_crashes_to_big_query(self.group, context)
success_count = monitoring_metrics.BIG_QUERY_WRITE_COUNT.get({
'success': True
})
failure_count = monitoring_metrics.BIG_QUERY_WRITE_COUNT.get({
'success': False
})
self.assertEqual(2, success_count)
self.assertEqual(1, failure_count)
self.mock.Client.assert_called_once_with(
dataset_id='main', table_id='crashes$19700101')
self.client.insert.assert_called_once_with([
big_query.Insert(
self._json('job_chromeos', 'chrome', 'c1', True, 't'),
'key:bot:99:0'),
big_query.Insert(
self._json('job_chromeos', 'chrome', 'c2', False, None),
'key:bot:99:1'),
big_query.Insert(
self._json('job_chromeos', 'chrome', 'c3', False, None),
'key:bot:99:2')
])
def test_exception(self):
"""Test writing raising an exception."""
self.client.insert.side_effect = Exception('error')
context = self._create_context('job', 'linux')
fuzz_task.write_crashes_to_big_query(self.group, context)
success_count = monitoring_metrics.BIG_QUERY_WRITE_COUNT.get({
'success': True
})
failure_count = monitoring_metrics.BIG_QUERY_WRITE_COUNT.get({
'success': False
})
self.assertEqual(0, success_count)
self.assertEqual(3, failure_count)
class ConvertGroupsToCrashesTest(unittest.TestCase):
"""Test convert_groups_to_crashes."""
def test_convert(self):
"""Test converting."""
groups = [
mock.Mock(
crashes=[mock.Mock(), mock.Mock()],
main_crash=mock.Mock(
crash_type='t1', crash_state='s1', security_flag=True)),
mock.Mock(
crashes=[mock.Mock()],
main_crash=mock.Mock(
crash_type='t2', crash_state='s2', security_flag=False)),
]
groups[0].is_new.return_value = False
groups[1].is_new.return_value = True
self.assertEqual([
{
'is_new': False,
'count': 2,
'crash_type': 't1',
'crash_state': 's1',
'security_flag': True
},
{
'is_new': True,
'count': 1,
'crash_type': 't2',
'crash_state': 's2',
'security_flag': False
},
], fuzz_task.convert_groups_to_crashes(groups))
class TestCorpusSync(fake_filesystem_unittest.TestCase):
"""Test corpus sync."""
def setUp(self):
helpers.patch(self, [
'fuzzing.corpus_manager.FuzzTargetCorpus.rsync_to_disk',
'fuzzing.corpus_manager.FuzzTargetCorpus.upload_files',
'google_cloud_utils.storage.last_updated',
])
helpers.patch_environ(self)
os.environ['FAIL_RETRIES'] = '1'
os.environ['CORPUS_BUCKET'] = 'bucket'
self.mock.rsync_to_disk.return_value = True
test_utils.set_up_pyfakefs(self)
self.fs.create_dir('/dir')
self.fs.create_dir('/dir1')
def _write_corpus_files(self, *args, **kwargs): # pylint: disable=unused-argument
self.fs.create_file('/dir/a')
self.fs.create_file('/dir/b')
return True
def test_sync(self):
"""Test corpus sync."""
corpus = fuzz_task.GcsCorpus('parent', 'child', '/dir', '/dir1')
self.mock.rsync_to_disk.side_effect = self._write_corpus_files
self.assertTrue(corpus.sync_from_gcs())
self.assertTrue(os.path.exists('/dir1/.child_sync'))
self.assertEqual(('/dir',), self.mock.rsync_to_disk.call_args[0][1:])
self.fs.create_file('/dir/c')
self.assertListEqual(['/dir/c'], corpus.get_new_files())
corpus.upload_files(corpus.get_new_files())
self.assertEqual((['/dir/c'],), self.mock.upload_files.call_args[0][1:])
self.assertListEqual([], corpus.get_new_files())
def test_no_sync(self):
"""Test no corpus sync when bundle is not updated since last sync."""
corpus = fuzz_task.GcsCorpus('parent', 'child', '/dir', '/dir1')
utils.write_data_to_file(time.time(), '/dir1/.child_sync')
self.mock.last_updated.return_value = (
datetime.datetime.utcnow() - datetime.timedelta(days=1))
self.assertTrue(corpus.sync_from_gcs())
self.assertEqual(0, self.mock.rsync_to_disk.call_count)
def test_sync_with_failed_last_update(self):
"""Test corpus sync when failed to get last update info from gcs."""
corpus = fuzz_task.GcsCorpus('parent', 'child', '/dir', '/dir1')
utils.write_data_to_file(time.time(), '/dir1/.child_sync')
self.mock.last_updated.return_value = None
self.assertTrue(corpus.sync_from_gcs())
self.assertEqual(1, self.mock.rsync_to_disk.call_count)
@test_utils.with_cloud_emulators('datastore')
class RecordFuzzTargetTest(unittest.TestCase):
"""Tests for record_fuzz_target."""
def setUp(self):
helpers.patch_environ(self)
helpers.patch(self, [
'base.utils.is_oss_fuzz',
'base.utils.utcnow',
])
self.mock.is_oss_fuzz.return_value = False
self.mock.utcnow.return_value = datetime.datetime(2018, 1, 1)
def test_record_fuzz_target(self):
"""Test that record_fuzz_target works."""
fuzz_task.record_fuzz_target('libFuzzer', 'child', 'job')
fuzz_target = ndb.Key(data_types.FuzzTarget, 'libFuzzer_child').get()
self.assertDictEqual({
'binary': 'child',
'engine': 'libFuzzer',
'project': 'test-project',
}, fuzz_target.to_dict())
job_mapping = ndb.Key(data_types.FuzzTargetJob, 'libFuzzer_child/job').get()
self.assertDictEqual({
'fuzz_target_name': 'libFuzzer_child',
'job': 'job',
'engine': 'libFuzzer',
'last_run': datetime.datetime(2018, 1, 1, 0, 0),
'weight': 1.0,
}, job_mapping.to_dict())
self.assertEqual('libFuzzer_child', fuzz_target.fully_qualified_name())
self.assertEqual('child', fuzz_target.project_qualified_name())
def test_record_fuzz_target_existing(self):
"""Test that record_fuzz_target works when updating an existing entity."""
data_types.FuzzTarget(
binary='child', engine='libFuzzer', project='test-project').put()
data_types.FuzzTargetJob(
fuzz_target_name='libFuzzer_child',
job='job',
engine='libFuzzer',
last_run=datetime.datetime(2017, 12, 31, 0, 0)).put()
fuzz_task.record_fuzz_target('libFuzzer', 'child', 'job')
fuzz_target = ndb.Key(data_types.FuzzTarget, 'libFuzzer_child').get()
self.assertDictEqual({
'binary': 'child',
'engine': 'libFuzzer',
'project': 'test-project',
}, fuzz_target.to_dict())
job_mapping = ndb.Key(data_types.FuzzTargetJob, 'libFuzzer_child/job').get()
self.assertDictEqual({
'fuzz_target_name': 'libFuzzer_child',
'job': 'job',
'engine': 'libFuzzer',
'last_run': datetime.datetime(2018, 1, 1, 0, 0),
'weight': 1.0,
}, job_mapping.to_dict())
self.assertEqual('libFuzzer_child', fuzz_target.fully_qualified_name())
self.assertEqual('child', fuzz_target.project_qualified_name())
def test_record_fuzz_target_no_binary_name(self):
"""Test recording fuzz target with no binary."""
# Passing None to binary_name is an error. We shouldn't create any
# FuzzTargets as a result.
fuzz_task.record_fuzz_target('libFuzzer', None, 'job')
fuzz_target = ndb.Key(data_types.FuzzTarget, 'libFuzzer_child').get()
self.assertIsNone(fuzz_target)
job_mapping = ndb.Key(data_types.FuzzTargetJob, 'libFuzzer_child/job').get()
self.assertIsNone(job_mapping)
@parameterized.parameterized.expand(['child', 'proj_child'])
def test_record_fuzz_target_ossfuzz(self, binary_name):
"""Test that record_fuzz_target works with OSS-Fuzz projects."""
self.mock.is_oss_fuzz.return_value = True
data_types.Job(name='job', environment_string='PROJECT_NAME = proj\n').put()
fuzz_task.record_fuzz_target('libFuzzer', binary_name, 'job')
fuzz_target = ndb.Key(data_types.FuzzTarget, 'libFuzzer_proj_child').get()
self.assertDictEqual({
'binary': binary_name,
'engine': 'libFuzzer',
'project': 'proj',
}, fuzz_target.to_dict())
job_mapping = ndb.Key(data_types.FuzzTargetJob,
'libFuzzer_proj_child/job').get()
self.assertDictEqual({
'fuzz_target_name': 'libFuzzer_proj_child',
'job': 'job',
'engine': 'libFuzzer',
'last_run': datetime.datetime(2018, 1, 1, 0, 0),
'weight': 1.0,
}, job_mapping.to_dict())
self.assertEqual('libFuzzer_proj_child', fuzz_target.fully_qualified_name())
self.assertEqual('proj_child', fuzz_target.project_qualified_name())
@test_utils.with_cloud_emulators('datastore')
class DoEngineFuzzingTest(fake_filesystem_unittest.TestCase):
"""do_engine_fuzzing tests."""
def setUp(self):
helpers.patch_environ(self)
helpers.patch(self, [
'bot.fuzzers.engine_common.current_timestamp',
'bot.tasks.fuzz_task.GcsCorpus.sync_from_gcs',
'bot.tasks.fuzz_task.GcsCorpus.upload_files',
'build_management.revisions.get_component_list',
'bot.testcase_manager.upload_log',
'bot.testcase_manager.upload_testcase',
'metrics.fuzzer_stats.upload_stats',
])
test_utils.set_up_pyfakefs(self)
os.environ['JOB_NAME'] = 'libfuzzer_asan_test'
os.environ['FUZZ_INPUTS'] = '/fuzz-inputs'
os.environ['FUZZ_INPUTS_DISK'] = '/fuzz-inputs-disk'
os.environ['BUILD_DIR'] = '/build_dir'
os.environ['MAX_TESTCASES'] = '2'
os.environ['AUTOMATIC_LABELS'] = 'auto_label,auto_label1'
os.environ['AUTOMATIC_COMPONENTS'] = 'auto_component,auto_component1'
self.fs.create_file('/build_dir/test_target')
self.fs.create_file(
'/build_dir/test_target.labels', contents='label1\nlabel2')
self.fs.create_file(
'/build_dir/test_target.owners', contents='[email protected]')
self.fs.create_file(
'/build_dir/test_target.components', contents='component1\ncomponent2')
self.fs.create_file('/input')
self.mock.sync_from_gcs.return_value = True
self.mock.upload_files.return_value = True
self.mock.get_component_list.return_value = [{
'component': 'component',
'link_text': 'rev',
}]
self.mock.current_timestamp.return_value = 0.0
def test_basic(self):
"""Test basic fuzzing session."""
session = fuzz_task.FuzzingSession('libFuzzer', 'libfuzzer_asan_test', 60)
session.testcase_directory = os.environ['FUZZ_INPUTS']
session.data_directory = '/data_dir'
os.environ['FUZZ_TARGET'] = 'test_target'
os.environ['APP_REVISION'] = '1'
expected_crashes = [engine.Crash('/input', 'stack', ['args'], 1.0)]
engine_impl = mock.Mock()
engine_impl.name = 'libFuzzer'
engine_impl.prepare.return_value = engine.FuzzOptions(
'/corpus', ['arg'], {
'strategy_1': 1,
'strategy_2': 50,
})
engine_impl.fuzz.side_effect = lambda *_: engine.FuzzResult(
'logs', ['cmd'], expected_crashes, {'stat': 1}, 42.0)
crashes, fuzzer_metadata = session.do_engine_fuzzing(engine_impl)
self.assertDictEqual({
'fuzzer_binary_name':
'test_target',
'issue_components':
'component1,component2,auto_component,auto_component1',
'issue_labels':
'label1,label2,auto_label,auto_label1',
'issue_owners':
'[email protected]',
}, fuzzer_metadata)
log_time = datetime.datetime(1970, 1, 1, 0, 0)
log_call = mock.call(
'Component revisions (build r1):\n'
'component: rev\n\n'
'Return code: 1\n\n'
'Command: cmd\nBot: None\nTime ran: 42.0\n\n'
'logs\n'
'cf::fuzzing_strategies: strategy_1:1,strategy_2:50', log_time)
self.mock.upload_log.assert_has_calls([log_call, log_call])
self.mock.upload_testcase.assert_has_calls([
mock.call('/input', log_time),
mock.call('/input', log_time),
])
self.assertEqual(2, len(crashes))
for i in range(2):
self.assertEqual('/input', crashes[i].file_path)
self.assertEqual(1, crashes[i].return_code)
self.assertEqual('stack', crashes[i].unsymbolized_crash_stacktrace)
self.assertEqual(1.0, crashes[i].crash_time)
self.assertEqual('args', crashes[i].arguments)
for i in range(2):
upload_args = self.mock.upload_stats.call_args_list[i][0][0]
testcase_run = upload_args[0]
self.assertDictEqual({
'build_revision': 1,
'command': ['cmd'],
'fuzzer': u'libFuzzer_test_target',
'job': 'libfuzzer_asan_test',
'kind': 'TestcaseRun',
'stat': 1,
'strategy_strategy_1': 1,
'strategy_strategy_2': 50,
'timestamp': 0.0,
}, testcase_run.data)
class UntrustedRunEngineFuzzerTest(
untrusted_runner_helpers.UntrustedRunnerIntegrationTest):
"""Engine fuzzing tests for untrusted."""
def setUp(self):
"""Set up."""
super(UntrustedRunEngineFuzzerTest, self).setUp()
environment.set_value('JOB_NAME', 'libfuzzer_asan_job')
job = data_types.Job(
name='libfuzzer_asan_job',
environment_string=(
'RELEASE_BUILD_BUCKET_PATH = '
'gs://clusterfuzz-test-data/test_libfuzzer_builds/'
'test-libfuzzer-build-([0-9]+).zip\n'
'REVISION_VARS_URL = https://commondatastorage.googleapis.com/'
'clusterfuzz-test-data/test_libfuzzer_builds/'
'test-libfuzzer-build-%s.srcmap.json\n'))
job.put()
self.temp_dir = tempfile.mkdtemp(dir=environment.get_value('FUZZ_INPUTS'))
environment.set_value('USE_MINIJAIL', False)
def tearDown(self):
super(UntrustedRunEngineFuzzerTest, self).tearDown()
shutil.rmtree(self.temp_dir, ignore_errors=True)
def test_run_engine_fuzzer(self):
"""Test running engine fuzzer."""
self._setup_env(job_type='libfuzzer_asan_job')
environment.set_value('FUZZ_TEST_TIMEOUT', 3600)
build_manager.setup_build()
corpus_directory = os.path.join(self.temp_dir, 'corpus')
testcase_directory = os.path.join(self.temp_dir, 'artifacts')
os.makedirs(file_host.rebase_to_worker_root(corpus_directory))
os.makedirs(file_host.rebase_to_worker_root(testcase_directory))
result, fuzzer_metadata = fuzz_task.run_engine_fuzzer(
libfuzzer_engine.LibFuzzerEngine(), 'test_fuzzer', corpus_directory,
testcase_directory)
self.assertIn(
'ERROR: AddressSanitizer: SEGV on unknown address 0x000000000000',
result.logs)
self.assertEqual(1, len(result.crashes))
self.assertTrue(result.crashes[0].input_path.startswith(
os.environ['ROOT_DIR']))
self.assertTrue(os.path.exists(result.crashes[0].input_path))
self.assertIsInstance(result.stats.get('number_of_executed_units'), int)
self.assertIsInstance(result.stats.get('oom_count'), int)
self.assertIsInstance(
result.stats.get('strategy_selection_method'), six.string_types)
self.assertDictEqual({'fuzzer_binary_name': 'test_fuzzer'}, fuzzer_metadata)
class AddIssueMetadataFromEnvironmentTest(unittest.TestCase):
"""Tests for _add_issue_metadata_from_environment."""
def setUp(self):
helpers.patch_environ(self)
def test_add_no_existing(self):
"""Test adding issue metadata when there are none existing."""
os.environ['AUTOMATIC_LABELS'] = 'auto_label'
os.environ['AUTOMATIC_LABELS_1'] = 'auto_label1'
os.environ['AUTOMATIC_COMPONENTS'] = 'auto_component'
os.environ['AUTOMATIC_COMPONENTS_1'] = 'auto_component1'
metadata = {}
fuzz_task._add_issue_metadata_from_environment(metadata)
self.assertDictEqual({
'issue_components': 'auto_component,auto_component1',
'issue_labels': 'auto_label,auto_label1',
}, metadata)
def test_add_append(self):
"""Test adding issue metadata when there are already existing metadata."""
os.environ['AUTOMATIC_LABELS'] = 'auto_label'
os.environ['AUTOMATIC_LABELS_1'] = 'auto_label1'
os.environ['AUTOMATIC_COMPONENTS'] = 'auto_component'
os.environ['AUTOMATIC_COMPONENTS_1'] = 'auto_component1'
metadata = {
'issue_components': 'existing_component',
'issue_labels': 'existing_label'
}
fuzz_task._add_issue_metadata_from_environment(metadata)
self.assertDictEqual({
'issue_components':
'existing_component,auto_component,auto_component1',
'issue_labels':
'existing_label,auto_label,auto_label1',
}, metadata)
def test_add_numeric(self):
"""Tests adding a numeric label."""
os.environ['AUTOMATIC_LABELS'] = '123'
metadata = {}
fuzz_task._add_issue_metadata_from_environment(metadata)
self.assertDictEqual({
'issue_labels': '123',
}, metadata)
|
[] |
[] |
[
"MAX_TESTCASES",
"BUILD_DIR",
"FUZZ_TARGET",
"FUZZ_INPUTS",
"AUTOMATIC_LABELS_1",
"FUZZ_INPUTS_DISK",
"JOB_NAME",
"ROOT_DIR",
"APP_REVISION",
"CORPUS_BUCKET",
"FAIL_RETRIES",
"AUTOMATIC_LABELS",
"AUTOMATIC_COMPONENTS_1",
"AUTOMATIC_COMPONENTS"
] |
[]
|
["MAX_TESTCASES", "BUILD_DIR", "FUZZ_TARGET", "FUZZ_INPUTS", "AUTOMATIC_LABELS_1", "FUZZ_INPUTS_DISK", "JOB_NAME", "ROOT_DIR", "APP_REVISION", "CORPUS_BUCKET", "FAIL_RETRIES", "AUTOMATIC_LABELS", "AUTOMATIC_COMPONENTS_1", "AUTOMATIC_COMPONENTS"]
|
python
| 14 | 0 | |
docs/conf.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.ifconfig',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
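# Spell checking is optional; it is enabled only when the SPELLCHECK
# environment variable is set.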
if os.getenv('SPELLCHECK'):
extensions += ['sphinxcontrib.spelling']
spelling_show_suggestions = True
spelling_lang = 'en_US'
source_suffix = '.rst'
master_doc = 'index'
project = u'Friday'
year = '2017'
author = u'Matt Gregory'
copyright = '{0}, {1}'.format(year, author)
version = release = u'0.1.0'
pygments_style = 'trac'
templates_path = ['.']
extlinks = {
'issue': ('https://github.com/grovduck/python-friday/issues/%s', '#'),
'pr': ('https://github.com/grovduck/python-friday/pull/%s', 'PR #'),
}
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only set the theme if we're building docs locally
html_theme = 'sphinx_rtd_theme'
html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = False
html_sidebars = {
'**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'],
}
html_short_title = '%s-%s' % (project, version)
napoleon_use_ivar = True
napoleon_use_rtype = False
napoleon_use_param = False
|
[] |
[] |
[
"SPELLCHECK",
"READTHEDOCS"
] |
[]
|
["SPELLCHECK", "READTHEDOCS"]
|
python
| 2 | 0 | |
go/internal/gcimporter/gcimporter_test.go
|
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file is a copy of $GOROOT/src/go/internal/gcimporter/gcimporter_test.go,
// adjusted to make it build with code from (std lib) internal/testenv copied.
package gcimporter
import (
"bytes"
"fmt"
"go/build"
"go/constant"
"go/types"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
"golang.org/x/tools/internal/testenv"
)
func TestMain(m *testing.M) {
testenv.ExitIfSmallMachine()
os.Exit(m.Run())
}
// ----------------------------------------------------------------------------
func needsCompiler(t *testing.T, compiler string) {
if runtime.Compiler == compiler {
return
}
switch compiler {
case "gc":
t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
}
}
// compile runs the compiler on filename, with dirname as the working directory,
// and writes the output file to outdirname.
func compile(t *testing.T, dirname, filename, outdirname string) string {
testenv.NeedsGoBuild(t)
// filename must end with ".go"
if !strings.HasSuffix(filename, ".go") {
t.Fatalf("filename doesn't end in .go: %s", filename)
}
basename := filepath.Base(filename)
outname := filepath.Join(outdirname, basename[:len(basename)-2]+"o")
cmd := exec.Command("go", "tool", "compile", "-o", outname, filename)
cmd.Dir = dirname
out, err := cmd.CombinedOutput()
if err != nil {
t.Logf("%s", out)
t.Fatalf("go tool compile %s failed: %s", filename, err)
}
return outname
}
func testPath(t *testing.T, path, srcDir string) *types.Package {
t0 := time.Now()
pkg, err := Import(make(map[string]*types.Package), path, srcDir, nil)
if err != nil {
t.Errorf("testPath(%s): %s", path, err)
return nil
}
t.Logf("testPath(%s): %v", path, time.Since(t0))
return pkg
}
const maxTime = 30 * time.Second
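// testDir imports every package file found under
// $GOROOT/pkg/$GOOS_$GOARCH/dir, recursing into subdirectories until endTime
// is reached, and reports the number of successful imports.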
func testDir(t *testing.T, dir string, endTime time.Time) (nimports int) {
dirname := filepath.Join(runtime.GOROOT(), "pkg", runtime.GOOS+"_"+runtime.GOARCH, dir)
list, err := ioutil.ReadDir(dirname)
if err != nil {
t.Fatalf("testDir(%s): %s", dirname, err)
}
for _, f := range list {
if time.Now().After(endTime) {
t.Log("testing time used up")
return
}
switch {
case !f.IsDir():
// try extensions
for _, ext := range pkgExts {
if strings.HasSuffix(f.Name(), ext) {
name := f.Name()[0 : len(f.Name())-len(ext)] // remove extension
if testPath(t, filepath.Join(dir, name), dir) != nil {
nimports++
}
}
}
case f.IsDir():
nimports += testDir(t, filepath.Join(dir, f.Name()), endTime)
}
}
return
}
func mktmpdir(t *testing.T) string {
tmpdir, err := ioutil.TempDir("", "gcimporter_test")
if err != nil {
t.Fatal("mktmpdir:", err)
}
if err := os.Mkdir(filepath.Join(tmpdir, "testdata"), 0700); err != nil {
os.RemoveAll(tmpdir)
t.Fatal("mktmpdir:", err)
}
return tmpdir
}
const testfile = "exports.go"
func TestImportTestdata(t *testing.T) {
needsCompiler(t, "gc")
tmpdir := mktmpdir(t)
defer os.RemoveAll(tmpdir)
compile(t, "testdata", testfile, filepath.Join(tmpdir, "testdata"))
// filename should end with ".go"
filename := testfile[:len(testfile)-3]
if pkg := testPath(t, "./testdata/"+filename, tmpdir); pkg != nil {
// The package's Imports list must include all packages
// explicitly imported by testfile, plus all packages
// referenced indirectly via exported objects in testfile.
// With the textual export format (when run against Go1.6),
// the list may also include additional packages that are
// not strictly required for import processing alone (they
// are exported to err "on the safe side").
// For now, we just test the presence of a few packages
// that we know are there for sure.
got := fmt.Sprint(pkg.Imports())
for _, want := range []string{"go/ast", "go/token"} {
if !strings.Contains(got, want) {
t.Errorf(`Package("exports").Imports() = %s, does not contain %s`, got, want)
}
}
}
}
func TestVersionHandling(t *testing.T) {
if debug {
t.Skip("TestVersionHandling panics in debug mode")
}
// This package only handles gc export data.
needsCompiler(t, "gc")
const dir = "./testdata/versions"
list, err := ioutil.ReadDir(dir)
if err != nil {
t.Fatal(err)
}
tmpdir := mktmpdir(t)
defer os.RemoveAll(tmpdir)
corruptdir := filepath.Join(tmpdir, "testdata", "versions")
if err := os.Mkdir(corruptdir, 0700); err != nil {
t.Fatal(err)
}
for _, f := range list {
name := f.Name()
if !strings.HasSuffix(name, ".a") {
continue // not a package file
}
if strings.Contains(name, "corrupted") {
continue // don't process a leftover corrupted file
}
pkgpath := "./" + name[:len(name)-2]
if testing.Verbose() {
t.Logf("importing %s", name)
}
// test that export data can be imported
_, err := Import(make(map[string]*types.Package), pkgpath, dir, nil)
if err != nil {
// ok to fail if it fails with a newer version error for select files
if strings.Contains(err.Error(), "newer version") {
switch name {
case "test_go1.11_999b.a", "test_go1.11_999i.a":
continue
}
// fall through
}
t.Errorf("import %q failed: %v", pkgpath, err)
continue
}
// create file with corrupted export data
// 1) read file
data, err := ioutil.ReadFile(filepath.Join(dir, name))
if err != nil {
t.Fatal(err)
}
// 2) find export data
i := bytes.Index(data, []byte("\n$$B\n")) + 5
j := bytes.Index(data[i:], []byte("\n$$\n")) + i
if i < 0 || j < 0 || i > j {
t.Fatalf("export data section not found (i = %d, j = %d)", i, j)
}
// 3) corrupt the data (increment every 7th byte)
for k := j - 13; k >= i; k -= 7 {
data[k]++
}
// 4) write the file
pkgpath += "_corrupted"
filename := filepath.Join(corruptdir, pkgpath) + ".a"
ioutil.WriteFile(filename, data, 0666)
// test that importing the corrupted file results in an error
_, err = Import(make(map[string]*types.Package), pkgpath, corruptdir, nil)
if err == nil {
t.Errorf("import corrupted %q succeeded", pkgpath)
} else if msg := err.Error(); !strings.Contains(msg, "version skew") {
t.Errorf("import %q error incorrect (%s)", pkgpath, msg)
}
}
}
func TestImportStdLib(t *testing.T) {
// This package only handles gc export data.
needsCompiler(t, "gc")
dt := maxTime
if testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" {
dt = 10 * time.Millisecond
}
nimports := testDir(t, "", time.Now().Add(dt)) // installed packages
t.Logf("tested %d imports", nimports)
}
var importedObjectTests = []struct {
name string
want string
}{
// non-interfaces
{"crypto.Hash", "type Hash uint"},
{"go/ast.ObjKind", "type ObjKind int"},
{"go/types.Qualifier", "type Qualifier func(*Package) string"},
{"go/types.Comparable", "func Comparable(T Type) bool"},
{"math.Pi", "const Pi untyped float"},
{"math.Sin", "func Sin(x float64) float64"},
{"go/ast.NotNilFilter", "func NotNilFilter(_ string, v reflect.Value) bool"},
{"go/internal/gcimporter.FindPkg", "func FindPkg(path string, srcDir string) (filename string, id string)"},
// interfaces
{"context.Context", "type Context interface{Deadline() (deadline time.Time, ok bool); Done() <-chan struct{}; Err() error; Value(key any) any}"},
{"crypto.Decrypter", "type Decrypter interface{Decrypt(rand io.Reader, msg []byte, opts DecrypterOpts) (plaintext []byte, err error); Public() PublicKey}"},
{"encoding.BinaryMarshaler", "type BinaryMarshaler interface{MarshalBinary() (data []byte, err error)}"},
{"io.Reader", "type Reader interface{Read(p []byte) (n int, err error)}"},
{"io.ReadWriter", "type ReadWriter interface{Reader; Writer}"},
{"go/ast.Node", "type Node interface{End() go/token.Pos; Pos() go/token.Pos}"},
{"go/types.Type", "type Type interface{String() string; Underlying() Type}"},
}
// TODO(rsc): Delete this init func after x/tools no longer needs to test successfully with Go 1.17.
func init() {
if build.Default.ReleaseTags[len(build.Default.ReleaseTags)-1] <= "go1.17" {
for i := range importedObjectTests {
if importedObjectTests[i].name == "context.Context" {
// Expand any to interface{}.
importedObjectTests[i].want = "type Context interface{Deadline() (deadline time.Time, ok bool); Done() <-chan struct{}; Err() error; Value(key interface{}) interface{}}"
}
}
}
}
func TestImportedTypes(t *testing.T) {
testenv.NeedsGo1Point(t, 11)
// This package only handles gc export data.
needsCompiler(t, "gc")
for _, test := range importedObjectTests {
obj := importObject(t, test.name)
if obj == nil {
continue // error reported elsewhere
}
got := types.ObjectString(obj, types.RelativeTo(obj.Pkg()))
// TODO(rsc): Delete this block once go.dev/cl/368254 lands.
if got != test.want && test.want == strings.ReplaceAll(got, "interface{}", "any") {
got = test.want
}
if got != test.want {
t.Errorf("%s: got %q; want %q", test.name, got, test.want)
}
if named, _ := obj.Type().(*types.Named); named != nil {
verifyInterfaceMethodRecvs(t, named, 0)
}
}
}
func TestImportedConsts(t *testing.T) {
testenv.NeedsGo1Point(t, 11)
tests := []struct {
name string
want constant.Kind
}{
{"math.Pi", constant.Float},
{"math.MaxFloat64", constant.Float},
{"math.MaxInt64", constant.Int},
}
for _, test := range tests {
obj := importObject(t, test.name)
if got := obj.(*types.Const).Val().Kind(); got != test.want {
t.Errorf("%s: imported as constant.Kind(%v), want constant.Kind(%v)", test.name, got, test.want)
}
}
}
// importObject imports the object specified by a name of the form
// <import path>.<object name>, e.g. go/types.Type.
//
// If any errors occur they are reported via t and the resulting object will
// be nil.
func importObject(t *testing.T, name string) types.Object {
s := strings.Split(name, ".")
if len(s) != 2 {
t.Fatal("inconsistent test data")
}
importPath := s[0]
objName := s[1]
pkg, err := Import(make(map[string]*types.Package), importPath, ".", nil)
if err != nil {
t.Error(err)
return nil
}
obj := pkg.Scope().Lookup(objName)
if obj == nil {
t.Errorf("%s: object not found", name)
return nil
}
return obj
}
// verifyInterfaceMethodRecvs verifies that method receiver types
// are named if the methods belong to a named interface type.
func verifyInterfaceMethodRecvs(t *testing.T, named *types.Named, level int) {
// avoid endless recursion in case of an embedding bug that leads to a cycle
if level > 10 {
t.Errorf("%s: embeds itself", named)
return
}
iface, _ := named.Underlying().(*types.Interface)
if iface == nil {
return // not an interface
}
// check explicitly declared methods
for i := 0; i < iface.NumExplicitMethods(); i++ {
m := iface.ExplicitMethod(i)
recv := m.Type().(*types.Signature).Recv()
if recv == nil {
t.Errorf("%s: missing receiver type", m)
continue
}
if recv.Type() != named {
t.Errorf("%s: got recv type %s; want %s", m, recv.Type(), named)
}
}
// check embedded interfaces (if they are named, too)
for i := 0; i < iface.NumEmbeddeds(); i++ {
// embedding of interfaces cannot have cycles; recursion will terminate
if etype, _ := iface.EmbeddedType(i).(*types.Named); etype != nil {
verifyInterfaceMethodRecvs(t, etype, level+1)
}
}
}
func TestIssue5815(t *testing.T) {
// This package only handles gc export data.
needsCompiler(t, "gc")
pkg := importPkg(t, "strings", ".")
scope := pkg.Scope()
for _, name := range scope.Names() {
obj := scope.Lookup(name)
if obj.Pkg() == nil {
t.Errorf("no pkg for %s", obj)
}
if tname, _ := obj.(*types.TypeName); tname != nil {
named := tname.Type().(*types.Named)
for i := 0; i < named.NumMethods(); i++ {
m := named.Method(i)
if m.Pkg() == nil {
t.Errorf("no pkg for %s", m)
}
}
}
}
}
// Smoke test to ensure that imported methods get the correct package.
func TestCorrectMethodPackage(t *testing.T) {
// This package only handles gc export data.
needsCompiler(t, "gc")
imports := make(map[string]*types.Package)
_, err := Import(imports, "net/http", ".", nil)
if err != nil {
t.Fatal(err)
}
mutex := imports["sync"].Scope().Lookup("Mutex").(*types.TypeName).Type()
mset := types.NewMethodSet(types.NewPointer(mutex)) // methods of *sync.Mutex
sel := mset.Lookup(nil, "Lock")
lock := sel.Obj().(*types.Func)
if got, want := lock.Pkg().Path(), "sync"; got != want {
t.Errorf("got package path %q; want %q", got, want)
}
}
func TestIssue13566(t *testing.T) {
// This package only handles gc export data.
needsCompiler(t, "gc")
// On windows, we have to set the -D option for the compiler to avoid having a drive
// letter and an illegal ':' in the import path - just skip it (see also issue #3483).
if runtime.GOOS == "windows" {
t.Skip("avoid dealing with relative paths/drive letters on windows")
}
tmpdir := mktmpdir(t)
defer os.RemoveAll(tmpdir)
testoutdir := filepath.Join(tmpdir, "testdata")
// b.go needs to be compiled from the output directory so that the compiler can
// find the compiled package a. We pass the full path to compile() so that we
// don't have to copy the file to that directory.
bpath, err := filepath.Abs(filepath.Join("testdata", "b.go"))
if err != nil {
t.Fatal(err)
}
compile(t, "testdata", "a.go", testoutdir)
compile(t, testoutdir, bpath, testoutdir)
// import must succeed (test for issue at hand)
pkg := importPkg(t, "./testdata/b", tmpdir)
// make sure all indirectly imported packages have names
for _, imp := range pkg.Imports() {
if imp.Name() == "" {
t.Errorf("no name for %s package", imp.Path())
}
}
}
func TestIssue13898(t *testing.T) {
// This package only handles gc export data.
needsCompiler(t, "gc")
// import go/internal/gcimporter which imports go/types partially
imports := make(map[string]*types.Package)
_, err := Import(imports, "go/internal/gcimporter", ".", nil)
if err != nil {
t.Fatal(err)
}
// look for go/types package
var goTypesPkg *types.Package
for path, pkg := range imports {
if path == "go/types" {
goTypesPkg = pkg
break
}
}
if goTypesPkg == nil {
t.Fatal("go/types not found")
}
// look for go/types.Object type
obj := lookupObj(t, goTypesPkg.Scope(), "Object")
typ, ok := obj.Type().(*types.Named)
if !ok {
t.Fatalf("go/types.Object type is %v; wanted named type", typ)
}
// lookup go/types.Object.Pkg method
m, index, indirect := types.LookupFieldOrMethod(typ, false, nil, "Pkg")
if m == nil {
t.Fatalf("go/types.Object.Pkg not found (index = %v, indirect = %v)", index, indirect)
}
// the method must belong to go/types
if m.Pkg().Path() != "go/types" {
t.Fatalf("found %v; want go/types", m.Pkg())
}
}
func TestIssue15517(t *testing.T) {
// This package only handles gc export data.
needsCompiler(t, "gc")
// On windows, we have to set the -D option for the compiler to avoid having a drive
// letter and an illegal ':' in the import path - just skip it (see also issue #3483).
if runtime.GOOS == "windows" {
t.Skip("avoid dealing with relative paths/drive letters on windows")
}
tmpdir := mktmpdir(t)
defer os.RemoveAll(tmpdir)
compile(t, "testdata", "p.go", filepath.Join(tmpdir, "testdata"))
// Multiple imports of p must succeed without redeclaration errors.
// We use an import path that's not cleaned up so that the eventual
// file path for the package is different from the package path; this
// will expose the error if it is present.
//
// (Issue: Both the textual and the binary importer used the file path
// of the package to be imported as key into the shared packages map.
// However, the binary importer then used the package path to identify
// the imported package to mark it as complete; effectively marking the
// wrong package as complete. By using an "unclean" package path, the
// file and package path are different, exposing the problem if present.
// The same issue occurs with vendoring.)
imports := make(map[string]*types.Package)
for i := 0; i < 3; i++ {
if _, err := Import(imports, "./././testdata/p", tmpdir, nil); err != nil {
t.Fatal(err)
}
}
}
func TestIssue15920(t *testing.T) {
// This package only handles gc export data.
needsCompiler(t, "gc")
// On windows, we have to set the -D option for the compiler to avoid having a drive
// letter and an illegal ':' in the import path - just skip it (see also issue #3483).
if runtime.GOOS == "windows" {
t.Skip("avoid dealing with relative paths/drive letters on windows")
}
compileAndImportPkg(t, "issue15920")
}
func TestIssue20046(t *testing.T) {
// This package only handles gc export data.
needsCompiler(t, "gc")
// On windows, we have to set the -D option for the compiler to avoid having a drive
// letter and an illegal ':' in the import path - just skip it (see also issue #3483).
if runtime.GOOS == "windows" {
t.Skip("avoid dealing with relative paths/drive letters on windows")
}
// "./issue20046".V.M must exist
pkg := compileAndImportPkg(t, "issue20046")
obj := lookupObj(t, pkg.Scope(), "V")
if m, index, indirect := types.LookupFieldOrMethod(obj.Type(), false, nil, "M"); m == nil {
t.Fatalf("V.M not found (index = %v, indirect = %v)", index, indirect)
}
}
func TestIssue25301(t *testing.T) {
testenv.NeedsGo1Point(t, 11)
// This package only handles gc export data.
needsCompiler(t, "gc")
// On windows, we have to set the -D option for the compiler to avoid having a drive
// letter and an illegal ':' in the import path - just skip it (see also issue #3483).
if runtime.GOOS == "windows" {
t.Skip("avoid dealing with relative paths/drive letters on windows")
}
compileAndImportPkg(t, "issue25301")
}
func importPkg(t *testing.T, path, srcDir string) *types.Package {
pkg, err := Import(make(map[string]*types.Package), path, srcDir, nil)
if err != nil {
t.Fatal(err)
}
return pkg
}
func compileAndImportPkg(t *testing.T, name string) *types.Package {
tmpdir := mktmpdir(t)
defer os.RemoveAll(tmpdir)
compile(t, "testdata", name+".go", filepath.Join(tmpdir, "testdata"))
return importPkg(t, "./testdata/"+name, tmpdir)
}
func lookupObj(t *testing.T, scope *types.Scope, name string) types.Object {
if obj := scope.Lookup(name); obj != nil {
return obj
}
t.Fatalf("%s not found", name)
return nil
}
|
[
"\"GO_BUILDER_NAME\""
] |
[] |
[
"GO_BUILDER_NAME"
] |
[]
|
["GO_BUILDER_NAME"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
"log"
"net/http"
"os"
"strings"
"WebRTCConf/auth"
"WebRTCConf/signaling"
"github.com/gorilla/sessions"
"github.com/joho/godotenv"
"github.com/rs/cors"
)
func init() {
// loads values from .env into the system
if err := godotenv.Load(); err != nil {
log.Print("No .env file found")
}
auth.Env.GithubClientID = os.Getenv("GITHUB_CLIENT_ID")
auth.Env.GithubClientSecret = os.Getenv("GITHUB_CLIENT_SECRET")
auth.Env.GoogleClientID = os.Getenv("GOOGLE_CLIENT_ID")
auth.Env.GoogleClientSecret = os.Getenv("GOOGLE_CLIENT_SECRET")
auth.Env.RedirectURI = os.Getenv("REDIRECT_URI")
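// ICE server URLs and tokens are provided as '#'-separated lists in the
// environment.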
iceUrls := os.Getenv("ICE_URLS")
auth.Env.IceURLs = strings.Split(iceUrls, "#")
iceTokens := os.Getenv("ICE_TOKENS")
auth.Env.IceTokens = strings.Split(iceTokens, "#")
auth.Store = sessions.NewCookieStore([]byte(os.Getenv("SESSION_KEY")))
}
func main() {
go signaling.RManager.HandleChannels()
mux := http.NewServeMux()
mux.HandleFunc("/", auth.Redirect)
mux.HandleFunc("/getSession", auth.GetSession)
mux.HandleFunc("/auth", auth.OAuth)
mux.HandleFunc("/getUser", auth.GetUser)
mux.HandleFunc("/deleteUser", auth.DeleteUser)
mux.HandleFunc("/newRoom", auth.NewRoom)
mux.HandleFunc("/deleteRoom", auth.DeleteRoom)
mux.HandleFunc("/checkRoom", auth.CheckRoom)
mux.HandleFunc("/toggle", auth.ToggleRoomLock)
mux.HandleFunc("/iceserver", auth.IceServer)
mux.HandleFunc("/ws", signaling.WebSocketHandler)
mux.HandleFunc("/logout", auth.LogoutSession)
mux.HandleFunc("/oneTapAuth", auth.OneTapAuth)
c := cors.New(cors.Options{
AllowedOrigins: []string{"http://localhost:8000", "https://localhost:8000", "https://proximo.netlify.app", "https://proximo.pw"},
AllowCredentials: true,
// Enable Debugging for testing, consider disabling in production
Debug: false,
})
handler := c.Handler(mux)
log.Println("server started on port " + os.Getenv("PORT"))
if err := http.ListenAndServe(":"+os.Getenv("PORT"), handler); err != nil {
log.Fatal(err)
}
}
|
[
"\"GITHUB_CLIENT_ID\"",
"\"GITHUB_CLIENT_SECRET\"",
"\"GOOGLE_CLIENT_ID\"",
"\"GOOGLE_CLIENT_SECRET\"",
"\"REDIRECT_URI\"",
"\"ICE_URLS\"",
"\"ICE_TOKENS\"",
"\"SESSION_KEY\"",
"\"PORT\"",
"\"PORT\""
] |
[] |
[
"PORT",
"GITHUB_CLIENT_SECRET",
"ICE_URLS",
"REDIRECT_URI",
"SESSION_KEY",
"ICE_TOKENS",
"GOOGLE_CLIENT_SECRET",
"GOOGLE_CLIENT_ID",
"GITHUB_CLIENT_ID"
] |
[]
|
["PORT", "GITHUB_CLIENT_SECRET", "ICE_URLS", "REDIRECT_URI", "SESSION_KEY", "ICE_TOKENS", "GOOGLE_CLIENT_SECRET", "GOOGLE_CLIENT_ID", "GITHUB_CLIENT_ID"]
|
go
| 9 | 0 | |
vm/kvm/kvm.go
|
// Copyright 2015 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
// Package kvm provides VMs based on lkvm (kvmtool) virtualization.
// It is not well tested.
package kvm
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"sync"
"time"
"github.com/google/syzkaller/pkg/config"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/vm/vmimpl"
)
const (
hostAddr = "192.168.33.1"
)
func init() {
vmimpl.Register("kvm", ctor)
}
type Config struct {
Count int // number of VMs to use
Lkvm string // lkvm binary name
Kernel string // e.g. arch/x86/boot/bzImage
Cmdline string // kernel command line
CPU int // number of VM CPUs
Mem int // amount of VM memory in MBs
}
type Pool struct {
env *vmimpl.Env
cfg *Config
}
type instance struct {
cfg *Config
sandbox string
sandboxPath string
lkvm *exec.Cmd
readerC chan error
waiterC chan error
debug bool
mu sync.Mutex
outputB []byte
outputC chan []byte
}
func ctor(env *vmimpl.Env) (vmimpl.Pool, error) {
cfg := &Config{
Count: 1,
Lkvm: "lkvm",
}
if err := config.LoadData(env.Config, cfg); err != nil {
return nil, fmt.Errorf("failed to parse kvm vm config: %v", err)
}
if cfg.Count < 1 || cfg.Count > 1000 {
return nil, fmt.Errorf("invalid config param count: %v, want [1, 1000]", cfg.Count)
}
if env.Debug {
cfg.Count = 1
}
if env.Image != "" {
return nil, fmt.Errorf("lkvm does not support custom images")
}
if _, err := exec.LookPath(cfg.Lkvm); err != nil {
return nil, err
}
if !osutil.IsExist(cfg.Kernel) {
return nil, fmt.Errorf("kernel file '%v' does not exist", cfg.Kernel)
}
if cfg.CPU < 1 || cfg.CPU > 1024 {
return nil, fmt.Errorf("invalid config param cpu: %v, want [1-1024]", cfg.CPU)
}
if cfg.Mem < 128 || cfg.Mem > 1048576 {
return nil, fmt.Errorf("invalid config param mem: %v, want [128-1048576]", cfg.Mem)
}
cfg.Kernel = osutil.Abs(cfg.Kernel)
pool := &Pool{
cfg: cfg,
env: env,
}
return pool, nil
}
func (pool *Pool) Count() int {
return pool.cfg.Count
}
func (pool *Pool) Create(workdir string, index int) (vmimpl.Instance, error) {
sandbox := fmt.Sprintf("syz-%v", index)
inst := &instance{
cfg: pool.cfg,
sandbox: sandbox,
sandboxPath: filepath.Join(os.Getenv("HOME"), ".lkvm", sandbox),
debug: pool.env.Debug,
}
closeInst := inst
defer func() {
if closeInst != nil {
closeInst.Close()
}
}()
os.RemoveAll(inst.sandboxPath)
os.Remove(inst.sandboxPath + ".sock")
out, err := osutil.Command(inst.cfg.Lkvm, "setup", sandbox).CombinedOutput()
if err != nil {
return nil, fmt.Errorf("failed to lkvm setup: %v\n%s", err, out)
}
scriptPath := filepath.Join(workdir, "script.sh")
if err := osutil.WriteExecFile(scriptPath, []byte(script)); err != nil {
return nil, fmt.Errorf("failed to create temp file: %v", err)
}
rpipe, wpipe, err := osutil.LongPipe()
if err != nil {
return nil, fmt.Errorf("failed to create pipe: %v", err)
}
inst.lkvm = osutil.Command("taskset", "-c", strconv.Itoa(index%runtime.NumCPU()),
inst.cfg.Lkvm, "sandbox",
"--disk", inst.sandbox,
"--kernel", inst.cfg.Kernel,
"--params", "slub_debug=UZ "+inst.cfg.Cmdline,
"--mem", strconv.Itoa(inst.cfg.Mem),
"--cpus", strconv.Itoa(inst.cfg.CPU),
"--network", "mode=user",
"--sandbox", scriptPath,
)
inst.lkvm.Stdout = wpipe
inst.lkvm.Stderr = wpipe
if err := inst.lkvm.Start(); err != nil {
rpipe.Close()
wpipe.Close()
return nil, fmt.Errorf("failed to start lkvm: %v", err)
}
// Start output reading goroutine.
inst.readerC = make(chan error)
go func() {
var buf [64 << 10]byte
for {
n, err := rpipe.Read(buf[:])
if n != 0 {
if inst.debug {
os.Stdout.Write(buf[:n])
os.Stdout.Write([]byte{'\n'})
}
inst.mu.Lock()
inst.outputB = append(inst.outputB, buf[:n]...)
if inst.outputC != nil {
select {
case inst.outputC <- inst.outputB:
inst.outputB = nil
default:
}
}
inst.mu.Unlock()
time.Sleep(time.Millisecond)
}
if err != nil {
rpipe.Close()
inst.readerC <- err
return
}
}
}()
// Wait for the lkvm asynchronously.
inst.waiterC = make(chan error, 1)
go func() {
err := inst.lkvm.Wait()
wpipe.Close()
inst.waiterC <- err
}()
// Wait for the script to start serving.
_, errc, err := inst.Run(10*time.Minute, nil, "mount -t debugfs none /sys/kernel/debug/")
if err == nil {
err = <-errc
}
if err != nil {
return nil, fmt.Errorf("failed to run script: %v", err)
}
closeInst = nil
return inst, nil
}
func (inst *instance) Close() {
if inst.lkvm != nil {
inst.lkvm.Process.Kill()
err := <-inst.waiterC
inst.waiterC <- err // repost it for waiting goroutines
<-inst.readerC
}
os.RemoveAll(inst.sandboxPath)
os.Remove(inst.sandboxPath + ".sock")
}
func (inst *instance) Forward(port int) (string, error) {
return fmt.Sprintf("%v:%v", hostAddr, port), nil
}
func (inst *instance) Copy(hostSrc string) (string, error) {
vmDst := filepath.Join("/", filepath.Base(hostSrc))
dst := filepath.Join(inst.sandboxPath, vmDst)
if err := osutil.CopyFile(hostSrc, dst); err != nil {
return "", err
}
if err := os.Chmod(dst, 0777); err != nil {
return "", err
}
return vmDst, nil
}
func (inst *instance) Run(timeout time.Duration, stop <-chan bool, command string) (
<-chan []byte, <-chan error, error) {
outputC := make(chan []byte, 10)
errorC := make(chan error, 1)
inst.mu.Lock()
inst.outputB = nil
inst.outputC = outputC
inst.mu.Unlock()
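// Commands are delivered to the guest by dropping an executable script at
// <sandbox>/syz-cmd: write to a temp file first and rename it so the guest
// never sees a partially written script. The guest-side loop (the `script`
// constant below) runs the command and deletes the file; the polling loop
// below treats the file's disappearance as command completion.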
cmdFile := filepath.Join(inst.sandboxPath, "/syz-cmd")
tmpFile := cmdFile + "-tmp"
if err := osutil.WriteExecFile(tmpFile, []byte(command)); err != nil {
return nil, nil, err
}
if err := os.Rename(tmpFile, cmdFile); err != nil {
return nil, nil, err
}
signal := func(err error) {
inst.mu.Lock()
if inst.outputC == outputC {
inst.outputB = nil
inst.outputC = nil
}
inst.mu.Unlock()
errorC <- err
}
go func() {
timeoutTicker := time.NewTicker(timeout)
secondTicker := time.NewTicker(time.Second)
var resultErr error
loop:
for {
select {
case <-timeoutTicker.C:
resultErr = vmimpl.ErrTimeout
break loop
case <-stop:
resultErr = vmimpl.ErrTimeout
break loop
case <-secondTicker.C:
if !osutil.IsExist(cmdFile) {
resultErr = nil
break loop
}
case err := <-inst.waiterC:
inst.waiterC <- err // repost it for Close
resultErr = fmt.Errorf("lkvm exited")
break loop
}
}
signal(resultErr)
timeoutTicker.Stop()
secondTicker.Stop()
}()
return outputC, errorC, nil
}
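// script is the guest-side counterpart of Run above: it polls for /syz-cmd,
// executes it, and removes it so the host can detect completion.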
const script = `#! /bin/bash
while true; do
if [ -e "/syz-cmd" ]; then
/syz-cmd
rm -f /syz-cmd
else
sleep 1
fi
done
`
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
cmd/sonarExecuteScan_test.go
|
package cmd
import (
"net/http"
"os"
"os/exec"
"path"
"testing"
piperHttp "github.com/SAP/jenkins-library/pkg/http"
"github.com/SAP/jenkins-library/pkg/mock"
FileUtils "github.com/SAP/jenkins-library/pkg/piperutils"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
//TODO: extract to mock package
type mockDownloader struct {
shouldFail bool
requestedURL []string
requestedFile []string
}
func (m *mockDownloader) DownloadFile(url, filename string, header http.Header, cookies []*http.Cookie) error {
m.requestedURL = append(m.requestedURL, url)
m.requestedFile = append(m.requestedFile, filename)
if m.shouldFail {
return errors.New("something happened")
}
return nil
}
func (m *mockDownloader) SetOptions(options piperHttp.ClientOptions) {}
func mockFileUtilsExists(exists bool) func(string) (bool, error) {
return func(filename string) (bool, error) {
if exists {
return true, nil
}
return false, errors.New("something happened")
}
}
func mockExecLookPath(executable string) (string, error) {
if executable == "local-sonar-scanner" {
return "/usr/bin/sonar-scanner", nil
}
return "", errors.New("something happened")
}
func mockFileUtilsUnzip(t *testing.T, expectSrc string) func(string, string) ([]string, error) {
return func(src, dest string) ([]string, error) {
assert.Equal(t, path.Join(dest, expectSrc), src)
return []string{}, nil
}
}
func mockOsRename(t *testing.T, expectOld, expectNew string) func(string, string) error {
return func(old, new string) error {
assert.Regexp(t, expectOld, old)
assert.Equal(t, expectNew, new)
return nil
}
}
func TestRunSonar(t *testing.T) {
mockRunner := mock.ExecMockRunner{}
mockClient := mockDownloader{shouldFail: false}
t.Run("default", func(t *testing.T) {
// init
sonar = sonarSettings{
binary: "sonar-scanner",
environment: []string{},
options: []string{},
}
options := sonarExecuteScanOptions{
CustomTLSCertificateLinks: "",
Token: "secret-ABC",
Host: "https://sonar.sap.com",
Organization: "SAP",
ProjectVersion: "1.2.3",
}
fileUtilsExists = mockFileUtilsExists(true)
os.Setenv("PIPER_SONAR_LOAD_CERTIFICATES", "true")
require.Equal(t, "true", os.Getenv("PIPER_SONAR_LOAD_CERTIFICATES"), "PIPER_SONAR_LOAD_CERTIFICATES must be set")
defer func() {
fileUtilsExists = FileUtils.FileExists
os.Unsetenv("PIPER_SONAR_LOAD_CERTIFICATES")
}()
// test
err := runSonar(options, &mockClient, &mockRunner)
// assert
assert.NoError(t, err)
assert.Contains(t, sonar.options, "-Dsonar.projectVersion=1.2.3")
assert.Contains(t, sonar.options, "-Dsonar.organization=SAP")
assert.Contains(t, sonar.environment, "SONAR_HOST_URL=https://sonar.sap.com")
assert.Contains(t, sonar.environment, "SONAR_TOKEN=secret-ABC")
assert.Contains(t, sonar.environment, "SONAR_SCANNER_OPTS=-Djavax.net.ssl.trustStore="+path.Join(getWorkingDir(), ".certificates", "cacerts"))
})
t.Run("with custom options", func(t *testing.T) {
// init
sonar = sonarSettings{
binary: "sonar-scanner",
environment: []string{},
options: []string{},
}
options := sonarExecuteScanOptions{
Options: "-Dsonar.projectKey=piper",
}
fileUtilsExists = mockFileUtilsExists(true)
defer func() {
fileUtilsExists = FileUtils.FileExists
}()
// test
err := runSonar(options, &mockClient, &mockRunner)
// assert
assert.NoError(t, err)
assert.Contains(t, sonar.options, "-Dsonar.projectKey=piper")
})
}
func TestSonarHandlePullRequest(t *testing.T) {
t.Run("default", func(t *testing.T) {
// init
sonar = sonarSettings{
binary: "sonar-scanner",
environment: []string{},
options: []string{},
}
options := sonarExecuteScanOptions{
ChangeID: "123",
PullRequestProvider: "GitHub",
ChangeBranch: "feat/bogus",
ChangeTarget: "master",
Owner: "SAP",
Repository: "jenkins-library",
}
// test
err := handlePullRequest(options)
// assert
assert.NoError(t, err)
assert.Contains(t, sonar.options, "sonar.pullrequest.key=123")
assert.Contains(t, sonar.options, "sonar.pullrequest.provider=github")
assert.Contains(t, sonar.options, "sonar.pullrequest.base=master")
assert.Contains(t, sonar.options, "sonar.pullrequest.branch=feat/bogus")
assert.Contains(t, sonar.options, "sonar.pullrequest.github.repository=SAP/jenkins-library")
})
t.Run("unsupported scm provider", func(t *testing.T) {
// init
sonar = sonarSettings{
binary: "sonar-scanner",
environment: []string{},
options: []string{},
}
options := sonarExecuteScanOptions{
ChangeID: "123",
PullRequestProvider: "Gerrit",
}
// test
err := handlePullRequest(options)
// assert
assert.Error(t, err)
assert.Equal(t, "Pull-Request provider 'gerrit' is not supported!", err.Error())
})
t.Run("legacy", func(t *testing.T) {
// init
sonar = sonarSettings{
binary: "sonar-scanner",
environment: []string{},
options: []string{},
}
options := sonarExecuteScanOptions{
LegacyPRHandling: true,
ChangeID: "123",
Owner: "SAP",
Repository: "jenkins-library",
GithubToken: "some-token",
DisableInlineComments: true,
}
// test
err := handlePullRequest(options)
// assert
assert.NoError(t, err)
assert.Contains(t, sonar.options, "sonar.analysis.mode=preview")
assert.Contains(t, sonar.options, "sonar.github.pullRequest=123")
assert.Contains(t, sonar.options, "sonar.github.oauth=some-token")
assert.Contains(t, sonar.options, "sonar.github.repository=SAP/jenkins-library")
assert.Contains(t, sonar.options, "sonar.github.disableInlineComments=true")
})
}
func TestSonarLoadScanner(t *testing.T) {
mockClient := mockDownloader{shouldFail: false}
t.Run("use preinstalled sonar-scanner", func(t *testing.T) {
// init
ignore := ""
sonar = sonarSettings{
binary: "local-sonar-scanner",
environment: []string{},
options: []string{},
}
execLookPath = mockExecLookPath
defer func() { execLookPath = exec.LookPath }()
// test
err := loadSonarScanner(ignore, &mockClient)
// assert
assert.NoError(t, err)
assert.Equal(t, "local-sonar-scanner", sonar.binary)
})
t.Run("use downloaded sonar-scanner", func(t *testing.T) {
// init
url := "https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-4.3.0.2102-linux.zip"
sonar = sonarSettings{
binary: "sonar-scanner",
environment: []string{},
options: []string{},
}
execLookPath = mockExecLookPath
fileUtilsUnzip = mockFileUtilsUnzip(t, "sonar-scanner-cli-4.3.0.2102-linux.zip")
osRename = mockOsRename(t, "sonar-scanner-4.3.0.2102-linux", ".sonar-scanner")
defer func() {
execLookPath = exec.LookPath
fileUtilsUnzip = FileUtils.Unzip
osRename = os.Rename
}()
// test
err := loadSonarScanner(url, &mockClient)
// assert
assert.NoError(t, err)
assert.Equal(t, url, mockClient.requestedURL[0])
assert.Regexp(t, "sonar-scanner-cli-4.3.0.2102-linux.zip$", mockClient.requestedFile[0])
assert.Equal(t, path.Join(getWorkingDir(), ".sonar-scanner", "bin", "sonar-scanner"), sonar.binary)
})
}
func TestSonarLoadCertificates(t *testing.T) {
mockRunner := mock.ExecMockRunner{}
mockClient := mockDownloader{shouldFail: false}
t.Run("use local trust store", func(t *testing.T) {
// init
sonar = sonarSettings{
binary: "sonar-scanner",
environment: []string{},
options: []string{},
}
fileUtilsExists = mockFileUtilsExists(true)
defer func() { fileUtilsExists = FileUtils.FileExists }()
// test
err := loadCertificates("", &mockClient, &mockRunner)
// assert
assert.NoError(t, err)
assert.Contains(t, sonar.environment, "SONAR_SCANNER_OPTS=-Djavax.net.ssl.trustStore="+path.Join(getWorkingDir(), ".certificates", "cacerts"))
})
t.Run("use local trust store with downloaded certificates", func(t *testing.T) {
// init
sonar = sonarSettings{
binary: "sonar-scanner",
environment: []string{},
options: []string{},
}
fileUtilsExists = mockFileUtilsExists(false)
os.Setenv("PIPER_SONAR_LOAD_CERTIFICATES", "true")
require.Equal(t, "true", os.Getenv("PIPER_SONAR_LOAD_CERTIFICATES"), "PIPER_SONAR_LOAD_CERTIFICATES must be set")
defer func() {
fileUtilsExists = FileUtils.FileExists
os.Unsetenv("PIPER_SONAR_LOAD_CERTIFICATES")
}()
// test
err := loadCertificates("https://sap.com/custom-1.crt,https://sap.com/custom-2.crt", &mockClient, &mockRunner)
// assert
assert.NoError(t, err)
assert.Equal(t, "https://sap.com/custom-1.crt", mockClient.requestedURL[0])
assert.Equal(t, "https://sap.com/custom-2.crt", mockClient.requestedURL[1])
assert.Regexp(t, "custom-1.crt$", mockClient.requestedFile[0])
assert.Regexp(t, "custom-2.crt$", mockClient.requestedFile[1])
assert.Contains(t, sonar.environment, "SONAR_SCANNER_OPTS=-Djavax.net.ssl.trustStore="+path.Join(getWorkingDir(), ".certificates", "cacerts"))
})
t.Run("use local trust store with downloaded certificates - deactivated", func(t *testing.T) {
// init
sonar = sonarSettings{
binary: "sonar-scanner",
environment: []string{},
options: []string{},
}
fileUtilsExists = mockFileUtilsExists(false)
require.Empty(t, os.Getenv("PIPER_SONAR_LOAD_CERTIFICATES"), "PIPER_SONAR_LOAD_CERTIFICATES must not be set")
defer func() { fileUtilsExists = FileUtils.FileExists }()
// test
err := loadCertificates("any-certificate-url", &mockClient, &mockRunner)
// assert
assert.NoError(t, err)
assert.NotContains(t, sonar.environment, "SONAR_SCANNER_OPTS=-Djavax.net.ssl.trustStore="+path.Join(getWorkingDir(), ".certificates", "cacerts"))
})
t.Run("use no trust store", func(t *testing.T) {
// init
sonar = sonarSettings{
binary: "sonar-scanner",
environment: []string{},
options: []string{},
}
fileUtilsExists = mockFileUtilsExists(false)
os.Setenv("PIPER_SONAR_LOAD_CERTIFICATES", "true")
require.Equal(t, "true", os.Getenv("PIPER_SONAR_LOAD_CERTIFICATES"), "PIPER_SONAR_LOAD_CERTIFICATES must be set")
defer func() {
fileUtilsExists = FileUtils.FileExists
os.Unsetenv("PIPER_SONAR_LOAD_CERTIFICATES")
}()
// test
err := loadCertificates("", &mockClient, &mockRunner)
// assert
assert.NoError(t, err)
assert.Empty(t, sonar.environment)
})
}
|
[
"\"PIPER_SONAR_LOAD_CERTIFICATES\"",
"\"PIPER_SONAR_LOAD_CERTIFICATES\"",
"\"PIPER_SONAR_LOAD_CERTIFICATES\"",
"\"PIPER_SONAR_LOAD_CERTIFICATES\""
] |
[] |
[
"PIPER_SONAR_LOAD_CERTIFICATES"
] |
[]
|
["PIPER_SONAR_LOAD_CERTIFICATES"]
|
go
| 1 | 0 | |
deploy/deploy.go
|
package deploy
import (
"fmt"
"os"
log "github.com/Sirupsen/logrus"
"github.com/codegangsta/cli"
"github.com/flexiant/kdeploy/fetchers"
"github.com/flexiant/kdeploy/template"
"github.com/flexiant/kdeploy/utils"
"github.com/flexiant/kdeploy/webservice"
)
// CmdDeploy implements the 'deploy' command
func CmdDeploy(c *cli.Context) {
utils.CheckRequiredFlags(c, []string{"kubeware"})
var kubeware string
var localKubePath string
var err error
kubeware = os.Getenv("KDEPLOY_KUBEWARE")
localKubePath, err = fetchers.Fetch(kubeware)
if err != nil {
log.Fatal(fmt.Errorf("Could not fetch kubeware: '%s' (%v)", kubeware, err))
}
log.Debugf("Going to parse kubeware in %s", localKubePath)
metadata := template.ParseMetadata(localKubePath)
defaults, err := metadata.AttributeDefaults()
utils.CheckError(err)
// build attributes merging "role list" to defaults
log.Debugf("Building attributes")
attributes := template.BuildAttributes(c.String("attribute"), defaults)
// get list of services and parse each one
log.Debugf("Parsing services")
servicesSpecs, err := metadata.ParseServices(attributes)
utils.CheckError(err)
// get list of replica controllers and parse each one
log.Debugf("Parsing controllers")
controllersSpecs, err := metadata.ParseControllers(attributes)
utils.CheckError(err)
// creates Kubernetes client
kubernetes, err := webservice.NewKubeClient()
utils.CheckError(err)
// check if kubeware already exists
log.Debugf("Checking if already deployed")
deployedVersion, err := kubernetes.FindDeployedKubewareVersion(os.Getenv("KDEPLOY_NAMESPACE"), metadata.Name)
utils.CheckError(err)
if deployedVersion != "" {
log.Errorf("Can not deploy '%s' since version '%s' is already deployed", metadata.Name, deployedVersion)
return
}
// create each of the services
log.Debugf("Creating services")
err = kubernetes.CreateServices(utils.Values(servicesSpecs))
utils.CheckError(err)
// create each of the controllers
log.Debugf("Creating controllers")
err = kubernetes.CreateReplicaControllers(utils.Values(controllersSpecs))
utils.CheckError(err)
log.Infof("Kubeware %s from %s has been deployed", metadata.Name, os.Getenv("KDEPLOY_KUBEWARE"))
}
// func isLocalURL(kube string) bool {
// kubewareURL, err := url.Parse(kube)
// return (err == nil && kubewareURL.Scheme == "file")
// }
//
// func extractAbsolutePath(kube string) bool {
// kubewareURL, err := url.Parse(kube)
// if err == nil && kubewareURL.Scheme == "file")
// }
|
[
"\"KDEPLOY_KUBEWARE\"",
"\"KDEPLOY_NAMESPACE\"",
"\"KDEPLOY_KUBEWARE\""
] |
[] |
[
"KDEPLOY_NAMESPACE",
"KDEPLOY_KUBEWARE"
] |
[]
|
["KDEPLOY_NAMESPACE", "KDEPLOY_KUBEWARE"]
|
go
| 2 | 0 | |
swd/swd/settings.py
|
"""
Django settings for swd project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
from .config import PRODUCTION, DB_NAME, DB_PASSWORD, DB_USER
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
from tools.dev_info import SECRET_KEY
from tools.dev_info import EMAIL_HOST_PASSWORD
# production = True if "PROD" in os.environ and os.environ.get("PROD") == "True" else False
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
if PRODUCTION:
ALLOWED_HOSTS = ['10.10.10.121', 'newswd.bits-goa.ac.in']
else:
ALLOWED_HOSTS = []
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = '[email protected]'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Application definition
INSTALLED_APPS = [
'jet.dashboard',
'jet',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'webpack_loader',
'main',
'tools',
'schema',
'graphene_django',
'rest_framework',
'corsheaders',
'braces',
'import_export',
]
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
),
}
CORS_ORIGIN_ALLOW_ALL = True
# in production this needs to be set to expire after some time
JWT_VERIFY_EXPIRATION = False
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'swd.middleware.JWTMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
if PRODUCTION:
AUTHENTICATION_BACKENDS = (
'main.auth_backend.LDAPAuthBackend',
'django.contrib.auth.backends.ModelBackend'
)
else:
AUTHENTICATION_BACKENDS = (
'main.auth_backend.LDAPAuthBackend',
'django.contrib.auth.backends.ModelBackend',
)
ROOT_URLCONF = 'swd.urls'
GRAPHENE = {
'SCHEMA': 'swd.schema.schema'
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, "templates"),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'swd.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
if PRODUCTION:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': DB_NAME,
'USER': DB_USER,
'PASSWORD': DB_PASSWORD,
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'static'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
CONSTANTS_LOCATION = os.path.join(MEDIA_ROOT, 'constants.json')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'assets'),
]
WEBPACK_LOADER = {
'DEFAULT': {
'BUNDLE_DIR_NAME': 'bundles/',
'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.json'),
}
}
LOGIN_URL = '/login'
# STATIC_ROOT = os.path.join(BASE_DIR, "static")
DATA_UPLOAD_MAX_NUMBER_FIELDS = 5000
|
[] |
[] |
[
"PROD"
] |
[]
|
["PROD"]
|
python
| 1 | 0 | |
tasks/process_agent.py
|
import datetime
import os
import re
import shutil
import sys
from invoke import task
from subprocess import check_output
from .utils import bin_name, get_gopath, get_build_flags, REPO_PATH, get_version, get_git_branch_name, get_go_version, get_git_commit, get_version_numeric_only
from .build_tags import get_default_build_tags
BIN_DIR = os.path.join(".", "bin", "process-agent")
BIN_PATH = os.path.join(BIN_DIR, bin_name("process-agent", android=False))
GIMME_ENV_VARS = ['GOROOT', 'PATH']
@task
def build(ctx, race=False, go_version=None, incremental_build=False,
major_version='7', python_runtimes='3', arch="x64"):
"""
Build the process agent
"""
ldflags, gcflags, env = get_build_flags(ctx, arch=arch, major_version=major_version, python_runtimes=python_runtimes)
# generate windows resources
if sys.platform == 'win32':
windres_target = "pe-x86-64"
if arch == "x86":
env["GOARCH"] = "386"
windres_target = "pe-i386"
ver = get_version_numeric_only(ctx, env, major_version=major_version)
maj_ver, min_ver, patch_ver = ver.split(".")
resdir = os.path.join(".", "cmd", "process-agent", "windows_resources")
ctx.run("windmc --target {target_arch} -r {resdir} {resdir}/process-agent-msg.mc".format(
resdir=resdir,
target_arch=windres_target
))
ctx.run("windres --define MAJ_VER={maj_ver} --define MIN_VER={min_ver} --define PATCH_VER={patch_ver} -i cmd/process-agent/windows_resources/process-agent.rc --target {target_arch} -O coff -o cmd/process-agent/rsrc.syso".format(
maj_ver=maj_ver,
min_ver=min_ver,
patch_ver=patch_ver,
target_arch=windres_target
))
# TODO use pkg/version for this
main = "main."
ld_vars = {
"Version": get_version(ctx, major_version=major_version),
"GoVersion": get_go_version(),
"GitBranch": get_git_branch_name(),
"GitCommit": get_git_commit(),
"BuildDate": datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S"),
}
goenv = {}
# TODO: this is a temporary workaround to avoid the garbage collection issues that the process-agent+go1.11 have had.
# Once we have upgraded the go version to 1.12, this can be removed
if go_version:
lines = ctx.run("gimme {version}".format(version=go_version)).stdout.split("\n")
for line in lines:
for env_var in GIMME_ENV_VARS:
if env_var in line:
goenv[env_var] = line[line.find(env_var)+len(env_var)+1:-1].strip('\'\"')
ld_vars["GoVersion"] = go_version
# extend PATH from gimme with the one from get_build_flags
if "PATH" in os.environ and "PATH" in goenv:
goenv["PATH"] += ":" + os.environ["PATH"]
env.update(goenv)
ldflags += ' '.join(["-X '{name}={value}'".format(name=main+key, value=value) for key, value in ld_vars.items()])
build_tags = get_default_build_tags(puppy=False, process=True)
## secrets are not supported on Windows because the process agent still runs as
## root. No matter what `get_default_build_tags()` returns, take secrets out.
if sys.platform == 'win32' and "secrets" in build_tags:
build_tags.remove("secrets")
# TODO static option
cmd = 'go build {race_opt} {build_type} -tags "{go_build_tags}" '
cmd += '-o {agent_bin} -gcflags="{gcflags}" -ldflags="{ldflags}" {REPO_PATH}/cmd/process-agent'
args = {
"race_opt": "-race" if race else "",
"build_type": "" if incremental_build else "-a",
"go_build_tags": " ".join(build_tags),
"agent_bin": BIN_PATH,
"gcflags": gcflags,
"ldflags": ldflags,
"REPO_PATH": REPO_PATH,
}
ctx.run(cmd.format(**args), env=env)
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
benchmarks_sphere/paper_jrn_sl_exp/test_compare_wt_dt_vs_accuracy_galewsky_M256_6hours_l_n_uv/benchmark_create_jobs.py
|
#! /usr/bin/env python3
import os
import sys
import math
from itertools import product
# REXI
from mule_local.rexi.REXICoefficients import *
from mule_local.rexi.trexi.TREXI import *
from mule_local.rexi.cirexi.CIREXI import *
from mule_local.rexi.brexi.BREXI import *
efloat_mode = "float"
#efloat_mode = "mpfloat"
from mule_local.JobGeneration import *
from mule.JobParallelization import *
from mule.JobParallelizationDimOptions import *
jg = JobGeneration()
verbose = False
#verbose = True
##################################################
##################################################
jg.compile.mode = 'release'
if '_gnu' in os.getenv('MULE_PLATFORM_ID'):
jg.compile.compiler = 'gnu'
else:
jg.compile.compiler = 'intel'
jg.compile.sweet_mpi = 'enable'
jg.parallelization.core_oversubscription = False
jg.parallelization.core_affinity = 'compact'
jg.compile.threading = 'omp'
jg.compile.rexi_thread_parallel_sum = 'disable'
gen_reference_solution = True
jg.runtime.max_simulation_time = 60*60*6
#jg.runtime.max_simulation_time = 30*16
jg.runtime.max_wallclock_time = 30*60 # 30 minutes max
#space_res_spectral_ = [64, 128, 256]
space_res_spectral_ = [256]
# Reference time step size
timestep_size_reference = 5
params_timestep_sizes_explicit = [15/2*(2**i) for i in range(10)]
params_timestep_sizes_implicit = [15/2*(2**i) for i in range(10)]
params_timestep_sizes_exp = [15/2*(2**i) for i in range(9)]
# Parallelization
params_pspace_num_cores_per_rank = [jg.platform_resources.num_cores_per_socket]
#params_pspace_num_threads_per_rank = [i for i in range(1, jg.platform_resources.num_cores_per_socket+1)]
params_pspace_num_threads_per_rank = [jg.platform_resources.num_cores_per_socket]
params_ptime_num_cores_per_rank = [1]
unique_id_filter = []
#unique_id_filter.append('simparams')
unique_id_filter.append('compile')
unique_id_filter.append('runtime.disc_space')
unique_id_filter.append('runtime.timestep_order')
#unique_id_filter.append('timestep_size')
unique_id_filter.append('runtime.rexi')
unique_id_filter.append('runtime.benchmark')
unique_id_filter.append('parallelization')
jg.unique_id_filter = unique_id_filter
jg.runtime.output_timestep_size = jg.runtime.max_simulation_time
# No output
#jg.runtime.output_filename = "-"
#
# Force deactivating Turbo mode
#
jg.parallelization.force_turbo_off = True
jg.compile.lapack = 'enable'
jg.compile.mkl = 'disable'
# Request dedicated compile script
jg.compilecommand_in_jobscript = False
#
# Run simulation on plane or sphere
#
jg.compile.program = 'swe_sphere'
jg.compile.plane_spectral_space = 'disable'
jg.compile.plane_spectral_dealiasing = 'disable'
jg.compile.sphere_spectral_space = 'enable'
jg.compile.sphere_spectral_dealiasing = 'enable'
jg.compile.benchmark_timings = 'enable'
jg.compile.quadmath = 'disable'
#
# Activate Fortran source
#
jg.compile.fortran_source = 'enable'
# Verbosity mode
jg.runtime.verbosity = 0
#
# Benchmark
#
jg.runtime.benchmark_name = "galewsky"
#
# Binary output
#
jg.runtime.output_file_mode = "bin"
#
# Compute error
#
jg.runtime.compute_error = 0
#
# Preallocate the REXI matrices
#
jg.runtime.rexi_sphere_preallocation = 1
# Keep instability checks deactivated
# Don't activate them since they are pretty costly!!!
jg.runtime.instability_checks = 0
#
# REXI method
# N=64, SX,SY=50 and MU=0 with circle primitive provide good results
#
jg.runtime.rexi_method = ''
jg.runtime.viscosity = 0.0
jg.runtime.rexi_method = 'direct'
def estimateWallclockTime(jg):
if jg.reference_job:
return 2*24*60*60
return 1*60*60
#
# allow including this file
#
if __name__ == "__main__":
ts_methods = [
['ln_erk_split_uv', 4, 4, 0],
###########
# Runge-Kutta
###########
['ln_erk_split_aa_uv', 2, 2, 0],
['ln_erk_split_uv', 2, 2, 0],
###########
# SETTLS variants
###########
['l_irk_na_sl_nr_settls_uv_only', 2, 2, 0],
['l_irk_na_sl_nr_settls_ver0_uv', 2, 2, 0],
['l_irk_na_sl_nr_settls_ver1_uv', 2, 2, 0],
['lg_irk_na_sl_lc_nr_settls_ver0_uv', 2, 2, 0],
['lg_irk_na_sl_lc_nr_settls_ver1_uv', 2, 2, 0],
###########
# EXP variants
###########
['lg_exp_na_sl_lc_nr_settls_ver0_uv', 2, 2, 0],
['lg_exp_na_sl_lc_nr_settls_ver1_uv', 2, 2, 0],
]
for space_res_spectral in space_res_spectral_:
jg.runtime.space_res_spectral = space_res_spectral
#jg.runtime.reuse_plans = 2 # enforce using plans (todo, enforcing not yet implemented)!
#
# Reference solution
#
if gen_reference_solution:
tsm = ts_methods[0]
jg.runtime.timestep_size = timestep_size_reference
jg.runtime.timestepping_method = tsm[0]
jg.runtime.timestepping_order = tsm[1]
jg.runtime.timestepping_order2 = tsm[2]
# Update TIME parallelization
ptime = JobParallelizationDimOptions('time')
ptime.num_cores_per_rank = 1
ptime.num_threads_per_rank = 1 #pspace.num_cores_per_rank
ptime.num_ranks = 1
pspace = JobParallelizationDimOptions('space')
pspace.num_cores_per_rank = 1
pspace.num_threads_per_rank = params_pspace_num_cores_per_rank[-1]
pspace.num_ranks = 1
# Setup parallelization
jg.setup_parallelization([pspace, ptime])
if verbose:
pspace.print()
ptime.print()
jg.parallelization.print()
if len(tsm) > 4:
s = tsm[4]
jg.load_from_dict(tsm[4])
jg.reference_job = True
jg.parallelization.max_wallclock_seconds = estimateWallclockTime(jg)
_a = jg.runtime.max_wallclock_time
jg.runtime.max_wallclock_time = 2*24*60*60 # 2 days max for the reference job
jg.reference_job_unique_id = None
jg.gen_jobscript_directory('job_benchref_'+jg.getUniqueID())
jg.reference_job = False
jg.runtime.max_wallclock_time = _a
jg.reference_job_unique_id = jg.job_unique_id
#
# Create job scripts
#
for tsm in ts_methods[1:]:
jg.runtime.timestepping_method = tsm[0]
jg.runtime.timestepping_order = tsm[1]
jg.runtime.timestepping_order2 = tsm[2]
if len(tsm) > 4:
s = tsm[4]
jg.runtime.load_from_dict(tsm[4])
exp_integrator = False
tsm_name = tsm[0]
if 'l_erk' in tsm_name or 'lg_erk' in tsm_name:
params_timestep_sizes = params_timestep_sizes_explicit
elif 'l_na_erk' in tsm_name or 'ln_erk' in tsm_name:
params_timestep_sizes = params_timestep_sizes_explicit
elif '_irk' in tsm_name:
params_timestep_sizes = params_timestep_sizes_implicit
elif '_rexi' in tsm_name or '_exp' in tsm_name:
params_timestep_sizes = params_timestep_sizes_exp
exp_integrator = True
else:
raise Exception("Unable to identify time stepping method "+tsm_name)
for pspace_num_cores_per_rank, pspace_num_threads_per_rank, jg.runtime.timestep_size in product(params_pspace_num_cores_per_rank, params_pspace_num_threads_per_rank, params_timestep_sizes):
pspace = JobParallelizationDimOptions('space')
pspace.num_cores_per_rank = pspace_num_cores_per_rank
pspace.num_threads_per_rank = pspace_num_threads_per_rank
pspace.num_ranks = 1
pspace.setup()
#if not exp_integrator or 'lg_' in tsm_name:
if True:
# Always use direct REXI method if no parallel-in-time
jg.runtime.rexi_method = 'direct'
# Update TIME parallelization
ptime = JobParallelizationDimOptions('time')
ptime.num_cores_per_rank = 1
ptime.num_threads_per_rank = 1 #pspace.num_cores_per_rank
ptime.num_ranks = 1
ptime.setup()
jg.setup_parallelization([pspace, ptime])
if verbose:
pspace.print()
ptime.print()
jg.parallelization.print()
jg.parallelization.max_wallclock_seconds = estimateWallclockTime(jg)
jg.gen_jobscript_directory('job_bench_'+jg.getUniqueID())
else:
raise Exception("This branch shouldn't be taken, yet")
#
# SHTNS plan generation scripts
#
#jg.runtime.reuse_plans = 1 # search for awesome plans and store them
#
# Create dummy scripts to be used for SHTNS script generation
#
# No parallelization in time
ptime = JobParallelizationDimOptions('time')
ptime.num_cores_per_rank = 1
ptime.num_threads_per_rank = 1
ptime.num_ranks = 1
ptime.setup()
for tsm in ts_methods[1:2]:
jg.runtime.timestepping_method = tsm[0]
jg.runtime.timestepping_order = tsm[1]
jg.runtime.timestepping_order2 = tsm[2]
if not '_rexi' in jg.runtime.timestepping_method:
jg.runtime.rexi_method = ''
else:
jg.runtime.rexi_method = 'ci'
if len(tsm) > 4:
s = tsm[4]
jg.runtime.load_from_dict(tsm[4])
for pspace_num_cores_per_rank, pspace_num_threads_per_rank, jg.runtime.timestep_size in product(params_pspace_num_cores_per_rank, params_pspace_num_threads_per_rank, [params_timestep_sizes_explicit[0]]):
pspace = JobParallelizationDimOptions('space')
pspace.num_cores_per_rank = pspace_num_cores_per_rank
pspace.num_threads_per_rank = pspace_num_threads_per_rank
pspace.num_ranks = 1
pspace.setup()
jg.setup_parallelization([pspace, ptime])
# Use 10 minutes per default to generate plans
jg.parallelization.max_wallclock_seconds = 60*10
# Set simtime to 0
#jg.runtime.max_simulation_time = 0
# No output
jg.runtime.output_timestep_size = -1
jg.runtime.output_filename = "-"
jobdir = 'job_plan_'+jg.getUniqueID()
jg.gen_jobscript_directory(jobdir)
# Write compile script
jg.write_compilecommands("./compile_platform_"+jg.platforms.platform_id+".sh")
print("")
print("Timestepping methods:")
m = [i[0] for i in ts_methods[1:]]
m.sort()
for i in m:
print(i)
print("")
|
[] |
[] |
[
"MULE_PLATFORM_ID"
] |
[]
|
["MULE_PLATFORM_ID"]
|
python
| 1 | 0 | |
SAW/proof/AES/AES-GCM-check-entrypoint.go
|
/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
package main
import (
utility "aws-lc-verification/proof/common"
"log"
"math"
"os"
"sync"
)
// The AES GCM proofs use approximately 7 gb of memory each, round up to 8 gb for headroom
const memory_used_per_test uint64 = 8e9
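// As a rough illustration (the machine size below is only an example, not something
// fixed by this repository): a host with 64 GB of RAM would run
// floor(64e9 / 8e9) = 8 select-check jobs in parallel, which is exactly the
// num_parallel_process computation performed in main below.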
func main() {
log.Printf("Started AES-GCM check.")
// When 'AES_GCM_SELECTCHECK' is undefined, quickcheck is executed.
env_var := os.Getenv("AES_GCM_SELECTCHECK")
if len(env_var) == 0 {
utility.RunSawScript("verify-AES-GCM-quickcheck.saw")
return
}
selectcheck_range_start := utility.ParseSelectCheckRange("AES_GCM_SELECTCHECK_START", 1)
selectcheck_range_end := utility.ParseSelectCheckRange("AES_GCM_SELECTCHECK_END", 384)
// When 'AES_GCM_SELECTCHECK' is defined, formal verification is executed with different `evp_cipher_update_len`.
// Generate saw scripts based on the verification template and evp_cipher_update_len range [1, 384].
var wg sync.WaitGroup
process_count := 0
total_memory := utility.SystemMemory()
num_parallel_process := int(math.Floor((float64(total_memory) / float64(memory_used_per_test))))
log.Printf("System has %d bytes of memory, running %d jobs in parallel", total_memory, num_parallel_process)
for i := selectcheck_range_start; i <= selectcheck_range_end; i++ {
wg.Add(1)
saw_template := "verify-AES-GCM-selectcheck-template.txt"
placeholder_name := "TARGET_LEN_PLACEHOLDER"
go utility.CreateAndRunSawScript(saw_template, placeholder_name, i, &wg)
utility.Wait(&process_count, num_parallel_process, &wg)
}
wg.Wait()
log.Printf("Completed AES-GCM check.")
}
|
[
"\"AES_GCM_SELECTCHECK\""
] |
[] |
[
"AES_GCM_SELECTCHECK"
] |
[]
|
["AES_GCM_SELECTCHECK"]
|
go
| 1 | 0 | |
setup.py
|
import os
import sys
import glob
import os.path as osp
from itertools import product
from setuptools import setup, find_packages
import torch
from torch.__config__ import parallel_info
from torch.utils.cpp_extension import BuildExtension
from torch.utils.cpp_extension import CppExtension, CUDAExtension, CUDA_HOME
WITH_CUDA = torch.cuda.is_available() and CUDA_HOME is not None
suffices = ['cpu', 'cuda'] if WITH_CUDA else ['cpu']
if os.getenv('FORCE_CUDA', '0') == '1':
suffices = ['cuda', 'cpu']
if os.getenv('FORCE_ONLY_CUDA', '0') == '1':
suffices = ['cuda']
if os.getenv('FORCE_ONLY_CPU', '0') == '1':
suffices = ['cpu']
BUILD_DOCS = os.getenv('BUILD_DOCS', '0') == '1'
WITH_METIS = True if os.getenv('WITH_METIS', '0') == '1' else False
WITH_MTMETIS = True if os.getenv('WITH_MTMETIS', '0') == '1' else False
def get_extensions():
extensions = []
extensions_dir = osp.join('csrc')
main_files = glob.glob(osp.join(extensions_dir, '*.cpp'))
for main, suffix in product(main_files, suffices):
define_macros = []
libraries = []
if WITH_METIS:
define_macros += [('WITH_METIS', None)]
libraries += ['metis']
if WITH_MTMETIS:
define_macros += [('WITH_MTMETIS', None)]
define_macros += [('MTMETIS_64BIT_VERTICES', None)]
define_macros += [('MTMETIS_64BIT_EDGES', None)]
define_macros += [('MTMETIS_64BIT_WEIGHTS', None)]
define_macros += [('MTMETIS_64BIT_PARTITIONS', None)]
libraries += ['mtmetis', 'wildriver']
extra_compile_args = {'cxx': ['-O2']}
extra_link_args = ['-s']
info = parallel_info()
if ('backend: OpenMP' in info and 'OpenMP not found' not in info
and sys.platform != 'darwin'):
extra_compile_args['cxx'] += ['-DAT_PARALLEL_OPENMP']
if sys.platform == 'win32':
extra_compile_args['cxx'] += ['/openmp']
else:
extra_compile_args['cxx'] += ['-fopenmp']
else:
print('Compiling without OpenMP...')
if suffix == 'cuda':
define_macros += [('WITH_CUDA', None)]
nvcc_flags = os.getenv('NVCC_FLAGS', '')
nvcc_flags = [] if nvcc_flags == '' else nvcc_flags.split(' ')
nvcc_flags += ['--expt-relaxed-constexpr', '-O2']
extra_compile_args['nvcc'] = nvcc_flags
if sys.platform == 'win32':
extra_link_args += ['cusparse.lib']
else:
extra_link_args += ['-lcusparse', '-l', 'cusparse']
name = main.split(os.sep)[-1][:-4]
sources = [main]
path = osp.join(extensions_dir, 'cpu', f'{name}_cpu.cpp')
if osp.exists(path):
sources += [path]
path = osp.join(extensions_dir, 'cuda', f'{name}_cuda.cu')
if suffix == 'cuda' and osp.exists(path):
sources += [path]
Extension = CppExtension if suffix == 'cpu' else CUDAExtension
extension = Extension(
f'torch_sparse._{name}_{suffix}',
sources,
include_dirs=[extensions_dir],
define_macros=define_macros,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
libraries=libraries,
)
extensions += [extension]
return extensions
install_requires = ['scipy']
setup_requires = []
tests_require = ['pytest', 'pytest-runner', 'pytest-cov']
setup(
name='torch_sparse',
version='0.6.12',
author='Matthias Fey',
author_email='[email protected]',
url='https://github.com/rusty1s/pytorch_sparse',
description=('PyTorch Extension Library of Optimized Autograd Sparse '
'Matrix Operations'),
keywords=['pytorch', 'sparse', 'sparse-matrices', 'autograd'],
license='MIT',
python_requires='>=3.6',
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
extras_require={'test': tests_require},
ext_modules=get_extensions() if not BUILD_DOCS else [],
cmdclass={
'build_ext':
BuildExtension.with_options(no_python_abi_suffix=True, use_ninja=False)
},
packages=find_packages(),
)
|
[] |
[] |
[
"WITH_METIS",
"WITH_MTMETIS",
"FORCE_CUDA",
"BUILD_DOCS",
"FORCE_ONLY_CPU",
"NVCC_FLAGS",
"FORCE_ONLY_CUDA"
] |
[]
|
["WITH_METIS", "WITH_MTMETIS", "FORCE_CUDA", "BUILD_DOCS", "FORCE_ONLY_CPU", "NVCC_FLAGS", "FORCE_ONLY_CUDA"]
|
python
| 7 | 0 | |
plugin/pytorch_bigtable/tests/test_write_tensor.py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# disable module docstring for tests
# pylint: disable=C0114
# disable class docstring for tests
# pylint: disable=C0115
# disable unused parameter for callback
# pylint: disable=W0613
import random
import unittest
import torch
import os
from .bigtable_emulator import BigtableEmulator
from pytorch_bigtable import BigtableClient, row_set, row_range
class BigtableWriteTest(unittest.TestCase):
def setUp(self):
self.emulator = BigtableEmulator()
def tearDown(self):
self.emulator.stop()
def test_write_arguments(self):
os.environ["BIGTABLE_EMULATOR_HOST"] = self.emulator.get_addr()
self.emulator.create_table("fake_project", "fake_instance", "test-table",
["fam1", "fam2"])
ten = torch.Tensor(list(range(40))).reshape(20, 2)
client = BigtableClient("fake_project", "fake_instance",
endpoint=self.emulator.get_addr())
table = client.get_table("test-table")
# empty columns
self.assertRaises(ValueError, table.write_tensor, ten, [],
["row" + str(i).rjust(3, "0") for i in range(20)])
# not enough columns
self.assertRaises(ValueError, table.write_tensor, ten, ["fam1:c1"],
["row" + str(i).rjust(3, "0") for i in range(20)])
# too many columns
self.assertRaises(ValueError, table.write_tensor, ten,
["fam1:c1", "fam1:c2", "fam1:c2"],
["row" + str(i).rjust(3, "0") for i in range(20)])
# columns without families
self.assertRaises(ValueError, table.write_tensor, ten, ["c1", "c2"],
["row" + str(i).rjust(3, "0") for i in range(20)])
# not enough row_keys
self.assertRaises(ValueError, table.write_tensor, ten,
["fam1:c1", "fam1:c2"],
["row" + str(i).rjust(3, "0") for i in range(10)])
self.assertRaises(ValueError, table.write_tensor, ten[0],
["fam1:c1", "fam1:c2"], ["row000"])
# non existing family
self.assertRaises(RuntimeError, table.write_tensor, ten,
["fam3:c1", "fam3:c2"],
["row" + str(i).rjust(3, "0") for i in range(20)])
def test_write_single_row(self):
os.environ["BIGTABLE_EMULATOR_HOST"] = self.emulator.get_addr()
self.emulator.create_table("fake_project", "fake_instance", "test-table",
["fam1", "fam2"])
ten = torch.tensor([1, 2], dtype=torch.float32).reshape(1, -1)
client = BigtableClient("fake_project", "fake_instance",
endpoint=self.emulator.get_addr())
table = client.get_table("test-table")
table.write_tensor(ten, ["fam1:col1", "fam1:col2"], ["row000"])
result = next(iter(
table.read_rows(torch.float32, ["fam1:col1", "fam1:col2"],
row_set.from_rows_or_ranges(row_range.infinite()))))
self.assertTrue((ten == result).all().item())
def test_write_single_column(self):
os.environ["BIGTABLE_EMULATOR_HOST"] = self.emulator.get_addr()
self.emulator.create_table("fake_project", "fake_instance", "test-table",
["fam1", "fam2"])
ten = torch.Tensor(list(range(40))).reshape(-1, 1)
client = BigtableClient("fake_project", "fake_instance",
endpoint=self.emulator.get_addr())
table = client.get_table("test-table")
table.write_tensor(ten, ["fam1:col1"],
["row" + str(i).rjust(3, "0") for i in range(40)])
results = []
for tensor in table.read_rows(torch.float32, ["fam1:col1"],
row_set.from_rows_or_ranges(
row_range.infinite()), default_value=0):
results.append(tensor.reshape(1, -1))
result = torch.cat(results)
self.assertTrue((ten == result).all().item())
def test_write_callback(self):
os.environ["BIGTABLE_EMULATOR_HOST"] = self.emulator.get_addr()
self.emulator.create_table("fake_project", "fake_instance", "test-table",
["fam1", "fam2"])
ten = torch.Tensor(list(range(40))).reshape(20, 2)
client = BigtableClient("fake_project", "fake_instance",
endpoint=self.emulator.get_addr())
table = client.get_table("test-table")
row_keys_list = ["row" + str(random.randint(1000, 9999)).rjust(4, "0") for _
in range(20)]
def row_callback(tensor, index):
return row_keys_list[index]
table.write_tensor(ten, ["fam1:col1", "fam2:col2"], row_callback)
results = []
for tensor in table.read_rows(torch.float32, ["fam1:col1", "fam2:col2"],
row_set.from_rows_or_ranges(*row_keys_list)):
results.append(tensor.reshape(1, -1))
results = sorted(results, key=lambda x: x[0, 0].item())
result = torch.cat(results)
self.assertTrue((result.nan_to_num(0) == ten.nan_to_num(0)).all().item())
|
[] |
[] |
[
"BIGTABLE_EMULATOR_HOST"
] |
[]
|
["BIGTABLE_EMULATOR_HOST"]
|
python
| 1 | 0 | |
main.go
|
package main
import (
"fmt"
"log"
"myblog/app"
"net/http"
"os"
"path/filepath"
"github.com/gobuffalo/packr/v2"
"github.com/gorilla/mux"
"github.com/unrolled/secure"
)
var path, _ = os.Executable()
var baseDir = filepath.Dir(path)
var staticDir = baseDir + "/web/static/"
var staticBox = packr.New("Static", "./web/static")
func main() {
router := mux.NewRouter()
router.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
log.Println(r.Header)
log.Println("Healthcheck /healthz")
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, "ok")
})
// Redirect to HTTPS in prod
sslRedirect := (os.Getenv("SSL_REDIRECT") == "true")
secureMiddleware := secure.New(secure.Options{
SSLProxyHeaders: map[string]string{"X-Forwarded-Proto": "https"},
SSLRedirect: sslRedirect,
})
subRouter := router.PathPrefix("/").Subrouter()
subRouter.Use(secureMiddleware.Handler)
subRouter.HandleFunc("/", app.HomeHandler).Methods("GET")
subRouter.HandleFunc("/about", app.AboutHandler).Methods("GET")
subRouter.HandleFunc("/posts/{slug}", app.PostHandler).Methods("GET")
subRouter.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(staticBox)))
router.PathPrefix("/").HandlerFunc(app.CatchAllHandler)
log.Println("Listening on port " + os.Getenv("PORT"))
http.ListenAndServe(":"+os.Getenv("PORT"), router)
}
|
[
"\"SSL_REDIRECT\"",
"\"PORT\"",
"\"PORT\""
] |
[] |
[
"PORT",
"SSL_REDIRECT"
] |
[]
|
["PORT", "SSL_REDIRECT"]
|
go
| 2 | 0 | |
skinnywms/wmssvr.py
|
# (C) Copyright 2012-2019 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
import os
import argparse
from flask import Flask, request, Response, render_template, send_file, jsonify
from .server import WMSServer
from .plot.magics import Plotter, Styler
from .data.fs import Availability
application = Flask(__name__)
demo = os.path.join(os.path.dirname(__file__), "testdata", "sfc.grib")
demo = os.environ.get("SKINNYWMS_DATA_PATH", demo)
parser = argparse.ArgumentParser(description="Simple WMS server")
parser.add_argument(
"-f",
"--path",
default=demo,
help="Path to a GRIB or NetCDF file, or a directory\
containing GRIB and/or NetCDF files.",
)
parser.add_argument(
"--style", default="", help="Path to a directory where to find the styles"
)
parser.add_argument(
"--user_style", default="", help="Path to a json file containing the style to use"
)
parser.add_argument("--host", default="127.0.0.1", help="Hostname")
parser.add_argument("--port", default=5000, help="Port number")
parser.add_argument(
"--baselayer", default="", help="Path to a directory where to find the baselayer"
)
parser.add_argument(
"--magics-prefix",
default="magics",
help="prefix used to pass information to magics",
)
args = parser.parse_args()
if args.style != "":
os.environ["MAGICS_STYLE_PATH"] = args.style + ":ecmwf"
if args.user_style != "":
os.environ["MAGICS_USER_STYLE_PATH"] = args.user_style
server = WMSServer(Availability(args.path), Plotter(args.baselayer), Styler(args.user_style))
server.magics_prefix = args.magics_prefix
@application.route("/wms", methods=["GET"])
def wms():
return server.process(
request,
Response=Response,
send_file=send_file,
render_template=render_template,
reraise=True,
)
@application.route("/availability", methods=["GET"])
def availability():
return jsonify(server.availability.as_dict())
@application.route("/", methods=["GET"])
def index():
return render_template("leaflet_demo.html")
def execute():
application.run(port=args.port, host=args.host, debug=True, threaded=False)
|
[] |
[] |
[
"SKINNYWMS_DATA_PATH",
"MAGICS_USER_STYLE_PATH",
"MAGICS_STYLE_PATH"
] |
[]
|
["SKINNYWMS_DATA_PATH", "MAGICS_USER_STYLE_PATH", "MAGICS_STYLE_PATH"]
|
python
| 3 | 0 | |
connector/isliteral_test.go
|
package connector
import "testing"
func TestIsLiteral(t *testing.T) {
cases := []struct {
plaintext string
want bool
}{
{"A", true},
{"~A", true},
{"A^B", false},
{"AvB", false},
}
for _, c := range cases {
got := Parse(c.plaintext).isLiteral()
want := c.want
if got != want {
t.Errorf("Parse(%q).isLiteral(): %t != %t", c.plaintext, got, want)
}
}
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
animeDjangoApp/asgi.py
|
"""
ASGI config for animeDjangoApp project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'animeDjangoApp.settings')
application = get_asgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
test/functional/test_framework/test_framework.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from collections import deque
from enum import Enum
import logging
import optparse
import os
import pdb
import shutil
import sys
import tempfile
import time
import traceback
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
initialize_datadir,
log_filename,
p2p_port,
set_node_times,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
class BitcoinTestFramework(object):
"""Base class for a tgk test script.
Individual tgk test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
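# A minimal subclass might look like the following sketch (ExampleTest and the
# chosen parameter values are illustrative only, not part of this framework):
#
#   class ExampleTest(BitcoinTestFramework):
#       def set_test_params(self):
#           self.num_nodes = 2
#           self.setup_clean_chain = True
#       def run_test(self):
#           self.nodes[0].generate(10)
#           self.sync_all()
#
#   if __name__ == '__main__':
#       ExampleTest().main()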
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.setup_clean_chain = False
self.nodes = []
self.mocktime = 0
self.set_test_params()
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave tgkds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop tgkds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../../src"),
help="Source directory containing tgkd/tgk-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_option("--configfile", dest="configfile",
help="Location of the test framework config file")
parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix="test")
self._start_logging()
success = TestStatus.FAILED
try:
self.setup_chain()
self.setup_network()
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
self.log.info("Note: tgkds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up")
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
if os.getenv("PYTHON_DEBUG", ""):
# Dump the end of the debug logs, to aid in debugging rare
# travis failures.
import glob
filenames = [self.options.tmpdir + "/test_framework.log"]
filenames += glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
MAX_LINES_TO_PRINT = 1000
for fn in filenames:
try:
with open(fn, 'r') as f:
print("From", fn, ":")
print("".join(deque(f, MAX_LINES_TO_PRINT)))
except OSError:
print("Opening file %s failed." % fn)
traceback.print_exc()
if success == TestStatus.PASSED:
self.log.info("Tests successful")
sys.exit(TEST_EXIT_PASSED)
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
sys.exit(TEST_EXIT_SKIPPED)
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
logging.shutdown()
sys.exit(TEST_EXIT_FAILED)
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, rpchost=None, timewait=None, binary=None):
"""Instantiate TestNode objects"""
if extra_args is None:
extra_args = [[]] * num_nodes
if binary is None:
binary = [None] * num_nodes
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir))
def start_node(self, i, extra_args=None, stderr=None):
"""Start a tgkd"""
node = self.nodes[i]
node.start(extra_args, stderr)
node.wait_for_rpc_connection()
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None):
"""Start multiple tgkds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i])
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i):
"""Stop a bitcoind test node"""
self.nodes[i].stop_node()
self.nodes[i].wait_until_stopped()
def stop_nodes(self):
"""Stop multiple bitcoind test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node()
for node in self.nodes:
# Wait for nodes to stop
node.wait_until_stopped()
def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None):
with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
try:
self.start_node(i, extra_args, stderr=log_stderr)
self.stop_node(i)
except Exception as e:
assert 'tgkd exited' in str(e) # node must have shutdown
self.nodes[i].running = False
self.nodes[i].process = None
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8')
if expected_msg not in stderr:
raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
else:
if expected_msg is None:
assert_msg = "tgkd should have exited with an error"
else:
assert_msg = "tgkd should have exited with expected error " + expected_msg
raise AssertionError(assert_msg)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
def enable_mocktime(self):
"""Enable mocktime for the script.
mocktime may be needed for scripts that use the cached version of the
blockchain. If the cached version of the blockchain is used without
mocktime then the mempools will not sync due to IBD.
For backward compatibility of the python scripts with previous
versions of the cache, this helper function sets mocktime to Jan 1,
2014 + (201 * 10 * 60)"""
self.mocktime = 1388534400 + (201 * 10 * 60)
def disable_mocktime(self):
self.mocktime = 0
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("TGKRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache."""
assert self.num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(os.path.join(self.options.cachedir, 'node' + str(i))):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
# find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(os.path.join(self.options.cachedir, "node" + str(i))):
shutil.rmtree(os.path.join(self.options.cachedir, "node" + str(i)))
# Create cache directories, run bitcoinds:
for i in range(MAX_NODES):
datadir = initialize_datadir(self.options.cachedir, i)
args = [os.getenv("TGKD", "tgkd"), "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
self.nodes.append(TestNode(i, self.options.cachedir, extra_args=[], rpchost=None, timewait=None, binary=None, stderr=None, mocktime=self.mocktime, coverage_dir=None))
self.nodes[i].args = args
self.start_node(i)
# Wait for RPC connections to be ready
for node in self.nodes:
node.wait_for_rpc_connection()
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
self.enable_mocktime()
block_time = self.mocktime - (201 * 10 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generate(1)
block_time += 10 * 60
# Must sync before next peer starts generating blocks
sync_blocks(self.nodes)
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
self.disable_mocktime()
for i in range(MAX_NODES):
os.remove(log_filename(self.options.cachedir, i, "debug.log"))
os.remove(log_filename(self.options.cachedir, i, "db.log"))
os.remove(log_filename(self.options.cachedir, i, "peers.dat"))
os.remove(log_filename(self.options.cachedir, i, "fee_estimates.dat"))
for i in range(self.num_nodes):
from_dir = os.path.join(self.options.cachedir, "node" + str(i))
to_dir = os.path.join(self.options.tmpdir, "node" + str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(self.options.tmpdir, i) # Overwrite port/rpcport in bitcoin.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
class ComparisonTestFramework(BitcoinTestFramework):
"""Test framework for doing p2p comparison testing
Sets up some tgkd binaries:
- 1 binary: test binary
- 2 binaries: 1 test binary, 1 ref binary
- n>2 binaries: 1 test binary, n-1 ref binaries"""
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("TGKD", "tgkd"),
help="tgkd binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("TGKD", "tgkd"),
help="tgkd binary to use for reference nodes (if any)")
def setup_network(self):
extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args,
binary=[self.options.testbinary] +
[self.options.refbinary] * (self.num_nodes - 1))
self.start_nodes()
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
|
[] |
[] |
[
"TGKD",
"PYTHON_DEBUG",
"PATH"
] |
[]
|
["TGKD", "PYTHON_DEBUG", "PATH"]
|
python
| 3 | 0 | |
src/sysutil/util.go
|
//Package sysutil provides useful functions to interact with the system.
package sysutil
import (
"bufio"
"fmt"
"os"
"os/exec"
"strings"
)
const (
DEFAULT_EDITOR = "vim"
DEFAULT_YES = "[Y/n]"
DEFAULT_NO = "[y/N]"
AUTHOR = "B. VAUDOUR"
VERSION = "0.81.2"
KCP_LOCK = "kcp.lock"
KCP_DB = ".kcp.json"
LOCALE_DIR = "/usr/share/locale"
)
//LaunchCommand launches a system command.
func LaunchCommand(name string, args ...string) error {
cmd := exec.Command(name, args...)
cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
return cmd.Run()
}
//GetOutputCommand returns the redirected output of a system command.
func GetOutputCommand(name string, args ...string) ([]byte, error) {
cmd := exec.Command(name, args...)
return cmd.Output()
}
//EditFile lets the user edit the given file.
func EditFile(f string) error {
editor := os.Getenv("EDITOR")
if editor == "" {
editor = DEFAULT_EDITOR
}
return LaunchCommand(editor, f)
}
//InstalledVersion returns the installed version of a package.
func InstalledVersion(app string) string {
if b, e := GetOutputCommand("pacman", "-Q", app); e == nil {
f := strings.Fields(string(b))
if len(f) >= 2 {
return f[1]
}
}
return ""
}
//Question displays a question to the output and returns the response given by the user.
func Question(msg string) string {
fmt.Print(msg + " ")
sc := bufio.NewScanner(os.Stdin)
sc.Scan()
return strings.TrimSpace(sc.Text())
}
//QuestionYN displays a question to the output and returns the boolean response given by the user.
func QuestionYN(msg string, defaultResponse bool) bool {
defstr, resp := DEFAULT_YES, ""
if !defaultResponse {
defstr = "[y/N]"
}
fmt.Printf("\033[1;33m%s %s \033[m", msg, defstr)
if _, e := fmt.Scanf("%v", &resp); e != nil || len(resp) == 0 {
return defaultResponse
}
resp = strings.ToLower(resp)
switch {
case strings.HasPrefix(resp, "y"):
return true
case strings.HasPrefix(resp, "n"):
return false
default:
return defaultResponse
}
}
//PrintError prints a red message to stderr.
func PrintError(e interface{}) { fmt.Fprintf(os.Stderr, "\033[1;31m%v\033[m\n", e) }
//PrintWarning prints a yellow message to stderr.
func PrintWarning(e interface{}) {
fmt.Fprintf(os.Stderr, "\033[1;33m%v\033[m\n", e)
}
|
[
"\"EDITOR\""
] |
[] |
[
"EDITOR"
] |
[]
|
["EDITOR"]
|
go
| 1 | 0 | |
test_sqlx.go
|
package main
import (
"context"
"fmt"
"os"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq"
)
var db *sqlx.DB
func init() {
var err error
db, err = sqlx.Connect(
"postgres",
fmt.Sprintf("user=%s dbname=%s sslmode=%s", os.Getenv("DATABASE_USER"), os.Getenv("DATABASE_NAME"), os.Getenv("PGSSLMODE")),
)
if err != nil {
panic(err)
}
db.MustExec(`INSERT INTO users (name) VALUES ($1)`, "testuser")
}
func testSqlxUtils() {
printSubSection("SqlxSelect", testSqlxSelect)
}
func testSqlxSelect() {
snippet(
func() interface{} {
type User struct {
ID uint64
Name string
}
ctx := context.Background()
users, err := sqlxSelect[*User](ctx, db, "SELECT * FROM users")
if err != nil {
panic(err)
}
return users
},
)
}
|
[
"\"DATABASE_USER\"",
"\"DATABASE_NAME\"",
"\"PGSSLMODE\""
] |
[] |
[
"PGSSLMODE",
"DATABASE_NAME",
"DATABASE_USER"
] |
[]
|
["PGSSLMODE", "DATABASE_NAME", "DATABASE_USER"]
|
go
| 3 | 0 | |
storage/mountscan/mountscan_test.go
|
package mountscan
import (
"os"
"os/exec"
"github.com/Sirupsen/logrus"
. "testing"
. "gopkg.in/check.v1"
)
type mountscanSuite struct{}
var _ = Suite(&mountscanSuite{})
func TestMountscan(t *T) { TestingT(t) }
func (s *mountscanSuite) SetUpTest(c *C) {
if os.Getenv("DEBUG") != "" {
logrus.SetLevel(logrus.DebugLevel)
}
}
func (s *mountscanSuite) TestGetMounts(c *C) {
srcDir := "/tmp/src"
targetDir := "/tmp/target"
c.Assert(exec.Command("mkdir", "-p", srcDir).Run(), IsNil)
c.Assert(exec.Command("mkdir", "-p", targetDir).Run(), IsNil)
c.Assert(exec.Command("mount", "--bind", srcDir, targetDir).Run(), IsNil)
hostMounts, err := GetMounts(&GetMountsRequest{DriverName: "none", KernelDriver: "device-mapper"})
c.Assert(err, IsNil)
found := false
for _, hostMount := range hostMounts {
if found = (hostMount.MountPoint == targetDir && hostMount.Root == srcDir); found {
break
}
}
c.Assert(exec.Command("umount", targetDir).Run(), IsNil)
c.Assert(exec.Command("rm", "-r", targetDir).Run(), IsNil)
c.Assert(exec.Command("rm", "-r", srcDir).Run(), IsNil)
c.Assert(found, Equals, true)
}
func (s *mountscanSuite) TestGetMountsInput(c *C) {
_, err := GetMounts(&GetMountsRequest{DriverName: "nfs", FsType: "nfs4"})
c.Assert(err, IsNil)
_, err = GetMounts(&GetMountsRequest{DriverName: "ceph", KernelDriver: "rbd"})
c.Assert(err, IsNil)
_, err = GetMounts(&GetMountsRequest{DriverName: "none", KernelDriver: "device-mapper"})
c.Assert(err, IsNil)
_, err = GetMounts(&GetMountsRequest{DriverName: ""})
c.Assert(err, ErrorMatches, ".*DriverName is required.*")
_, err = GetMounts(&GetMountsRequest{DriverName: "nfs"})
c.Assert(err, ErrorMatches, ".*Filesystem type is required.*")
_, err = GetMounts(&GetMountsRequest{DriverName: "ceph"})
c.Assert(err, ErrorMatches, ".*Kernel driver is required.*")
}
|
[
"\"DEBUG\""
] |
[] |
[
"DEBUG"
] |
[]
|
["DEBUG"]
|
go
| 1 | 0 | |
ip-messaging/rest/services/create-service/create-service.7.x.java
|
// Install the Java helper library from twilio.com/docs/java/install
import com.twilio.Twilio;
import com.twilio.rest.chat.v2.Service;
public class Example {
// Get your Account SID and Auth Token from https://twilio.com/console
// To set up environment variables, see http://twil.io/secure
public static final String ACCOUNT_SID = System.getenv("TWILIO_ACCOUNT_SID");
public static final String AUTH_TOKEN = System.getenv("TWILIO_AUTH_TOKEN");
public static void main(String[] args) {
// Initialize the client
Twilio.init(ACCOUNT_SID, AUTH_TOKEN);
// Create the service
Service service = Service.creator("ServiceName").create();
System.out.println(service.getFriendlyName());
}
}
|
[
"\"TWILIO_ACCOUNT_SID\"",
"\"TWILIO_AUTH_TOKEN\""
] |
[] |
[
"TWILIO_AUTH_TOKEN",
"TWILIO_ACCOUNT_SID"
] |
[]
|
["TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID"]
|
java
| 2 | 0 | |
Binance Detect Moonings.py
|
# use for environment variables
import os
# needed for the binance API and websockets
from binance.client import Client
# used for dates
from datetime import datetime, timedelta
import time
# used to repeatedly execute the code
from itertools import count
# used to store trades and sell assets
import json
# Switch between testnet and mainnet
# Setting this to False will use REAL funds, use at your own risk
# Define your API keys below in order for the toggle to work
TESTNET = True
# Get binance key and secret for TEST and MAINNET
# The keys below are pulled from environment variables using os.getenv
# Simply remove this and use the following format instead: api_key_test = 'YOUR_API_KEY'
api_key_test = os.getenv('binance_api_stalkbot_testnet')
api_secret_test = os.getenv('binance_secret_stalkbot_testnet')
api_key_live = os.getenv('binance_api_stalkbot_live')
api_secret_live = os.getenv('binance_secret_stalkbot_live')
# Authenticate with the client
if TESTNET:
client = Client(api_key_test, api_secret_test)
# The API URL needs to be manually changed in the library to work on the TESTNET
client.API_URL = 'https://testnet.binance.vision/api'
else:
client = Client(api_key_live, api_secret_live)
####################################################
# USER INPUTS #
# You may edit to adjust the parameters of the bot #
####################################################
# select what to pair the coins to and pull all coins paired with PAIR_WITH
PAIR_WITH = 'USDT'
# Define the size of each trade, by default in USDT
QUANTITY = 100
# List of pairs to exclude
# by default we're excluding the most popular fiat pairs
# and some margin keywords, as we're only working on the SPOT account
FIATS = ['EURUSDT', 'GBPUSDT', 'JPYUSDT', 'USDUSDT', 'DOWN', 'UP']
# the amount of time in MINUTES to calculate the difference from the current price
TIME_DIFFERENCE = 5
# the minimum % gain between the first and second price checks (taken TIME_DIFFERENCE minutes apart) needed to flag a coin
CHANGE_IN_PRICE = 3
# define in % when to sell a coin that's not making a profit
STOP_LOSS = 3
# define in % when to take profit on a profitable coin
TAKE_PROFIT = 6
####################################################
# END OF USER INPUTS #
# Edit with care #
####################################################
# try to load all the coins bought by the bot if the file exists and is not empty
coins_bought = {}
# path to the saved coins_bought file
coins_bought_file_path = 'coins_bought.json'
# use separate files for testnet and live
if TESTNET:
coins_bought_file_path = 'testnet_' + coins_bought_file_path
# if saved coins_bought json file exists and it's not empty then load it
if os.path.isfile(coins_bought_file_path) and os.stat(coins_bought_file_path).st_size != 0:
with open(coins_bought_file_path) as file:
coins_bought = json.load(file)
def get_price():
'''Return the current price for all coins on binance'''
initial_price = {}
prices = client.get_all_tickers()
for coin in prices:
# only return USDT pairs and exclude margin symbols like BTCDOWNUSDT
if PAIR_WITH in coin['symbol'] and all(item not in coin['symbol'] for item in FIATS):
initial_price[coin['symbol']] = { 'price': coin['price'], 'time': datetime.now()}
return initial_price
def wait_for_price():
'''calls the initial price and ensures the correct amount of time has passed
before reading the current price again'''
volatile_coins = {}
initial_price = get_price()
while initial_price['BNBUSDT']['time'] > datetime.now() - timedelta(minutes=TIME_DIFFERENCE):
print(f'not enough time has passed yet...')
# let's wait here until the time passes...
time.sleep(60*TIME_DIFFERENCE)
else:
last_price = get_price()
# calculate the difference between the first and last price reads
for coin in initial_price:
threshold_check = (float(last_price[coin]['price']) - float(initial_price[coin]['price'])) / float(last_price[coin]['price']) * 100
# each coin with higher gains than our CHANGE_IN_PRICE is added to the volatile_coins dict
if threshold_check > CHANGE_IN_PRICE:
volatile_coins[coin] = threshold_check
volatile_coins[coin] = round(volatile_coins[coin], 3)
print(f'{coin} has gained {volatile_coins[coin]}% in the last {TIME_DIFFERENCE} minutes, calculating volume in {PAIR_WITH}')
if len(volatile_coins) < 1:
print(f'No coins moved more than {CHANGE_IN_PRICE}% in the last {TIME_DIFFERENCE} minute(s)')
return volatile_coins, len(volatile_coins), last_price
def convert_volume():
'''Converts the volume given in QUANTITY from USDT to each coin's volume'''
volatile_coins, number_of_coins, last_price = wait_for_price()
lot_size = {}
volume = {}
for coin in volatile_coins:
# Find the correct step size for each coin
# max accuracy for BTC for example is 6 decimal points
# while XRP is only 1
try:
info = client.get_symbol_info(coin)
step_size = info['filters'][2]['stepSize']
lot_size[coin] = step_size.index('1') - 1
if lot_size[coin] < 0:
lot_size[coin] = 0
except:
pass
# calculate the volume in coin from QUANTITY in USDT (default)
volume[coin] = float(QUANTITY / float(last_price[coin]['price']))
# define the volume with the correct step size
if coin not in lot_size:
volume[coin] = float('{:.1f}'.format(volume[coin]))
else:
# if lot size has 0 decimal points, make the volume an integer
if lot_size[coin] == 0:
volume[coin] = int(volume[coin])
else:
volume[coin] = float('{:.{}f}'.format(volume[coin], lot_size[coin]))
return volume, last_price
def buy():
'''Place Buy market orders for each volatile coin found'''
volume, last_price = convert_volume()
orders = {}
for coin in volume:
# only buy if there are no active trades on the coin
if coin not in coins_bought:
print(f' preparing to buy {volume[coin]} {coin}')
if TESTNET :
# create test order before pushing an actual order
test_order = client.create_test_order(symbol=coin, side='BUY', type='MARKET', quantity=volume[coin])
# try to create a real order if the test orders did not raise an exception
try:
buy_limit = client.create_order(
symbol = coin,
side = 'BUY',
type = 'MARKET',
quantity = volume[coin]
)
# error handling here in case position cannot be placed
except Exception as e:
print(e)
# run the else block if the position has been placed and return order info
else:
orders[coin] = client.get_all_orders(symbol=coin, limit=1)
# binance sometimes returns an empty list, the code will wait here until binance returns the order
while orders[coin] == []:
print('Binance is being slow in returning the order, calling the API again...')
orders[coin] = client.get_all_orders(symbol=coin, limit=1)
time.sleep(1)
else:
print('Order returned, saving order to file')
else:
print(f'Signal detected, but there is already an active trade on {coin}')
return orders, last_price, volume
def sell_coins():
'''sell coins that have reached the STOP LOSS or TAKE PROFIT threshold'''
last_price = get_price()
coins_sold = {}
for coin in list(coins_bought):
# define stop loss and take profit
TP = float(coins_bought[coin]['bought_at']) + (float(coins_bought[coin]['bought_at']) * TAKE_PROFIT) / 100
SL = float(coins_bought[coin]['bought_at']) - (float(coins_bought[coin]['bought_at']) * STOP_LOSS) / 100
# check that the price is above the take profit or below the stop loss
if float(last_price[coin]['price']) > TP or float(last_price[coin]['price']) < SL:
print(f"TP or SL reached, selling {coins_bought[coin]['volume']} {coin}...")
if TESTNET :
# create test order before pushing an actual order
test_order = client.create_test_order(symbol=coin, side='SELL', type='MARKET', quantity=coins_bought[coin]['volume'])
# try to create a real order if the test orders did not raise an exception
try:
# only sell 99.25% of the lot to avoid LOT exceptions
#sell_amount = coins_bought[coin]['volume']*99.25/100
sell_amount = coins_bought[coin]['volume']
decimals = len(str(coins_bought[coin]['volume']).split("."))
# convert to correct volume
sell_amount = float('{:.{}f}'.format(sell_amount, decimals))
sell_coins_limit = client.create_order(
symbol = coin,
side = 'SELL',
type = 'MARKET',
quantity = sell_amount # coins_bought[coin]['volume']
)
# error handling here in case position cannot be placed
except Exception as e:
print(e)
# run the else block if coin has been sold and create a dict for each coin sold
else:
coins_sold[coin] = coins_bought[coin]
else:
print(f'TP or SL not yet reached, not selling {coin} for now...')
return coins_sold
def update_porfolio(orders, last_price, volume):
'''add every coin bought to our portfolio for tracking/selling later'''
print(orders)
for coin in orders:
coins_bought[coin] = {
'symbol': orders[coin][0]['symbol'],
'orderid': orders[coin][0]['orderId'],
'timestamp': orders[coin][0]['time'],
'bought_at': last_price[coin]['price'],
'volume': volume[coin]
}
# save the coins in a json file in the same directory
with open(coins_bought_file_path, 'w') as file:
json.dump(coins_bought, file, indent=4)
print(f'Order with id {orders[coin][0]["orderId"]} placed and saved to file')
def remove_from_portfolio(coins_sold):
'''Remove coins sold due to SL or TP from the portfolio'''
for coin in coins_sold:
coins_bought.pop(coin)
with open(coins_bought_file_path, 'w') as file:
json.dump(coins_bought, file, indent=4)
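# Entry point: repeatedly buy coins that gained more than CHANGE_IN_PRICE percent,
# record them in the portfolio file and sell them once TP or SL is reached.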
if __name__ == '__main__':
print('Press Ctrl-Q to stop the script')
if not TESTNET:
print('WARNING: You are using the Mainnet and live funds. As a safety measure, the script will start executing in 30 seconds.')
time.sleep(30)
for i in count():
orders, last_price, volume = buy()
update_porfolio(orders, last_price, volume)
coins_sold = sell_coins()
remove_from_portfolio(coins_sold)
|
[] |
[] |
[
"binance_secret_stalkbot_testnet",
"binance_secret_stalkbot_live",
"binance_api_stalkbot_live",
"binance_api_stalkbot_testnet"
] |
[]
|
["binance_secret_stalkbot_testnet", "binance_secret_stalkbot_live", "binance_api_stalkbot_live", "binance_api_stalkbot_testnet"]
|
python
| 4 | 0 | |
python/mxnet/libinfo.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""Information about mxnet."""
from __future__ import absolute_import
import os
import platform
import logging
def find_lib_path():
"""Find MXNet dynamic library files.
Returns
-------
lib_path : list(string)
List of all found path to the libraries.
"""
lib_from_env = os.environ.get('MXNET_LIBRARY_PATH')
if lib_from_env:
if os.path.isfile(lib_from_env):
if not os.path.isabs(lib_from_env):
logging.warning("MXNET_LIBRARY_PATH should be an absolute path, instead of: %s",
lib_from_env)
else:
if os.name == 'nt':
os.environ['PATH'] = os.environ['PATH'] + ';' + os.path.dirname(lib_from_env)
return [lib_from_env]
else:
logging.warning("MXNET_LIBRARY_PATH '%s' doesn't exist", lib_from_env)
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
api_path = os.path.join(curr_path, '../../lib/')
cmake_build_path = os.path.join(curr_path, '../../build/')
dll_path = [curr_path, api_path, cmake_build_path]
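# extend the search path with platform-specific build output locations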
if os.name == 'nt':
dll_path.append(os.path.join(curr_path, '../../build'))
vs_configuration = 'Release'
if platform.architecture()[0] == '64bit':
dll_path.append(os.path.join(curr_path, '../../build', vs_configuration))
dll_path.append(os.path.join(curr_path, '../../windows/x64', vs_configuration))
else:
dll_path.append(os.path.join(curr_path, '../../build', vs_configuration))
dll_path.append(os.path.join(curr_path, '../../windows', vs_configuration))
elif os.name == "posix" and os.environ.get('LD_LIBRARY_PATH', None):
dll_path[0:0] = [p.strip() for p in os.environ['LD_LIBRARY_PATH'].split(":")]
if os.name == 'nt':
os.environ['PATH'] = os.path.dirname(__file__) + ';' + os.environ['PATH']
dll_path = [os.path.join(p, 'libmxnet.dll') for p in dll_path]
elif platform.system() == 'Darwin':
dll_path = [os.path.join(p, 'libmxnet.dylib') for p in dll_path] + \
[os.path.join(p, 'libmxnet.so') for p in dll_path]
else:
dll_path.append('../../../')
dll_path = [os.path.join(p, 'libmxnet.so') for p in dll_path]
lib_path = [p for p in dll_path if os.path.exists(p) and os.path.isfile(p)]
if len(lib_path) == 0:
raise RuntimeError('Cannot find the MXNet library.\n' +
'List of candidates:\n' + str('\n'.join(dll_path)))
if os.name == 'nt':
os.environ['PATH'] = os.environ['PATH'] + ';' + os.path.dirname(lib_path[0])
return lib_path
def find_include_path():
"""Find MXNet included header files.
Returns
-------
incl_path : string
Path to the header files.
"""
incl_from_env = os.environ.get('MXNET_INCLUDE_PATH')
if incl_from_env:
if os.path.isdir(incl_from_env):
if not os.path.isabs(incl_from_env):
logging.warning("MXNET_INCLUDE_PATH should be an absolute path, instead of: %s",
incl_from_env)
else:
return incl_from_env
else:
logging.warning("MXNET_INCLUDE_PATH '%s' doesn't exist", incl_from_env)
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
# include path in pip package
pip_incl_path = os.path.join(curr_path, 'include/')
if os.path.isdir(pip_incl_path):
return pip_incl_path
else:
# include path if build from source
src_incl_path = os.path.join(curr_path, '../../include/')
if os.path.isdir(src_incl_path):
return src_incl_path
else:
raise RuntimeError('Cannot find the MXNet include path in either ' + pip_incl_path +
' or ' + src_incl_path + '\n')
# current version
__version__ = "1.5.0"
|
[] |
[] |
[
"MXNET_INCLUDE_PATH",
"LD_LIBRARY_PATH",
"PATH",
"MXNET_LIBRARY_PATH"
] |
[]
|
["MXNET_INCLUDE_PATH", "LD_LIBRARY_PATH", "PATH", "MXNET_LIBRARY_PATH"]
|
python
| 4 | 0 | |
ts/model_server.py
|
"""
File to define the entry point to Model Server
"""
import os
import re
import subprocess
import sys
import tempfile
from builtins import str
import platform
import psutil
from ts.version import __version__
from ts.arg_parser import ArgParser
def start():
"""
This is the entry point for model server
:return:
"""
args = ArgParser.ts_parser().parse_args()
pid_file = os.path.join(tempfile.gettempdir(), ".model_server.pid")
pid = None
if os.path.isfile(pid_file):
with open(pid_file, "r") as f:
pid = int(f.readline())
# pylint: disable=too-many-nested-blocks
if args.version:
print("TorchServe Version is {}".format(__version__))
return
if args.stop:
if pid is None:
print("TorchServe is not currently running.")
else:
try:
parent = psutil.Process(pid)
parent.terminate()
print("TorchServe has stopped.")
except (OSError, psutil.Error):
print("TorchServe already stopped.")
os.remove(pid_file)
else:
if pid is not None:
try:
psutil.Process(pid)
print("TorchServe is already running, please use torchserve --stop to stop TorchServe.")
sys.exit(1)
except psutil.Error:
print("Removing orphan pid file.")
os.remove(pid_file)
java_home = os.environ.get("JAVA_HOME")
java = "java" if not java_home else "{}/bin/java".format(java_home)
ts_home = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
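# assemble the java command line that launches the TorchServe frontend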
cmd = [java, "-Dmodel_server_home={}".format(ts_home)]
if args.log_config:
log_config = os.path.realpath(args.log_config)
if not os.path.isfile(log_config):
print("--log-config file not found: {}".format(log_config))
sys.exit(1)
cmd.append("-Dlog4j.configurationFile=file://{}".format(log_config))
tmp_dir = os.environ.get("TEMP")
if tmp_dir:
if not os.path.isdir(tmp_dir):
print("Invalid temp directory: {}, please check TEMP environment variable.".format(tmp_dir))
sys.exit(1)
cmd.append("-Djava.io.tmpdir={}".format(tmp_dir))
ts_config = os.environ.get("TS_CONFIG_FILE")
if ts_config is None:
ts_config = args.ts_config
ts_conf_file = None
if ts_config:
if not os.path.isfile(ts_config):
print("--ts-config file not found: {}".format(ts_config))
sys.exit(1)
ts_conf_file = ts_config
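# the frontend classpath is prefixed with ".:" on Linux/macOS; Windows needs no prefix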
platform_path_separator = {"Windows": "", "Darwin": ".:", "Linux": ".:"}
class_path = "{}{}".format(platform_path_separator[platform.system()], os.path.join(ts_home, "ts/frontend/*"))
if ts_conf_file and os.path.isfile(ts_conf_file):
props = load_properties(ts_conf_file)
vm_args = props.get("vmargs")
if vm_args:
print("Warning: TorchServe is using non-default JVM parameters: {}".format(vm_args))
arg_list = vm_args.split()
if args.log_config:
for word in arg_list[:]:
if word.startswith("-Dlog4j.configurationFile="):
arg_list.remove(word)
cmd.extend(arg_list)
plugins = props.get("plugins_path", None)
if plugins:
class_path += ":" + plugins + "/*" if "*" not in plugins else ":" + plugins
if not args.model_store and props.get('model_store'):
args.model_store = props.get('model_store')
if args.plugins_path:
class_path += ":" + args.plugins_path + "/*" if "*" not in args.plugins_path else ":" + args.plugins_path
cmd.append("-cp")
cmd.append(class_path)
cmd.append("org.pytorch.serve.ModelServer")
# model-server.jar command line parameters
cmd.append("--python")
cmd.append(sys.executable)
if ts_conf_file is not None:
cmd.append("-f")
cmd.append(ts_conf_file)
if args.model_store:
if not os.path.isdir(args.model_store):
print("--model-store directory not found: {}".format(args.model_store))
sys.exit(1)
cmd.append("-s")
cmd.append(args.model_store)
else:
print("Missing mandatory parameter --model-store")
sys.exit(1)
if args.workflow_store:
if not os.path.isdir(args.workflow_store):
print("--workflow-store directory not found: {}".format(args.workflow_store))
sys.exit(1)
cmd.append("-w")
cmd.append(args.workflow_store)
else:
cmd.append("-w")
cmd.append(args.model_store)
if args.no_config_snapshots:
cmd.append("-ncs")
if args.models:
cmd.append("-m")
cmd.extend(args.models)
if not args.model_store:
pattern = re.compile(r"(.+=)?http(s)?://.+", re.IGNORECASE)
for model_url in args.models:
if not pattern.match(model_url) and model_url != "ALL":
print("--model-store is required to load model locally.")
sys.exit(1)
try:
process = subprocess.Popen(cmd)
pid = process.pid
with open(pid_file, "w") as pf:
pf.write(str(pid))
if args.foreground:
process.wait()
except OSError as e:
if e.errno == 2:
print("java not found, please make sure JAVA_HOME is set properly.")
else:
print("start java frontend failed:", sys.exc_info())
def load_properties(file_path):
"""
Read properties file into map.
"""
props = {}
with open(file_path, "rt") as f:
for line in f:
line = line.strip()
if not line.startswith("#"):
pair = line.split("=", 1)
if len(pair) > 1:
key = pair[0].strip()
props[key] = pair[1].strip()
return props
if __name__ == "__main__":
start()
|
[] |
[] |
[
"JAVA_HOME",
"TS_CONFIG_FILE",
"TEMP"
] |
[]
|
["JAVA_HOME", "TS_CONFIG_FILE", "TEMP"]
|
python
| 3 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'LaChalupa.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
clients/google-api-services-chromepolicy/v1/1.31.0/com/google/api/services/chromepolicy/v1/ChromePolicy.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.chromepolicy.v1;
/**
* Service definition for ChromePolicy (v1).
*
* <p>
* The Chrome Policy API is a suite of services that allows Chrome administrators to control the policies applied to their managed Chrome OS devices and Chrome browsers.
* </p>
*
* <p>
* For more information about this service, see the
* <a href="http://developers.google.com/chrome/policy" target="_blank">API Documentation</a>
* </p>
*
* <p>
* This service uses {@link ChromePolicyRequestInitializer} to initialize global parameters via its
* {@link Builder}.
* </p>
*
* @since 1.3
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public class ChromePolicy extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient {
// Note: Leave this static initializer at the top of the file.
static {
com.google.api.client.util.Preconditions.checkState(
com.google.api.client.googleapis.GoogleUtils.MAJOR_VERSION == 1 &&
(com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION >= 32 ||
(com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION == 31 &&
com.google.api.client.googleapis.GoogleUtils.BUGFIX_VERSION >= 1)),
"You are currently running with version %s of google-api-client. " +
"You need at least version 1.31.1 of google-api-client to run version " +
"1.31.0 of the Chrome Policy API library.", com.google.api.client.googleapis.GoogleUtils.VERSION);
}
/**
* The default encoded root URL of the service. This is determined when the library is generated
* and normally should not be changed.
*
* @since 1.7
*/
public static final String DEFAULT_ROOT_URL = "https://chromepolicy.googleapis.com/";
/**
* The default encoded mTLS root URL of the service. This is determined when the library is generated
* and normally should not be changed.
*
* @since 1.31
*/
public static final String DEFAULT_MTLS_ROOT_URL = "https://chromepolicy.mtls.googleapis.com/";
/**
* The default encoded service path of the service. This is determined when the library is
* generated and normally should not be changed.
*
* @since 1.7
*/
public static final String DEFAULT_SERVICE_PATH = "";
/**
* The default encoded batch path of the service. This is determined when the library is
* generated and normally should not be changed.
*
* @since 1.23
*/
public static final String DEFAULT_BATCH_PATH = "batch";
/**
* The default encoded base URL of the service. This is determined when the library is generated
* and normally should not be changed.
*/
public static final String DEFAULT_BASE_URL = DEFAULT_ROOT_URL + DEFAULT_SERVICE_PATH;
/**
* Constructor.
*
* <p>
* Use {@link Builder} if you need to specify any of the optional parameters.
* </p>
*
* @param transport HTTP transport, which should normally be:
* <ul>
* <li>Google App Engine:
* {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
* <li>Android: {@code newCompatibleTransport} from
* {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
* <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
* </li>
* </ul>
* @param jsonFactory JSON factory, which may be:
* <ul>
* <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
* <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
* <li>Android Honeycomb or higher:
* {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
* </ul>
* @param httpRequestInitializer HTTP request initializer or {@code null} for none
* @since 1.7
*/
public ChromePolicy(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
this(new Builder(transport, jsonFactory, httpRequestInitializer));
}
/**
* @param builder builder
*/
ChromePolicy(Builder builder) {
super(builder);
}
@Override
protected void initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest<?> httpClientRequest) throws java.io.IOException {
super.initialize(httpClientRequest);
}
/**
* An accessor for creating requests from the Customers collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code ChromePolicy chromepolicy = new ChromePolicy(...);}
* {@code ChromePolicy.Customers.List request = chromepolicy.customers().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Customers customers() {
return new Customers();
}
/**
* The "customers" collection of methods.
*/
public class Customers {
/**
* An accessor for creating requests from the Policies collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code ChromePolicy chromepolicy = new ChromePolicy(...);}
* {@code ChromePolicy.Policies.List request = chromepolicy.policies().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Policies policies() {
return new Policies();
}
/**
* The "policies" collection of methods.
*/
public class Policies {
/**
* Gets the resolved policy values for a list of policies that match a search query.
*
* Create a request for the method "policies.resolve".
*
* This request holds the parameters needed by the chromepolicy server. After setting any optional
* parameters, call the {@link Resolve#execute()} method to invoke the remote operation.
*
* @param customer ID of the G Suite account or literal "my_customer" for the customer associated to the request.
* @param content the {@link com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1ResolveRequest}
* @return the request
*/
public Resolve resolve(java.lang.String customer, com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1ResolveRequest content) throws java.io.IOException {
Resolve result = new Resolve(customer, content);
initialize(result);
return result;
}
public class Resolve extends ChromePolicyRequest<com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1ResolveResponse> {
private static final String REST_PATH = "v1/{+customer}/policies:resolve";
private final java.util.regex.Pattern CUSTOMER_PATTERN =
java.util.regex.Pattern.compile("^customers/[^/]+$");
/**
* Gets the resolved policy values for a list of policies that match a search query.
*
* Create a request for the method "policies.resolve".
*
* This request holds the parameters needed by the chromepolicy server. After setting any
* optional parameters, call the {@link Resolve#execute()} method to invoke the remote operation.
* <p> {@link
* Resolve#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param customer ID of the G Suite account or literal "my_customer" for the customer associated to the request.
* @param content the {@link com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1ResolveRequest}
* @since 1.13
*/
protected Resolve(java.lang.String customer, com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1ResolveRequest content) {
super(ChromePolicy.this, "POST", REST_PATH, content, com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1ResolveResponse.class);
this.customer = com.google.api.client.util.Preconditions.checkNotNull(customer, "Required parameter customer must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(CUSTOMER_PATTERN.matcher(customer).matches(),
"Parameter customer must conform to the pattern " +
"^customers/[^/]+$");
}
}
@Override
public Resolve set$Xgafv(java.lang.String $Xgafv) {
return (Resolve) super.set$Xgafv($Xgafv);
}
@Override
public Resolve setAccessToken(java.lang.String accessToken) {
return (Resolve) super.setAccessToken(accessToken);
}
@Override
public Resolve setAlt(java.lang.String alt) {
return (Resolve) super.setAlt(alt);
}
@Override
public Resolve setCallback(java.lang.String callback) {
return (Resolve) super.setCallback(callback);
}
@Override
public Resolve setFields(java.lang.String fields) {
return (Resolve) super.setFields(fields);
}
@Override
public Resolve setKey(java.lang.String key) {
return (Resolve) super.setKey(key);
}
@Override
public Resolve setOauthToken(java.lang.String oauthToken) {
return (Resolve) super.setOauthToken(oauthToken);
}
@Override
public Resolve setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Resolve) super.setPrettyPrint(prettyPrint);
}
@Override
public Resolve setQuotaUser(java.lang.String quotaUser) {
return (Resolve) super.setQuotaUser(quotaUser);
}
@Override
public Resolve setUploadType(java.lang.String uploadType) {
return (Resolve) super.setUploadType(uploadType);
}
@Override
public Resolve setUploadProtocol(java.lang.String uploadProtocol) {
return (Resolve) super.setUploadProtocol(uploadProtocol);
}
/**
* ID of the G Suite account or literal "my_customer" for the customer associated to the
* request.
*/
@com.google.api.client.util.Key
private java.lang.String customer;
/** ID of the G Suite account or literal "my_customer" for the customer associated to the request.
*/
public java.lang.String getCustomer() {
return customer;
}
/**
* ID of the G Suite account or literal "my_customer" for the customer associated to the
* request.
*/
public Resolve setCustomer(java.lang.String customer) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(CUSTOMER_PATTERN.matcher(customer).matches(),
"Parameter customer must conform to the pattern " +
"^customers/[^/]+$");
}
this.customer = customer;
return this;
}
@Override
public Resolve set(String parameterName, Object value) {
return (Resolve) super.set(parameterName, value);
}
}
/**
* An accessor for creating requests from the Orgunits collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code ChromePolicy chromepolicy = new ChromePolicy(...);}
* {@code ChromePolicy.Orgunits.List request = chromepolicy.orgunits().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Orgunits orgunits() {
return new Orgunits();
}
/**
* The "orgunits" collection of methods.
*/
public class Orgunits {
/**
* Modify multiple policy values that are applied to a specific org unit so that they now inherit
* the value from a parent (if applicable). All targets must have the same target format. That is to
* say that they must point to the same target resource and must have the same keys specified in
* `additionalTargetKeyNames`. On failure the request will return the error details as part of the
* google.rpc.Status.
*
* Create a request for the method "orgunits.batchInherit".
*
* This request holds the parameters needed by the chromepolicy server. After setting any optional
* parameters, call the {@link BatchInherit#execute()} method to invoke the remote operation.
*
* @param customer ID of the G Suite account or literal "my_customer" for the customer associated to the request.
* @param content the {@link com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1BatchInheritOrgUnitPoliciesRequest}
* @return the request
*/
public BatchInherit batchInherit(java.lang.String customer, com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1BatchInheritOrgUnitPoliciesRequest content) throws java.io.IOException {
BatchInherit result = new BatchInherit(customer, content);
initialize(result);
return result;
}
public class BatchInherit extends ChromePolicyRequest<com.google.api.services.chromepolicy.v1.model.GoogleProtobufEmpty> {
private static final String REST_PATH = "v1/{+customer}/policies/orgunits:batchInherit";
private final java.util.regex.Pattern CUSTOMER_PATTERN =
java.util.regex.Pattern.compile("^customers/[^/]+$");
/**
* Modify multiple policy values that are applied to a specific org unit so that they now inherit
* the value from a parent (if applicable). All targets must have the same target format. That is
* to say that they must point to the same target resource and must have the same keys specified
* in `additionalTargetKeyNames`. On failure the request will return the error details as part of
* the google.rpc.Status.
*
* Create a request for the method "orgunits.batchInherit".
*
* This request holds the parameters needed by the chromepolicy server. After setting any
* optional parameters, call the {@link BatchInherit#execute()} method to invoke the remote
* operation. <p> {@link
* BatchInherit#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param customer ID of the G Suite account or literal "my_customer" for the customer associated to the request.
* @param content the {@link com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1BatchInheritOrgUnitPoliciesRequest}
* @since 1.13
*/
protected BatchInherit(java.lang.String customer, com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1BatchInheritOrgUnitPoliciesRequest content) {
super(ChromePolicy.this, "POST", REST_PATH, content, com.google.api.services.chromepolicy.v1.model.GoogleProtobufEmpty.class);
this.customer = com.google.api.client.util.Preconditions.checkNotNull(customer, "Required parameter customer must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(CUSTOMER_PATTERN.matcher(customer).matches(),
"Parameter customer must conform to the pattern " +
"^customers/[^/]+$");
}
}
@Override
public BatchInherit set$Xgafv(java.lang.String $Xgafv) {
return (BatchInherit) super.set$Xgafv($Xgafv);
}
@Override
public BatchInherit setAccessToken(java.lang.String accessToken) {
return (BatchInherit) super.setAccessToken(accessToken);
}
@Override
public BatchInherit setAlt(java.lang.String alt) {
return (BatchInherit) super.setAlt(alt);
}
@Override
public BatchInherit setCallback(java.lang.String callback) {
return (BatchInherit) super.setCallback(callback);
}
@Override
public BatchInherit setFields(java.lang.String fields) {
return (BatchInherit) super.setFields(fields);
}
@Override
public BatchInherit setKey(java.lang.String key) {
return (BatchInherit) super.setKey(key);
}
@Override
public BatchInherit setOauthToken(java.lang.String oauthToken) {
return (BatchInherit) super.setOauthToken(oauthToken);
}
@Override
public BatchInherit setPrettyPrint(java.lang.Boolean prettyPrint) {
return (BatchInherit) super.setPrettyPrint(prettyPrint);
}
@Override
public BatchInherit setQuotaUser(java.lang.String quotaUser) {
return (BatchInherit) super.setQuotaUser(quotaUser);
}
@Override
public BatchInherit setUploadType(java.lang.String uploadType) {
return (BatchInherit) super.setUploadType(uploadType);
}
@Override
public BatchInherit setUploadProtocol(java.lang.String uploadProtocol) {
return (BatchInherit) super.setUploadProtocol(uploadProtocol);
}
/**
* ID of the G Suite account or literal "my_customer" for the customer associated to the
* request.
*/
@com.google.api.client.util.Key
private java.lang.String customer;
/** ID of the G Suite account or literal "my_customer" for the customer associated to the request.
*/
public java.lang.String getCustomer() {
return customer;
}
/**
* ID of the G Suite account or literal "my_customer" for the customer associated to the
* request.
*/
public BatchInherit setCustomer(java.lang.String customer) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(CUSTOMER_PATTERN.matcher(customer).matches(),
"Parameter customer must conform to the pattern " +
"^customers/[^/]+$");
}
this.customer = customer;
return this;
}
@Override
public BatchInherit set(String parameterName, Object value) {
return (BatchInherit) super.set(parameterName, value);
}
}
/**
* Modify multiple policy values that are applied to a specific org unit. All targets must have the
* same target format. That is to say that they must point to the same target resource and must have
* the same keys specified in `additionalTargetKeyNames`. On failure the request will return the
* error details as part of the google.rpc.Status.
*
* Create a request for the method "orgunits.batchModify".
*
* This request holds the parameters needed by the chromepolicy server. After setting any optional
* parameters, call the {@link BatchModify#execute()} method to invoke the remote operation.
*
* @param customer ID of the G Suite account or literal "my_customer" for the customer associated to the request.
* @param content the {@link com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1BatchModifyOrgUnitPoliciesRequest}
* @return the request
*/
public BatchModify batchModify(java.lang.String customer, com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1BatchModifyOrgUnitPoliciesRequest content) throws java.io.IOException {
BatchModify result = new BatchModify(customer, content);
initialize(result);
return result;
}
public class BatchModify extends ChromePolicyRequest<com.google.api.services.chromepolicy.v1.model.GoogleProtobufEmpty> {
private static final String REST_PATH = "v1/{+customer}/policies/orgunits:batchModify";
private final java.util.regex.Pattern CUSTOMER_PATTERN =
java.util.regex.Pattern.compile("^customers/[^/]+$");
/**
* Modify multiple policy values that are applied to a specific org unit. All targets must have
* the same target format. That is to say that they must point to the same target resource and
* must have the same keys specified in `additionalTargetKeyNames`. On failure the request will
* return the error details as part of the google.rpc.Status.
*
* Create a request for the method "orgunits.batchModify".
*
* This request holds the parameters needed by the chromepolicy server. After setting any
* optional parameters, call the {@link BatchModify#execute()} method to invoke the remote
* operation. <p> {@link
* BatchModify#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param customer ID of the G Suite account or literal "my_customer" for the customer associated to the request.
* @param content the {@link com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1BatchModifyOrgUnitPoliciesRequest}
* @since 1.13
*/
protected BatchModify(java.lang.String customer, com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1BatchModifyOrgUnitPoliciesRequest content) {
super(ChromePolicy.this, "POST", REST_PATH, content, com.google.api.services.chromepolicy.v1.model.GoogleProtobufEmpty.class);
this.customer = com.google.api.client.util.Preconditions.checkNotNull(customer, "Required parameter customer must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(CUSTOMER_PATTERN.matcher(customer).matches(),
"Parameter customer must conform to the pattern " +
"^customers/[^/]+$");
}
}
@Override
public BatchModify set$Xgafv(java.lang.String $Xgafv) {
return (BatchModify) super.set$Xgafv($Xgafv);
}
@Override
public BatchModify setAccessToken(java.lang.String accessToken) {
return (BatchModify) super.setAccessToken(accessToken);
}
@Override
public BatchModify setAlt(java.lang.String alt) {
return (BatchModify) super.setAlt(alt);
}
@Override
public BatchModify setCallback(java.lang.String callback) {
return (BatchModify) super.setCallback(callback);
}
@Override
public BatchModify setFields(java.lang.String fields) {
return (BatchModify) super.setFields(fields);
}
@Override
public BatchModify setKey(java.lang.String key) {
return (BatchModify) super.setKey(key);
}
@Override
public BatchModify setOauthToken(java.lang.String oauthToken) {
return (BatchModify) super.setOauthToken(oauthToken);
}
@Override
public BatchModify setPrettyPrint(java.lang.Boolean prettyPrint) {
return (BatchModify) super.setPrettyPrint(prettyPrint);
}
@Override
public BatchModify setQuotaUser(java.lang.String quotaUser) {
return (BatchModify) super.setQuotaUser(quotaUser);
}
@Override
public BatchModify setUploadType(java.lang.String uploadType) {
return (BatchModify) super.setUploadType(uploadType);
}
@Override
public BatchModify setUploadProtocol(java.lang.String uploadProtocol) {
return (BatchModify) super.setUploadProtocol(uploadProtocol);
}
/**
* ID of the G Suite account or literal "my_customer" for the customer associated to the
* request.
*/
@com.google.api.client.util.Key
private java.lang.String customer;
/** ID of the G Suite account or literal "my_customer" for the customer associated to the request.
*/
public java.lang.String getCustomer() {
return customer;
}
/**
* ID of the G Suite account or literal "my_customer" for the customer associated to the
* request.
*/
public BatchModify setCustomer(java.lang.String customer) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(CUSTOMER_PATTERN.matcher(customer).matches(),
"Parameter customer must conform to the pattern " +
"^customers/[^/]+$");
}
this.customer = customer;
return this;
}
@Override
public BatchModify set(String parameterName, Object value) {
return (BatchModify) super.set(parameterName, value);
}
}
}
}
/**
* An accessor for creating requests from the PolicySchemas collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code ChromePolicy chromepolicy = new ChromePolicy(...);}
* {@code ChromePolicy.PolicySchemas.List request = chromepolicy.policySchemas().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public PolicySchemas policySchemas() {
return new PolicySchemas();
}
/**
* The "policySchemas" collection of methods.
*/
public class PolicySchemas {
/**
* Get a specific policy schema for a customer by its resource name.
*
* Create a request for the method "policySchemas.get".
*
* This request holds the parameters needed by the chromepolicy server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param name Required. The policy schema resource name to query.
* @return the request
*/
public Get get(java.lang.String name) throws java.io.IOException {
Get result = new Get(name);
initialize(result);
return result;
}
public class Get extends ChromePolicyRequest<com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1PolicySchema> {
private static final String REST_PATH = "v1/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^customers/[^/]+/policySchemas/.*$");
/**
* Get a specific policy schema for a customer by its resource name.
*
* Create a request for the method "policySchemas.get".
*
* This request holds the parameters needed by the chromepolicy server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Required. The policy schema resource name to query.
* @since 1.13
*/
protected Get(java.lang.String name) {
super(ChromePolicy.this, "GET", REST_PATH, null, com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1PolicySchema.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^customers/[^/]+/policySchemas/.*$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** Required. The policy schema resource name to query. */
@com.google.api.client.util.Key
private java.lang.String name;
/** Required. The policy schema resource name to query.
*/
public java.lang.String getName() {
return name;
}
/** Required. The policy schema resource name to query. */
public Get setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^customers/[^/]+/policySchemas/.*$");
}
this.name = name;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Gets a list of policy schemas that match a specified filter value for a given customer.
*
* Create a request for the method "policySchemas.list".
*
* This request holds the parameters needed by the chromepolicy server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param parent Required. The customer for which the listing request will apply.
* @return the request
*/
public List list(java.lang.String parent) throws java.io.IOException {
List result = new List(parent);
initialize(result);
return result;
}
public class List extends ChromePolicyRequest<com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1ListPolicySchemasResponse> {
private static final String REST_PATH = "v1/{+parent}/policySchemas";
private final java.util.regex.Pattern PARENT_PATTERN =
java.util.regex.Pattern.compile("^customers/[^/]+$");
/**
* Gets a list of policy schemas that match a specified filter value for a given customer.
*
* Create a request for the method "policySchemas.list".
*
* This request holds the parameters needed by the chromepolicy server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param parent Required. The customer for which the listing request will apply.
* @since 1.13
*/
protected List(java.lang.String parent) {
super(ChromePolicy.this, "GET", REST_PATH, null, com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1ListPolicySchemasResponse.class);
this.parent = com.google.api.client.util.Preconditions.checkNotNull(parent, "Required parameter parent must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^customers/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** Required. The customer for which the listing request will apply. */
@com.google.api.client.util.Key
private java.lang.String parent;
/** Required. The customer for which the listing request will apply.
*/
public java.lang.String getParent() {
return parent;
}
/** Required. The customer for which the listing request will apply. */
public List setParent(java.lang.String parent) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^customers/[^/]+$");
}
this.parent = parent;
return this;
}
/**
* The schema filter used to find a particular schema based on fields like its resource
* name, description and `additionalTargetKeyNames`.
*/
@com.google.api.client.util.Key
private java.lang.String filter;
/** The schema filter used to find a particular schema based on fields like its resource name,
description and `additionalTargetKeyNames`.
*/
public java.lang.String getFilter() {
return filter;
}
/**
* The schema filter used to find a particular schema based on fields like its resource
* name, description and `additionalTargetKeyNames`.
*/
public List setFilter(java.lang.String filter) {
this.filter = filter;
return this;
}
/** The maximum number of policy schemas to return. */
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** The maximum number of policy schemas to return.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/** The maximum number of policy schemas to return. */
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/** The page token used to retrieve a specific page of the listing request. */
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** The page token used to retrieve a specific page of the listing request.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/** The page token used to retrieve a specific page of the listing request. */
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
}
}
/**
* An accessor for creating requests from the Media collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code ChromePolicy chromepolicy = new ChromePolicy(...);}
* {@code ChromePolicy.Media.List request = chromepolicy.media().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Media media() {
return new Media();
}
/**
* The "media" collection of methods.
*/
public class Media {
/**
* Creates an enterprise file from the content provided by user. Returns a public download url for
* end user.
*
* Create a request for the method "media.upload".
*
* This request holds the parameters needed by the chromepolicy server. After setting any optional
* parameters, call the {@link Upload#execute()} method to invoke the remote operation.
*
* @param customer Required. The customer for which the file upload will apply.
* @param content the {@link com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1UploadPolicyFileRequest}
* @return the request
*/
public Upload upload(java.lang.String customer, com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1UploadPolicyFileRequest content) throws java.io.IOException {
Upload result = new Upload(customer, content);
initialize(result);
return result;
}
/**
* Creates an enterprise file from the content provided by user. Returns a public download url for
* end user.
*
* Create a request for the method "media.upload".
*
* This request holds the parameters needed by the chromepolicy server. After setting any
* optional parameters, call the {@link Upload#execute()} method to invoke the remote operation.
*
* <p>
* This method should be used for uploading media content.
* </p>
*
* @param customer Required. The customer for which the file upload will apply.
* @param content the {@link com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1UploadPolicyFileRequest} media metadata or {@code null} if none
* @param mediaContent The media HTTP content or {@code null} if none.
* @return the request
* @throws java.io.IOException if the initialization of the request fails
*/
public Upload upload(java.lang.String customer, com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1UploadPolicyFileRequest content, com.google.api.client.http.AbstractInputStreamContent mediaContent) throws java.io.IOException {
Upload result = new Upload(customer, content, mediaContent);
initialize(result);
return result;
}
public class Upload extends ChromePolicyRequest<com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1UploadPolicyFileResponse> {
private static final String REST_PATH = "v1/{+customer}/policies/files:uploadPolicyFile";
private final java.util.regex.Pattern CUSTOMER_PATTERN =
java.util.regex.Pattern.compile("^customers/[^/]+$");
/**
* Creates an enterprise file from the content provided by user. Returns a public download url for
* end user.
*
* Create a request for the method "media.upload".
*
* This request holds the parameters needed by the chromepolicy server. After setting any
* optional parameters, call the {@link Upload#execute()} method to invoke the remote operation.
* <p> {@link
* Upload#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param customer Required. The customer for which the file upload will apply.
* @param content the {@link com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1UploadPolicyFileRequest}
* @since 1.13
*/
protected Upload(java.lang.String customer, com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1UploadPolicyFileRequest content) {
super(ChromePolicy.this, "POST", REST_PATH, content, com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1UploadPolicyFileResponse.class);
this.customer = com.google.api.client.util.Preconditions.checkNotNull(customer, "Required parameter customer must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(CUSTOMER_PATTERN.matcher(customer).matches(),
"Parameter customer must conform to the pattern " +
"^customers/[^/]+$");
}
}
/**
* Creates an enterprise file from the content provided by user. Returns a public download url for
* end user.
*
* Create a request for the method "media.upload".
*
* This request holds the parameters needed by the chromepolicy server. After setting any
* optional parameters, call the {@link Upload#execute()} method to invoke the remote operation.
* <p> {@link
* Upload#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* <p>
* This constructor should be used for uploading media content.
* </p>
*
* @param customer Required. The customer for which the file upload will apply.
* @param content the {@link com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1UploadPolicyFileRequest} media metadata or {@code null} if none
* @param mediaContent The media HTTP content or {@code null} if none.
* @since 1.13
*/
protected Upload(java.lang.String customer, com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1UploadPolicyFileRequest content, com.google.api.client.http.AbstractInputStreamContent mediaContent) {
super(ChromePolicy.this, "POST", "/upload/" + getServicePath() + REST_PATH, content, com.google.api.services.chromepolicy.v1.model.GoogleChromePolicyV1UploadPolicyFileResponse.class);
this.customer = com.google.api.client.util.Preconditions.checkNotNull(customer, "Required parameter customer must be specified.");
initializeMediaUpload(mediaContent);
}
@Override
public Upload set$Xgafv(java.lang.String $Xgafv) {
return (Upload) super.set$Xgafv($Xgafv);
}
@Override
public Upload setAccessToken(java.lang.String accessToken) {
return (Upload) super.setAccessToken(accessToken);
}
@Override
public Upload setAlt(java.lang.String alt) {
return (Upload) super.setAlt(alt);
}
@Override
public Upload setCallback(java.lang.String callback) {
return (Upload) super.setCallback(callback);
}
@Override
public Upload setFields(java.lang.String fields) {
return (Upload) super.setFields(fields);
}
@Override
public Upload setKey(java.lang.String key) {
return (Upload) super.setKey(key);
}
@Override
public Upload setOauthToken(java.lang.String oauthToken) {
return (Upload) super.setOauthToken(oauthToken);
}
@Override
public Upload setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Upload) super.setPrettyPrint(prettyPrint);
}
@Override
public Upload setQuotaUser(java.lang.String quotaUser) {
return (Upload) super.setQuotaUser(quotaUser);
}
@Override
public Upload setUploadType(java.lang.String uploadType) {
return (Upload) super.setUploadType(uploadType);
}
@Override
public Upload setUploadProtocol(java.lang.String uploadProtocol) {
return (Upload) super.setUploadProtocol(uploadProtocol);
}
/** Required. The customer for which the file upload will apply. */
@com.google.api.client.util.Key
private java.lang.String customer;
/** Required. The customer for which the file upload will apply.
*/
public java.lang.String getCustomer() {
return customer;
}
/** Required. The customer for which the file upload will apply. */
public Upload setCustomer(java.lang.String customer) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(CUSTOMER_PATTERN.matcher(customer).matches(),
"Parameter customer must conform to the pattern " +
"^customers/[^/]+$");
}
this.customer = customer;
return this;
}
@Override
public Upload set(String parameterName, Object value) {
return (Upload) super.set(parameterName, value);
}
}
}
/**
* Builder for {@link ChromePolicy}.
*
* <p>
* Implementation is not thread-safe.
* </p>
*
* @since 1.3.0
*/
public static final class Builder extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient.Builder {
private static String chooseEndpoint(com.google.api.client.http.HttpTransport transport) {
// If the GOOGLE_API_USE_MTLS_ENDPOINT environment variable value is "always", use mTLS endpoint.
// If the env variable is "auto", use mTLS endpoint if and only if the transport is mTLS.
// Use the regular endpoint for all other cases.
String useMtlsEndpoint = System.getenv("GOOGLE_API_USE_MTLS_ENDPOINT");
useMtlsEndpoint = useMtlsEndpoint == null ? "auto" : useMtlsEndpoint;
if ("always".equals(useMtlsEndpoint) || ("auto".equals(useMtlsEndpoint) && transport != null && transport.isMtls())) {
return DEFAULT_MTLS_ROOT_URL;
}
return DEFAULT_ROOT_URL;
}
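// For example (editor's note, derived from the logic above): with GOOGLE_API_USE_MTLS_ENDPOINT=always
// the builder targets DEFAULT_MTLS_ROOT_URL; with the variable unset (treated as "auto") and a
// non-mTLS transport, DEFAULT_ROOT_URL is used.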
/**
* Returns an instance of a new builder.
*
* @param transport HTTP transport, which should normally be:
* <ul>
* <li>Google App Engine:
* {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
* <li>Android: {@code newCompatibleTransport} from
* {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
* <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
* </li>
* </ul>
* @param jsonFactory JSON factory, which may be:
* <ul>
* <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
* <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
* <li>Android Honeycomb or higher:
* {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
* </ul>
* @param httpRequestInitializer HTTP request initializer or {@code null} for none
* @since 1.7
*/
public Builder(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
super(
transport,
jsonFactory,
Builder.chooseEndpoint(transport),
DEFAULT_SERVICE_PATH,
httpRequestInitializer,
false);
setBatchPath(DEFAULT_BATCH_PATH);
}
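// Editor's sketch: constructing the client with the transport/JSON factory classes suggested in
// the javadoc above ("MyApplication" is a placeholder; newTrustedTransport() may throw checked
// exceptions that a real caller has to handle).
//
//   ChromePolicy service = new ChromePolicy.Builder(
//           com.google.api.client.googleapis.javanet.GoogleNetHttpTransport.newTrustedTransport(),
//           new com.google.api.client.json.gson.GsonFactory(),
//           /* httpRequestInitializer= */ null)
//       .setApplicationName("MyApplication")
//       .build();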
/** Builds a new instance of {@link ChromePolicy}. */
@Override
public ChromePolicy build() {
return new ChromePolicy(this);
}
@Override
public Builder setRootUrl(String rootUrl) {
return (Builder) super.setRootUrl(rootUrl);
}
@Override
public Builder setServicePath(String servicePath) {
return (Builder) super.setServicePath(servicePath);
}
@Override
public Builder setBatchPath(String batchPath) {
return (Builder) super.setBatchPath(batchPath);
}
@Override
public Builder setHttpRequestInitializer(com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
return (Builder) super.setHttpRequestInitializer(httpRequestInitializer);
}
@Override
public Builder setApplicationName(String applicationName) {
return (Builder) super.setApplicationName(applicationName);
}
@Override
public Builder setSuppressPatternChecks(boolean suppressPatternChecks) {
return (Builder) super.setSuppressPatternChecks(suppressPatternChecks);
}
@Override
public Builder setSuppressRequiredParameterChecks(boolean suppressRequiredParameterChecks) {
return (Builder) super.setSuppressRequiredParameterChecks(suppressRequiredParameterChecks);
}
@Override
public Builder setSuppressAllChecks(boolean suppressAllChecks) {
return (Builder) super.setSuppressAllChecks(suppressAllChecks);
}
/**
* Set the {@link ChromePolicyRequestInitializer}.
*
* @since 1.12
*/
public Builder setChromePolicyRequestInitializer(
ChromePolicyRequestInitializer chromepolicyRequestInitializer) {
return (Builder) super.setGoogleClientRequestInitializer(chromepolicyRequestInitializer);
}
@Override
public Builder setGoogleClientRequestInitializer(
com.google.api.client.googleapis.services.GoogleClientRequestInitializer googleClientRequestInitializer) {
return (Builder) super.setGoogleClientRequestInitializer(googleClientRequestInitializer);
}
}
}
|
[
"\"GOOGLE_API_USE_MTLS_ENDPOINT\""
] |
[] |
[
"GOOGLE_API_USE_MTLS_ENDPOINT"
] |
[]
|
["GOOGLE_API_USE_MTLS_ENDPOINT"]
|
java
| 1 | 0 | |
ethereum/mobile/android_test.go
|
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package geth
import (
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"testing"
"time"
"github.com/tenderly/solidity-hmr/ethereum/internal/build"
)
// androidTestClass is a Java class to do some lightweight tests against the Android
// bindings. The goal is not to test each individual functionality, rather just to
// catch breaking API and/or implementation changes.
const androidTestClass = `
package go;
import android.test.InstrumentationTestCase;
import android.test.MoreAsserts;
import java.math.BigInteger;
import java.util.Arrays;
import org.ethereum.geth.*;
public class AndroidTest extends InstrumentationTestCase {
public AndroidTest() {}
public void testAccountManagement() {
// Create an encrypted keystore with light crypto parameters.
KeyStore ks = new KeyStore(getInstrumentation().getContext().getFilesDir() + "/keystore", Geth.LightScryptN, Geth.LightScryptP);
try {
// Create a new account with the specified encryption passphrase.
Account newAcc = ks.newAccount("Creation password");
// Export the newly created account with a different passphrase. The returned
// data from this method invocation is a JSON encoded, encrypted key-file.
byte[] jsonAcc = ks.exportKey(newAcc, "Creation password", "Export password");
// Update the passphrase on the account created above inside the local keystore.
ks.updateAccount(newAcc, "Creation password", "Update password");
// Delete the account updated above from the local keystore.
ks.deleteAccount(newAcc, "Update password");
// Import back the account we've exported (and then deleted) above with yet
// again a fresh passphrase.
Account impAcc = ks.importKey(jsonAcc, "Export password", "Import password");
// Create a new account to sign transactions with
Account signer = ks.newAccount("Signer password");
Transaction tx = new Transaction(
1, new Address("0x0000000000000000000000000000000000000000"),
new BigInt(0), 0, new BigInt(1), null); // Random empty transaction
BigInt chain = new BigInt(1); // Chain identifier of the main net
// Sign a transaction with a single authorization
Transaction signed = ks.signTxPassphrase(signer, "Signer password", tx, chain);
// Sign a transaction with multiple manually cancelled authorizations
ks.unlock(signer, "Signer password");
signed = ks.signTx(signer, tx, chain);
ks.lock(signer.getAddress());
// Sign a transaction with multiple automatically cancelled authorizations
ks.timedUnlock(signer, "Signer password", 1000000000);
signed = ks.signTx(signer, tx, chain);
} catch (Exception e) {
fail(e.toString());
}
}
public void testInprocNode() {
Context ctx = new Context();
try {
// Start up a new inprocess node
Node node = new Node(getInstrumentation().getContext().getFilesDir() + "/.ethereum", new NodeConfig());
node.start();
// Retrieve some data via function calls (we don't really care about the results)
NodeInfo info = node.getNodeInfo();
info.getName();
info.getListenerAddress();
info.getProtocols();
// Retrieve some data via the APIs (we don't really care about the results)
EthereumClient ec = node.getEthereumClient();
ec.getBlockByNumber(ctx, -1).getNumber();
NewHeadHandler handler = new NewHeadHandler() {
@Override public void onError(String error) {}
@Override public void onNewHead(final Header header) {}
};
ec.subscribeNewHead(ctx, handler, 16);
} catch (Exception e) {
fail(e.toString());
}
}
// Tests that recovering transaction signers works for both Homestead and EIP155
// signatures too. Regression test for go-ethereum issue #14599.
public void testIssue14599() {
try {
byte[] preEIP155RLP = new BigInteger("f901fc8032830138808080b901ae60056013565b6101918061001d6000396000f35b3360008190555056006001600060e060020a6000350480630a874df61461003a57806341c0e1b514610058578063a02b161e14610066578063dbbdf0831461007757005b610045600435610149565b80600160a060020a031660005260206000f35b610060610161565b60006000f35b6100716004356100d4565b60006000f35b61008560043560243561008b565b60006000f35b600054600160a060020a031632600160a060020a031614156100ac576100b1565b6100d0565b8060018360005260205260406000208190555081600060005260206000a15b5050565b600054600160a060020a031633600160a060020a031614158015610118575033600160a060020a0316600182600052602052604060002054600160a060020a031614155b61012157610126565b610146565b600060018260005260205260406000208190555080600060005260206000a15b50565b60006001826000526020526040600020549050919050565b600054600160a060020a031633600160a060020a0316146101815761018f565b600054600160a060020a0316ff5b561ca0c5689ed1ad124753d54576dfb4b571465a41900a1dff4058d8adf16f752013d0a01221cbd70ec28c94a3b55ec771bcbc70778d6ee0b51ca7ea9514594c861b1884", 16).toByteArray();
preEIP155RLP = Arrays.copyOfRange(preEIP155RLP, 1, preEIP155RLP.length);
byte[] postEIP155RLP = new BigInteger("f86b80847735940082520894ef5bbb9bba2e1ca69ef81b23a8727d889f3ef0a1880de0b6b3a7640000802ba06fef16c44726a102e6d55a651740636ef8aec6df3ebf009e7b0c1f29e4ac114aa057e7fbc69760b522a78bb568cfc37a58bfdcf6ea86cb8f9b550263f58074b9cc", 16).toByteArray();
postEIP155RLP = Arrays.copyOfRange(postEIP155RLP, 1, postEIP155RLP.length);
Transaction preEIP155 = new Transaction(preEIP155RLP);
Transaction postEIP155 = new Transaction(postEIP155RLP);
preEIP155.getFrom(null); // Homestead should accept homestead
preEIP155.getFrom(new BigInt(4)); // EIP155 should accept homestead (missing chain ID)
postEIP155.getFrom(new BigInt(4)); // EIP155 should accept EIP 155
try {
postEIP155.getFrom(null);
fail("EIP155 transaction accepted by Homestead");
} catch (Exception e) {}
} catch (Exception e) {
fail(e.toString());
}
}
}
`
// TestAndroid runs the Android java test class specified above.
//
// This requires the gradle command in PATH and the Android SDK whose path is available
// through the ANDROID_HOME environment variable. To successfully run the tests, an Android
// device must also be available with debugging enabled.
//
// This method has been adapted from golang.org/x/mobile/bind/java/seq_test.go/runTest
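// Example invocation (editor's sketch; the SDK path is illustrative):
//
//   ANDROID_HOME=$HOME/Android/Sdk go test -run TestAndroid ./mobile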
func TestAndroid(t *testing.T) {
// Skip tests on Windows altogether
if runtime.GOOS == "windows" {
t.Skip("cannot test Android bindings on Windows, skipping")
}
// Make sure all the Android tools are installed
if _, err := exec.Command("which", "gradle").CombinedOutput(); err != nil {
t.Skip("command gradle not found, skipping")
}
if sdk := os.Getenv("ANDROID_HOME"); sdk == "" {
// Android SDK not explicitly given, try to auto-resolve
autopath := filepath.Join(os.Getenv("HOME"), "Android", "Sdk")
if _, err := os.Stat(autopath); err != nil {
t.Skip("ANDROID_HOME environment var not set, skipping")
}
os.Setenv("ANDROID_HOME", autopath)
}
if _, err := exec.Command("which", "gomobile").CombinedOutput(); err != nil {
t.Log("gomobile missing, installing it...")
if out, err := exec.Command("go", "get", "golang.org/x/mobile/cmd/gomobile").CombinedOutput(); err != nil {
t.Fatalf("install failed: %v\n%s", err, string(out))
}
t.Log("initializing gomobile...")
start := time.Now()
if _, err := exec.Command("gomobile", "init").CombinedOutput(); err != nil {
t.Fatalf("initialization failed: %v", err)
}
t.Logf("initialization took %v", time.Since(start))
}
// Create and switch to a temporary workspace
workspace, err := ioutil.TempDir("", "geth-android-")
if err != nil {
t.Fatalf("failed to create temporary workspace: %v", err)
}
defer os.RemoveAll(workspace)
pwd, err := os.Getwd()
if err != nil {
t.Fatalf("failed to get current working directory: %v", err)
}
if err := os.Chdir(workspace); err != nil {
t.Fatalf("failed to switch to temporary workspace: %v", err)
}
defer os.Chdir(pwd)
// Create the skeleton of the Android project
for _, dir := range []string{"src/main", "src/androidTest/java/org/ethereum/gethtest", "libs"} {
err = os.MkdirAll(dir, os.ModePerm)
if err != nil {
t.Fatal(err)
}
}
// Generate the mobile bindings for Geth and add the tester class
gobind := exec.Command("gomobile", "bind", "-javapkg", "org.ethereum", "github.com/tenderly/solidity-hmr/ethereum/mobile")
if output, err := gobind.CombinedOutput(); err != nil {
t.Logf("%s", output)
t.Fatalf("failed to run gomobile bind: %v", err)
}
build.CopyFile(filepath.Join("libs", "geth.aar"), "geth.aar", os.ModePerm)
if err = ioutil.WriteFile(filepath.Join("src", "androidTest", "java", "org", "ethereum", "gethtest", "AndroidTest.java"), []byte(androidTestClass), os.ModePerm); err != nil {
t.Fatalf("failed to write Android test class: %v", err)
}
// Finish creating the project and run the tests via gradle
if err = ioutil.WriteFile(filepath.Join("src", "main", "AndroidManifest.xml"), []byte(androidManifest), os.ModePerm); err != nil {
t.Fatalf("failed to write Android manifest: %v", err)
}
if err = ioutil.WriteFile("build.gradle", []byte(gradleConfig), os.ModePerm); err != nil {
t.Fatalf("failed to write gradle build file: %v", err)
}
if output, err := exec.Command("gradle", "connectedAndroidTest").CombinedOutput(); err != nil {
t.Logf("%s", output)
t.Errorf("failed to run gradle test: %v", err)
}
}
const androidManifest = `<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="org.ethereum.gethtest"
android:versionCode="1"
android:versionName="1.0">
<uses-permission android:name="android.permission.INTERNET" />
</manifest>`
const gradleConfig = `buildscript {
repositories {
jcenter()
}
dependencies {
classpath 'com.android.tools.build:gradle:2.2.3'
}
}
allprojects {
repositories { jcenter() }
}
apply plugin: 'com.android.library'
android {
compileSdkVersion 'android-19'
buildToolsVersion '21.1.2'
defaultConfig { minSdkVersion 15 }
}
repositories {
flatDir { dirs 'libs' }
}
dependencies {
compile 'com.android.support:appcompat-v7:19.0.0'
compile(name: "geth", ext: "aar")
}
`
|
[
"\"ANDROID_HOME\"",
"\"HOME\""
] |
[] |
[
"HOME",
"ANDROID_HOME"
] |
[]
|
["HOME", "ANDROID_HOME"]
|
go
| 2 | 0 | |
python/ray/workers/setup_runtime_env.py
|
import os
import sys
import argparse
import json
import logging
import yaml
import hashlib
from filelock import FileLock
from typing import Optional, List, Dict, Any
from pathlib import Path
import ray
from ray._private.conda import (get_conda_activate_commands,
get_or_create_conda_env)
from ray._private.utils import try_to_create_directory
from ray._private.utils import (get_wheel_filename, get_master_wheel_url,
get_release_wheel_url)
from ray.workers.pluggable_runtime_env import RuntimeEnvContext
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
parser.add_argument(
"--serialized-runtime-env",
type=str,
help="the serialized parsed runtime env dict")
parser.add_argument(
"--serialized-runtime-env-context",
type=str,
help="the serialized runtime env context")
# The worker is not set up yet, so we can't get session_dir from the worker.
parser.add_argument(
"--session-dir", type=str, help="the directory for the current session")
def setup_runtime_env(runtime_env: dict, session_dir):
if runtime_env.get("conda") or runtime_env.get("pip"):
conda_dict = get_conda_dict(runtime_env, session_dir)
if isinstance(runtime_env.get("conda"), str):
conda_env_name = runtime_env["conda"]
else:
assert conda_dict is not None
py_version = ".".join(map(str,
sys.version_info[:3])) # like 3.6.10
ray_pip = current_ray_pip_specifier()
if ray_pip and not runtime_env.get("_skip_inject_ray"):
extra_pip_dependencies = [ray_pip, "ray[default]"]
else:
extra_pip_dependencies = []
conda_dict = inject_dependencies(conda_dict, py_version,
extra_pip_dependencies)
# Locking to avoid multiple processes installing concurrently
conda_hash = hashlib.sha1(
json.dumps(conda_dict,
sort_keys=True).encode("utf-8")).hexdigest()
conda_hash_str = f"conda-generated-{conda_hash}"
file_lock_name = f"ray-{conda_hash_str}.lock"
with FileLock(os.path.join(session_dir, file_lock_name)):
conda_dir = os.path.join(session_dir, "runtime_resources",
"conda")
try_to_create_directory(conda_dir)
conda_yaml_path = os.path.join(conda_dir, "environment.yml")
with open(conda_yaml_path, "w") as file:
# Sort keys because we hash based on the file contents,
# and we don't want the hash to depend on the order
# of the dependencies.
yaml.dump(conda_dict, file, sort_keys=True)
conda_env_name = get_or_create_conda_env(
conda_yaml_path, conda_dir)
return RuntimeEnvContext(conda_env_name)
return RuntimeEnvContext()
def setup_worker(input_args):
# remaining_args contains the arguments to the original worker command,
# minus the python executable, e.g. default_worker.py --node-ip-address=...
args, remaining_args = parser.parse_known_args(args=input_args)
commands = []
py_executable: str = sys.executable
runtime_env: dict = json.loads(args.serialized_runtime_env or "{}")
runtime_env_context: RuntimeEnvContext = None
# The Ray client server sets up the runtime env by itself instead of relying on the agent.
if runtime_env.get("conda") or runtime_env.get("pip"):
if not args.serialized_runtime_env_context:
runtime_env_context = setup_runtime_env(runtime_env,
args.session_dir)
else:
runtime_env_context = RuntimeEnvContext.deserialize(
args.serialized_runtime_env_context)
# activate conda
if runtime_env_context and runtime_env_context.conda_env_name:
py_executable = "python"
conda_activate_commands = get_conda_activate_commands(
runtime_env_context.conda_env_name)
if (conda_activate_commands):
commands += conda_activate_commands
elif runtime_env.get("conda"):
logger.warning(
"Conda env name is not found in context, "
"but conda exists in runtime env. The runtime env %s, "
"the context %s.", args.serialized_runtime_env,
args.serialized_runtime_env_context)
commands += [" ".join([f"exec {py_executable}"] + remaining_args)]
command_separator = " && "
command_str = command_separator.join(commands)
# update env vars
if runtime_env.get("env_vars"):
env_vars = runtime_env["env_vars"]
os.environ.update(env_vars)
os.execvp("bash", ["bash", "-c", command_str])
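# Editor's note (illustrative sketch): with a resolved conda env, command_str ends up roughly as
#   "<conda activate commands> && exec python default_worker.py --node-ip-address=..."
# and is executed through "bash -c" by the os.execvp call above, replacing this setup process.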
def get_conda_dict(runtime_env, runtime_env_dir) -> Optional[Dict[Any, Any]]:
""" Construct a conda dependencies dict from a runtime env.
This function does not inject Ray or Python into the conda dict.
If the runtime env does not specify pip or conda, or if it specifies
the name of a preinstalled conda environment, this function returns
None. If pip is specified, a conda dict is created containing the
pip dependencies. If conda is already given as a dict, this function
is the identity function.
"""
if runtime_env.get("conda"):
if isinstance(runtime_env["conda"], dict):
return runtime_env["conda"]
else:
return None
if runtime_env.get("pip"):
requirements_txt = runtime_env["pip"]
pip_hash = hashlib.sha1(requirements_txt.encode("utf-8")).hexdigest()
pip_hash_str = f"pip-generated-{pip_hash}"
conda_dir = os.path.join(runtime_env_dir, "conda")
requirements_txt_path = os.path.join(
conda_dir, f"requirements-{pip_hash_str}.txt")
conda_dict = {
"name": pip_hash_str,
"dependencies": ["pip", {
"pip": [f"-r {requirements_txt_path}"]
}]
}
file_lock_name = f"ray-{pip_hash_str}.lock"
with FileLock(os.path.join(runtime_env_dir, file_lock_name)):
try_to_create_directory(conda_dir)
with open(requirements_txt_path, "w") as file:
file.write(requirements_txt)
return conda_dict
return None
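# Editor's note: an illustrative sketch of the pip branch above (values are made up).
#
#   env = {"pip": "requests==2.25.1\n"}
#   conda_dict = get_conda_dict(env, "/tmp/ray/session_dir")
#   # conda_dict is of the form:
#   # {"name": "pip-generated-<sha1>",
#   #  "dependencies": ["pip", {"pip": ["-r /tmp/ray/session_dir/conda/requirements-pip-generated-<sha1>.txt"]}]}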
def current_ray_pip_specifier() -> Optional[str]:
"""The pip requirement specifier for the running version of Ray.
Returns:
A string which can be passed to `pip install` to install the
currently running Ray version, or None if running on a version
built from source locally (likely if you are developing Ray).
Examples:
Returns "ray[all]==1.4.0" if running the stable release
Returns "https://s3-us-west-2.amazonaws.com/ray-wheels/master/[..].whl"
if running the nightly or a specific commit
"""
if os.environ.get("RAY_CI_POST_WHEEL_TESTS"):
# Running in Buildkite CI after the wheel has been built.
# Wheels are in the ray/.whl directory, and the present file is
# at ray/python/ray/workers. Use relative paths to allow for
# testing locally if needed.
return os.path.join(
Path(__file__).resolve().parents[3], ".whl", get_wheel_filename())
elif ray.__commit__ == "{{RAY_COMMIT_SHA}}":
# Running on a version built from source locally.
logger.warning(
"Current Ray version could not be detected, most likely "
"because you are using a version of Ray "
"built from source. If you wish to use runtime_env, "
"you can try building a wheel and including the wheel "
"explicitly as a pip dependency.")
return None
elif "dev" in ray.__version__:
# Running on a nightly wheel.
return get_master_wheel_url()
else:
return get_release_wheel_url()
def inject_dependencies(
conda_dict: Dict[Any, Any],
py_version: str,
pip_dependencies: Optional[List[str]] = None) -> Dict[Any, Any]:
"""Add Ray, Python and (optionally) extra pip dependencies to a conda dict.
Args:
conda_dict (dict): A dict representing the JSON-serialized conda
environment YAML file. This dict will be modified and returned.
py_version (str): A string representing a Python version to inject
into the conda dependencies, e.g. "3.7.7"
pip_dependencies (List[str]): A list of pip dependencies that
will be prepended to the list of pip dependencies in
the conda dict. If the conda dict does not already have a "pip"
field, one will be created.
Returns:
The modified dict. (Note: the input argument conda_dict is modified
and returned.)
"""
if pip_dependencies is None:
pip_dependencies = []
if conda_dict.get("dependencies") is None:
conda_dict["dependencies"] = []
# Inject Python dependency.
deps = conda_dict["dependencies"]
# Add current python dependency. If the user has already included a
# python version dependency, conda will raise a readable error if the two
# are incompatible, e.g:
# ResolvePackageNotFound: - python[version='3.5.*,>=3.6']
deps.append(f"python={py_version}")
if "pip" not in deps:
deps.append("pip")
# Insert pip dependencies.
found_pip_dict = False
for dep in deps:
if isinstance(dep, dict) and dep.get("pip") and isinstance(
dep["pip"], list):
dep["pip"] = pip_dependencies + dep["pip"]
found_pip_dict = True
break
if not found_pip_dict:
deps.append({"pip": pip_dependencies})
return conda_dict
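# Editor's note: an illustrative sketch of inject_dependencies (values are made up).
#
#   base = {"dependencies": ["codecov"]}
#   inject_dependencies(base, "3.7.7", ["requests"])
#   # -> {"dependencies": ["codecov", "python=3.7.7", "pip", {"pip": ["requests"]}]}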
if __name__ == "__main__":
setup_worker(sys.argv[1:])
|
[] |
[] |
[
"RAY_CI_POST_WHEEL_TESTS"
] |
[]
|
["RAY_CI_POST_WHEEL_TESTS"]
|
python
| 1 | 0 | |
data/MuPoTS/MuPoTS.py
|
import os
import os.path as osp
import scipy.io as sio
import numpy as np
from pycocotools.coco import COCO
from config import cfg
import json
import cv2
import random
import math
from utils.pose_utils import pixel2cam, process_bbox
from utils.vis import vis_keypoints, vis_3d_skeleton
class MuPoTS:
def __init__(self, data_split):
self.data_split = data_split
self.img_dir = osp.join('..', 'data', 'MuPoTS', 'data', 'MultiPersonTestSet')
self.test_annot_path = osp.join('..', 'data', 'MuPoTS', 'data', 'MuPoTS-3D.json')
self.human_bbox_root_dir = osp.join('..', 'data', 'MuPoTS', 'bbox_root', 'bbox_root_mupots_output.json')
self.joint_num = 21 # MuCo-3DHP
self.joints_name = ('Head_top', 'Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Hip', 'R_Knee', 'R_Ankle', 'L_Hip', 'L_Knee', 'L_Ankle', 'Pelvis', 'Spine', 'Head', 'R_Hand', 'L_Hand', 'R_Toe', 'L_Toe') # MuCo-3DHP
self.original_joint_num = 17 # MuPoTS
self.original_joints_name = ('Head_top', 'Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Hip', 'R_Knee', 'R_Ankle', 'L_Hip', 'L_Knee', 'L_Ankle', 'Pelvis', 'Spine', 'Head') # MuPoTS
self.flip_pairs = ( (2, 5), (3, 6), (4, 7), (8, 11), (9, 12), (10, 13) )
self.skeleton = ( (0, 16), (16, 1), (1, 15), (15, 14), (14, 8), (14, 11), (8, 9), (9, 10), (11, 12), (12, 13), (1, 2), (2, 3), (3, 4), (1, 5), (5, 6), (6, 7) )
self.eval_joint = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
self.joints_have_depth = True
self.root_idx = self.joints_name.index('Pelvis')
self.data = self.load_data()
def load_data(self):
if self.data_split != 'test':
print('Unknown data subset')
assert 0
data = []
db = COCO(self.test_annot_path)
# use gt bbox and root
if cfg.use_gt_info:
print("Get bounding box and root from groundtruth")
for aid in db.anns.keys():
ann = db.anns[aid]
if ann['is_valid'] == 0:
continue
image_id = ann['image_id']
img = db.loadImgs(image_id)[0]
img_path = osp.join(self.img_dir, img['file_name'])
fx, fy, cx, cy = img['intrinsic']
f = np.array([fx, fy]); c = np.array([cx, cy]);
joint_cam = np.array(ann['keypoints_cam'])
root_cam = joint_cam[self.root_idx]
joint_img = np.array(ann['keypoints_img'])
joint_img = np.concatenate([joint_img, joint_cam[:,2:]],1)
joint_img[:,2] = joint_img[:,2] - root_cam[2]
joint_vis = np.ones((self.original_joint_num,1))
bbox = np.array(ann['bbox'])
img_width, img_height = img['width'], img['height']
bbox = process_bbox(bbox, img_width, img_height)
if bbox is None: continue
data.append({
'img_path': img_path,
'bbox': bbox,
'joint_img': joint_img, # [org_img_x, org_img_y, depth - root_depth]
'joint_cam': joint_cam, # [X, Y, Z] in camera coordinate
'joint_vis': joint_vis,
'root_cam': root_cam, # [X, Y, Z] in camera coordinate
'f': f,
'c': c,
})
else:
print("Get bounding box and root from " + self.human_bbox_root_dir)
with open(self.human_bbox_root_dir) as f:
annot = json.load(f)
for i in range(len(annot)):
# if i > 100:
# break
image_id = annot[i]['image_id']
img = db.loadImgs(image_id)[0]
img_width, img_height = img['width'], img['height']
img_path = osp.join(self.img_dir, img['file_name'])
fx, fy, cx, cy = img['intrinsic']
f = np.array([fx, fy]); c = np.array([cx, cy]);
root_cam = np.array(annot[i]['root_cam']).reshape(3)
bbox = np.array(annot[i]['bbox']).reshape(4)
data.append({
'img_path': img_path,
'bbox': bbox,
'joint_img': np.zeros((self.original_joint_num, 3)), # dummy
'joint_cam': np.zeros((self.original_joint_num, 3)), # dummy
'joint_vis': np.zeros((self.original_joint_num, 1)), # dummy
'root_cam': root_cam, # [X, Y, Z] in camera coordinate
'f': f,
'c': c,
})
return data
# raw evaluate to output mat
# def evaluate(self, preds, result_dir):
#
# print('Evaluation start...')
# gts = self.data
# sample_num = len(preds)
# joint_num = self.original_joint_num
#
# pred_2d_save = {}
# pred_3d_save = {}
# for n in range(sample_num):
#
# gt = gts[n]
# f = gt['f']
# c = gt['c']
# bbox = gt['bbox']
# gt_3d_root = gt['root_cam']
# img_name = gt['img_path'].split('/')
# img_name = img_name[-2] + '_' + img_name[-1].split('.')[0] # e.g., TS1_img_0001
#
# # restore coordinates to original space
# pred_2d_kpt = preds[n].copy()
# # only consider eval_joint
# pred_2d_kpt = np.take(pred_2d_kpt, self.eval_joint, axis=0)
# pred_2d_kpt[:,0] = pred_2d_kpt[:,0] / cfg.output_shape[1] * bbox[2] + bbox[0]
# pred_2d_kpt[:,1] = pred_2d_kpt[:,1] / cfg.output_shape[0] * bbox[3] + bbox[1]
# pred_2d_kpt[:,2] = (pred_2d_kpt[:,2] / cfg.depth_dim * 2 - 1) * (cfg.bbox_3d_shape[0]/2) + gt_3d_root[2]
#
# # 2d kpt save
# if img_name in pred_2d_save:
# pred_2d_save[img_name].append(pred_2d_kpt[:,:2])
# else:
# pred_2d_save[img_name] = [pred_2d_kpt[:,:2]]
#
# vis = False
# if vis:
# cvimg = cv2.imread(gt['img_path'], cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
# filename = str(random.randrange(1,500))
# tmpimg = cvimg.copy().astype(np.uint8)
# tmpkps = np.zeros((3,joint_num))
# tmpkps[0,:], tmpkps[1,:] = pred_2d_kpt[:,0], pred_2d_kpt[:,1]
# tmpkps[2,:] = 1
# tmpimg = vis_keypoints(tmpimg, tmpkps, self.skeleton)
# cv2.imwrite(filename + '_output.jpg', tmpimg)
#
# # back project to camera coordinate system
# pred_3d_kpt = pixel2cam(pred_2d_kpt, f, c)
#
# # 3d kpt save
# if img_name in pred_3d_save:
# pred_3d_save[img_name].append(pred_3d_kpt)
# else:
# pred_3d_save[img_name] = [pred_3d_kpt]
#
# output_path = osp.join(result_dir,'preds_2d_kpt_mupots.mat')
# sio.savemat(output_path, pred_2d_save)
# print("Testing result is saved at " + output_path)
# output_path = osp.join(result_dir,'preds_3d_kpt_mupots.mat')
# sio.savemat(output_path, pred_3d_save)
# print("Testing result is saved at " + output_path)
def evaluate(self, preds, result_dir):
print('Evaluation start...')
gts = self.data
sample_num = len(preds)
joint_num = self.original_joint_num
pred_list = []
# pred_2d_save = {}
# pred_3d_save = {}
for n in range(sample_num):
gt = gts[n]
f = gt['f']
c = gt['c']
bbox = gt['bbox']
gt_3d_root = gt['root_cam']
img_name = gt['img_path'].split('/')
img_name = img_name[-2] + '_' + img_name[-1].split('.')[0] # e.g., TS1_img_0001
# restore coordinates to original space
pred_2d_kpt = preds[n].copy()
# only consider eval_joint
pred_2d_kpt = np.take(pred_2d_kpt, self.eval_joint, axis=0)
pred_2d_kpt[:, 0] = pred_2d_kpt[:, 0] / cfg.output_shape[1] * bbox[2] + bbox[0]
pred_2d_kpt[:, 1] = pred_2d_kpt[:, 1] / cfg.output_shape[0] * bbox[3] + bbox[1]
pred_2d_kpt[:, 2] = (pred_2d_kpt[:, 2] / cfg.depth_dim * 2 - 1) * (cfg.bbox_3d_shape[0] / 2) + gt_3d_root[2]
# 2d kpt save
# if img_name in pred_2d_save:
# pred_2d_save[img_name].append(pred_2d_kpt[:, :2])
# else:
# pred_2d_save[img_name] = [pred_2d_kpt[:, :2]]
vis = False
if vis:
cvimg = cv2.imread(gt['img_path'], cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
filename = str(random.randrange(1, 500))
tmpimg = cvimg.copy().astype(np.uint8)
tmpkps = np.zeros((3, joint_num))
tmpkps[0, :], tmpkps[1, :] = pred_2d_kpt[:, 0], pred_2d_kpt[:, 1]
tmpkps[2, :] = 1
tmpimg = vis_keypoints(tmpimg, tmpkps, self.skeleton)
cv2.imwrite(filename + '_output.jpg', tmpimg)
# back project to camera coordinate system
pred_3d_kpt = pixel2cam(pred_2d_kpt, f, c)
# # 3d kpt save
# if img_name in pred_3d_save:
# pred_3d_save[img_name].append(pred_3d_kpt)
# else:
# pred_3d_save[img_name] = [pred_3d_kpt]
instance_case = {}
instance_case['f'] = f.tolist()
instance_case['c'] = c.tolist()
instance_case['bbox'] = bbox.tolist()
instance_case['root_cam'] = gt['root_cam'].tolist()
instance_case['img_path'] = gt['img_path']
instance_case['joint_cam'] = pred_3d_kpt.tolist()
instance_case['joint_img'] = pred_2d_kpt[:, :2].tolist()
pred_list.append(instance_case)
# output_path = osp.join(result_dir, 'preds_2d_kpt_mupots.mat')
# sio.savemat(output_path, pred_2d_save)
# print("Testing result is saved at " + output_path)
# output_path = osp.join(result_dir, 'preds_3d_kpt_mupots.mat')
# sio.savemat(output_path, pred_3d_save)
output_path = osp.join(result_dir, 'preds_2d_3d_kpt_mupots.json')
with open(output_path, 'w') as f:
json.dump(pred_list, f)
print("Testing result is saved at " + output_path)
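# Editor's note (assumption): pixel2cam is expected to back-project with the pinhole camera model,
# i.e. X = (u - cx) / fx * Z and Y = (v - cy) / fy * Z with Z taken from the predicted depth,
# consistent with f = [fx, fy] and c = [cx, cy] packed in load_data above.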
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
gomat.go
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
)
func RecursiveDataProcess(d map[string]interface{}) {
for k, v := range d {
switch dd := v.(type) {
case []interface{}:
for _, u := range dd {
uu, _ := u.(map[string]interface{})
RecursiveDataProcess(uu)
}
case map[string]interface{}:
fmt.Println(k, " :")
for l, m := range dd {
fmt.Println(" ", l, " :", m)
}
default:
fmt.Println(k, " :", dd)
}
}
}
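// Editor's note (illustrative): for a decoded response fragment such as
//   {"valid_response": true, "response": [{"energy": -123.4, "spacegroup": {...}}]}
// RecursiveDataProcess prints scalars as "key  : value", prints nested maps key by key,
// and recurses into each map found inside a list.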
func main() {
preamble := "https://www.materialsproject.org/rest/v1"
request_type := "materials" //"materials", "battery", "reaction", "mpquery" and "api_check"
if len(os.Args) < 2 {
fmt.Fprintln(os.Stderr, "usage: gomat <material identifier>")
os.Exit(1)
}
identifier := os.Args[1]
data_type := "vasp"
mapi_key := os.Getenv("MAPI_KEY")
url := fmt.Sprintf(
"%s/%s/%s/%s?API_KEY=%s",
preamble,
request_type,
identifier,
data_type,
mapi_key)
client := &http.Client{}
req, httperr := http.NewRequest("GET", url, nil)
if httperr != nil {
fmt.Fprintf(os.Stderr, "gomat: %s\n", httperr)
os.Exit(1)
}
resp, reqerr := client.Do(req)
if reqerr != nil {
fmt.Fprintf(os.Stderr, "gomat: %s\n", reqerr)
os.Exit(1)
}
defer resp.Body.Close()
body, dataerr := ioutil.ReadAll(resp.Body)
//fmt.Printf(string(body))
if dataerr != nil {
fmt.Fprintf(os.Stderr, "gomat: %s\n", dataerr)
os.Exit(1)
}
var data interface{}
jsonerr := json.Unmarshal(body, &data)
if jsonerr != nil {
fmt.Fprintf(os.Stderr, "gomat: %s\n", jsonerr)
os.Exit(1)
}
d := data.(map[string]interface{})
RecursiveDataProcess(d)
fmt.Println("Done")
}
|
[
"\"MAPI_KEY\""
] |
[] |
[
"MAPI_KEY"
] |
[]
|
["MAPI_KEY"]
|
go
| 1 | 0 | |
commands/v2/create_space_command.go
|
package v2
import (
"os"
"code.cloudfoundry.org/cli/cf/cmd"
"code.cloudfoundry.org/cli/commands"
"code.cloudfoundry.org/cli/commands/flags"
)
type CreateSpaceCommand struct {
RequiredArgs flags.Space `positional-args:"yes"`
Organization string `short:"o" description:"Organization"`
Quota string `short:"q" description:"Quota to assign to the newly created space"`
usage interface{} `usage:"CF_NAME create-space SPACE [-o ORG] [-q SPACE-QUOTA]"`
relatedCommands interface{} `related_commands:"target, space-quotas, spaces"`
}
func (_ CreateSpaceCommand) Setup(config commands.Config, ui commands.UI) error {
return nil
}
func (_ CreateSpaceCommand) Execute(args []string) error {
cmd.Main(os.Getenv("CF_TRACE"), os.Args)
return nil
}
|
[
"\"CF_TRACE\""
] |
[] |
[
"CF_TRACE"
] |
[]
|
["CF_TRACE"]
|
go
| 1 | 0 | |
go-apps/meep-loc-serv/server/loc-serv.go
|
/*
* Copyright (c) 2019 InterDigital Communications, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package server
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"sync"
"time"
sbi "github.com/InterDigitalInc/AdvantEDGE/go-apps/meep-loc-serv/sbi"
dkm "github.com/InterDigitalInc/AdvantEDGE/go-packages/meep-data-key-mgr"
httpLog "github.com/InterDigitalInc/AdvantEDGE/go-packages/meep-http-logger"
log "github.com/InterDigitalInc/AdvantEDGE/go-packages/meep-logger"
met "github.com/InterDigitalInc/AdvantEDGE/go-packages/meep-metrics"
redis "github.com/InterDigitalInc/AdvantEDGE/go-packages/meep-redis"
"github.com/gorilla/mux"
)
const LocServBasePath = "/location/v2/"
const locServKey = "loc-serv:"
const logModuleLocServ = "meep-loc-serv"
const serviceName = "Location Service"
const typeZone = "zone"
const typeAccessPoint = "accessPoint"
const typeUser = "user"
const typeZonalSubscription = "zonalsubs"
const typeUserSubscription = "usersubs"
const typeZoneStatusSubscription = "zonestatus"
const (
notifZonalPresence = "ZonalPresenceNotification"
notifZoneStatus = "ZoneStatusNotification"
)
type UeUserData struct {
queryZoneId []string
queryApId []string
queryAddress []string
userList *UserList
}
type ApUserData struct {
queryInterestRealm string
apList *AccessPointList
}
var nextZonalSubscriptionIdAvailable int
var nextUserSubscriptionIdAvailable int
var nextZoneStatusSubscriptionIdAvailable int
var zonalSubscriptionEnteringMap = map[int]string{}
var zonalSubscriptionLeavingMap = map[int]string{}
var zonalSubscriptionTransferringMap = map[int]string{}
var zonalSubscriptionMap = map[int]string{}
var userSubscriptionEnteringMap = map[int]string{}
var userSubscriptionLeavingMap = map[int]string{}
var userSubscriptionTransferringMap = map[int]string{}
var userSubscriptionMap = map[int]string{}
var zoneStatusSubscriptionMap = map[int]*ZoneStatusCheck{}
type ZoneStatusCheck struct {
ZoneId string
Serviceable bool
Unserviceable bool
Unknown bool
NbUsersInZoneThreshold int32
NbUsersInAPThreshold int32
}
var LOC_SERV_DB = 0
var currentStoreName = ""
var redisAddr string = "meep-redis-master.default.svc.cluster.local:6379"
var influxAddr string = "http://meep-influxdb.default.svc.cluster.local:8086"
var rc *redis.Connector
var hostUrl *url.URL
var sandboxName string
var basePath string
var baseKey string
var mutex sync.Mutex
func notImplemented(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
w.WriteHeader(http.StatusNotImplemented)
}
// Init - Location Service initialization
func Init() (err error) {
sandboxNameEnv := strings.TrimSpace(os.Getenv("MEEP_SANDBOX_NAME"))
if sandboxNameEnv != "" {
sandboxName = sandboxNameEnv
}
if sandboxName == "" {
err = errors.New("MEEP_SANDBOX_NAME env variable not set")
log.Error(err.Error())
return err
}
log.Info("MEEP_SANDBOX_NAME: ", sandboxName)
// hostUrl is the url of the node serving the resourceURL
// Retrieve public url address where service is reachable, if not present, use Host URL environment variable
hostUrl, err = url.Parse(strings.TrimSpace(os.Getenv("MEEP_PUBLIC_URL")))
if err != nil || hostUrl == nil || hostUrl.String() == "" {
hostUrl, err = url.Parse(strings.TrimSpace(os.Getenv("MEEP_HOST_URL")))
if err != nil {
hostUrl = new(url.URL)
}
}
log.Info("resource URL: ", hostUrl)
// Set base path
basePath = "/" + sandboxName + LocServBasePath
// Get base storage key
baseKey = dkm.GetKeyRoot(sandboxName) + locServKey
// Connect to Redis DB
rc, err = redis.NewConnector(redisAddr, LOC_SERV_DB)
if err != nil {
log.Error("Failed connection to Redis DB. Error: ", err)
return err
}
_ = rc.DBFlush(baseKey)
log.Info("Connected to Redis DB, location service table")
userTrackingReInit()
zonalTrafficReInit()
zoneStatusReInit()
// Initialize SBI
sbiCfg := sbi.SbiCfg{
SandboxName: sandboxName,
RedisAddr: redisAddr,
UserInfoCb: updateUserInfo,
ZoneInfoCb: updateZoneInfo,
ApInfoCb: updateAccessPointInfo,
ScenarioNameCb: updateStoreName,
CleanUpCb: cleanUp,
}
err = sbi.Init(sbiCfg)
if err != nil {
log.Error("Failed initialize SBI. Error: ", err)
return err
}
log.Info("SBI Initialized")
return nil
}
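// Editor's note (illustrative values): with MEEP_SANDBOX_NAME=sandbox1 and
// MEEP_PUBLIC_URL=https://mec-platform.example.com, Init() resolves basePath to
// "/sandbox1/location/v2/" and uses the public URL as the host part of resource URLs.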
// Run - Start Location Service
func Run() (err error) {
return sbi.Run()
}
// Stop - Stop Location Service
func Stop() (err error) {
return sbi.Stop()
}
func deregisterZoneStatus(subsIdStr string) {
subsId, err := strconv.Atoi(subsIdStr)
if err != nil {
log.Error(err)
}
mutex.Lock()
defer mutex.Unlock()
zoneStatusSubscriptionMap[subsId] = nil
}
func registerZoneStatus(zoneId string, nbOfUsersZoneThreshold int32, nbOfUsersAPThreshold int32, opStatus []OperationStatus, subsIdStr string) {
subsId, err := strconv.Atoi(subsIdStr)
if err != nil {
log.Error(err)
}
var zoneStatus ZoneStatusCheck
if opStatus != nil {
for i := 0; i < len(opStatus); i++ {
switch opStatus[i] {
case SERVICEABLE:
zoneStatus.Serviceable = true
case UNSERVICEABLE:
zoneStatus.Unserviceable = true
case OPSTATUS_UNKNOWN:
zoneStatus.Unknown = true
default:
}
}
}
zoneStatus.NbUsersInZoneThreshold = nbOfUsersZoneThreshold
zoneStatus.NbUsersInAPThreshold = nbOfUsersAPThreshold
zoneStatus.ZoneId = zoneId
mutex.Lock()
defer mutex.Unlock()
zoneStatusSubscriptionMap[subsId] = &zoneStatus
}
func deregisterZonal(subsIdStr string) {
subsId, err := strconv.Atoi(subsIdStr)
if err != nil {
log.Error(err)
}
mutex.Lock()
defer mutex.Unlock()
zonalSubscriptionMap[subsId] = ""
zonalSubscriptionEnteringMap[subsId] = ""
zonalSubscriptionLeavingMap[subsId] = ""
zonalSubscriptionTransferringMap[subsId] = ""
}
func registerZonal(zoneId string, event []UserEventType, subsIdStr string) {
subsId, err := strconv.Atoi(subsIdStr)
if err != nil {
log.Error(err)
}
mutex.Lock()
defer mutex.Unlock()
if event != nil {
for i := 0; i < len(event); i++ {
switch event[i] {
case ENTERING_EVENT:
zonalSubscriptionEnteringMap[subsId] = zoneId
case LEAVING_EVENT:
zonalSubscriptionLeavingMap[subsId] = zoneId
case TRANSFERRING_EVENT:
zonalSubscriptionTransferringMap[subsId] = zoneId
default:
}
}
} else {
zonalSubscriptionEnteringMap[subsId] = zoneId
zonalSubscriptionLeavingMap[subsId] = zoneId
zonalSubscriptionTransferringMap[subsId] = zoneId
}
zonalSubscriptionMap[subsId] = zoneId
}
func deregisterUser(subsIdStr string) {
subsId, err := strconv.Atoi(subsIdStr)
if err != nil {
log.Error(err)
}
mutex.Lock()
defer mutex.Unlock()
userSubscriptionMap[subsId] = ""
userSubscriptionEnteringMap[subsId] = ""
userSubscriptionLeavingMap[subsId] = ""
userSubscriptionTransferringMap[subsId] = ""
}
func registerUser(userAddress string, event []UserEventType, subsIdStr string) {
subsId, err := strconv.Atoi(subsIdStr)
if err != nil {
log.Error(err)
}
mutex.Lock()
defer mutex.Unlock()
if event != nil {
for i := 0; i < len(event); i++ {
switch event[i] {
case ENTERING_EVENT:
userSubscriptionEnteringMap[subsId] = userAddress
case LEAVING_EVENT:
userSubscriptionLeavingMap[subsId] = userAddress
case TRANSFERRING_EVENT:
userSubscriptionTransferringMap[subsId] = userAddress
default:
}
}
} else {
userSubscriptionEnteringMap[subsId] = userAddress
userSubscriptionLeavingMap[subsId] = userAddress
userSubscriptionTransferringMap[subsId] = userAddress
}
userSubscriptionMap[subsId] = userAddress
}
func checkNotificationRegisteredZoneStatus(zoneId string, apId string, nbUsersInAP int32, nbUsersInZone int32, previousNbUsersInAP int32, previousNbUsersInZone int32) {
mutex.Lock()
defer mutex.Unlock()
//check all subscriptions that apply
for subsId, zoneStatus := range zoneStatusSubscriptionMap {
if zoneStatus == nil {
continue
}
if zoneStatus.ZoneId == zoneId {
zoneWarning := false
apWarning := false
if nbUsersInZone != -1 {
if previousNbUsersInZone != nbUsersInZone && nbUsersInZone >= zoneStatus.NbUsersInZoneThreshold {
zoneWarning = true
}
}
if nbUsersInAP != -1 {
if previousNbUsersInAP != nbUsersInAP && nbUsersInAP >= zoneStatus.NbUsersInAPThreshold {
apWarning = true
}
}
if zoneWarning || apWarning {
subsIdStr := strconv.Itoa(subsId)
jsonInfo, _ := rc.JSONGetEntry(baseKey+typeZoneStatusSubscription+":"+subsIdStr, ".")
if jsonInfo == "" {
return
}
subscription := convertJsonToZoneStatusSubscription(jsonInfo)
var zoneStatusNotif ZoneStatusNotification
zoneStatusNotif.ZoneId = zoneId
if apWarning {
zoneStatusNotif.AccessPointId = apId
zoneStatusNotif.NumberOfUsersInAP = nbUsersInAP
}
if zoneWarning {
zoneStatusNotif.NumberOfUsersInZone = nbUsersInZone
}
seconds := time.Now().Unix()
var timestamp TimeStamp
timestamp.Seconds = int32(seconds)
zoneStatusNotif.Timestamp = &timestamp
var inlineZoneStatusNotification InlineZoneStatusNotification
inlineZoneStatusNotification.ZoneStatusNotification = &zoneStatusNotif
sendStatusNotification(subscription.CallbackReference.NotifyURL, inlineZoneStatusNotification)
if apWarning {
log.Info("Zone Status Notification" + "(" + subsIdStr + "): " + "For event in zone " + zoneId + " which has " + strconv.Itoa(int(nbUsersInAP)) + " users in AP " + apId)
} else {
log.Info("Zone Status Notification" + "(" + subsIdStr + "): " + "For event in zone " + zoneId + " which has " + strconv.Itoa(int(nbUsersInZone)) + " users in total")
}
}
}
}
}
func checkNotificationRegisteredUsers(oldZoneId string, newZoneId string, oldApId string, newApId string, userId string) {
mutex.Lock()
defer mutex.Unlock()
//check all subscriptions that apply
for subsId, value := range userSubscriptionMap {
if value == userId {
subsIdStr := strconv.Itoa(subsId)
jsonInfo, _ := rc.JSONGetEntry(baseKey+typeUserSubscription+":"+subsIdStr, ".")
if jsonInfo == "" {
return
}
subscription := convertJsonToUserSubscription(jsonInfo)
var zonal ZonalPresenceNotification
zonal.Address = userId
seconds := time.Now().Unix()
var timestamp TimeStamp
timestamp.Seconds = int32(seconds)
zonal.Timestamp = &timestamp
zonal.CallbackData = subscription.ClientCorrelator
if newZoneId != oldZoneId {
//process LEAVING events prior to entering ones
if oldZoneId != "" {
if userSubscriptionLeavingMap[subsId] != "" {
zonal.ZoneId = oldZoneId
zonal.CurrentAccessPointId = oldApId
event := new(UserEventType)
*event = LEAVING_EVENT
zonal.UserEventType = event
var inlineZonal InlineZonalPresenceNotification
inlineZonal.ZonalPresenceNotification = &zonal
sendZonalPresenceNotification(subscription.CallbackReference.NotifyURL, inlineZonal)
log.Info("User Notification" + "(" + subsIdStr + "): " + "Leaving event in zone " + oldZoneId + " for user " + userId)
}
}
if userSubscriptionEnteringMap[subsId] != "" && newZoneId != "" {
zonal.ZoneId = newZoneId
zonal.CurrentAccessPointId = newApId
event := new(UserEventType)
*event = ENTERING_EVENT
zonal.UserEventType = event
var inlineZonal InlineZonalPresenceNotification
inlineZonal.ZonalPresenceNotification = &zonal
sendZonalPresenceNotification(subscription.CallbackReference.NotifyURL, inlineZonal)
log.Info("User Notification" + "(" + subsIdStr + "): " + "Entering event in zone " + newZoneId + " for user " + userId)
}
} else {
if newApId != oldApId {
if userSubscriptionTransferringMap[subsId] != "" {
zonal.ZoneId = newZoneId
zonal.CurrentAccessPointId = newApId
zonal.PreviousAccessPointId = oldApId
event := new(UserEventType)
*event = TRANSFERRING_EVENT
zonal.UserEventType = event
var inlineZonal InlineZonalPresenceNotification
inlineZonal.ZonalPresenceNotification = &zonal
sendZonalPresenceNotification(subscription.CallbackReference.NotifyURL, inlineZonal)
log.Info("User Notification" + "(" + subsIdStr + "): " + " Transferring event within zone " + newZoneId + " for user " + userId + " from Ap " + oldApId + " to " + newApId)
}
}
}
}
}
}
func sendZonalPresenceNotification(notifyUrl string, notification InlineZonalPresenceNotification) {
startTime := time.Now()
jsonNotif, err := json.Marshal(notification)
if err != nil {
log.Error(err)
return
}
resp, err := http.Post(notifyUrl, "application/json", bytes.NewBuffer(jsonNotif))
duration := float64(time.Since(startTime).Microseconds()) / 1000.0
_ = httpLog.LogTx(notifyUrl, "POST", string(jsonNotif), resp, startTime)
if err != nil {
log.Error(err)
met.ObserveNotification(sandboxName, serviceName, notifZonalPresence, notifyUrl, nil, duration)
return
}
met.ObserveNotification(sandboxName, serviceName, notifZonalPresence, notifyUrl, resp, duration)
defer resp.Body.Close()
}
func sendStatusNotification(notifyUrl string, notification InlineZoneStatusNotification) {
startTime := time.Now()
jsonNotif, err := json.Marshal(notification)
if err != nil {
log.Error(err)
return
}
resp, err := http.Post(notifyUrl, "application/json", bytes.NewBuffer(jsonNotif))
duration := float64(time.Since(startTime).Microseconds()) / 1000.0
_ = httpLog.LogTx(notifyUrl, "POST", string(jsonNotif), resp, startTime)
if err != nil {
log.Error(err)
met.ObserveNotification(sandboxName, serviceName, notifZoneStatus, notifyUrl, nil, duration)
return
}
met.ObserveNotification(sandboxName, serviceName, notifZoneStatus, notifyUrl, resp, duration)
defer resp.Body.Close()
}
func checkNotificationRegisteredZones(oldZoneId string, newZoneId string, oldApId string, newApId string, userId string) {
mutex.Lock()
defer mutex.Unlock()
//check all subscriptions that apply
for subsId, value := range zonalSubscriptionMap {
if value == newZoneId {
if newZoneId != oldZoneId {
if zonalSubscriptionEnteringMap[subsId] != "" {
subsIdStr := strconv.Itoa(subsId)
jsonInfo, _ := rc.JSONGetEntry(baseKey+typeZonalSubscription+":"+subsIdStr, ".")
if jsonInfo != "" {
subscription := convertJsonToZonalSubscription(jsonInfo)
var zonal ZonalPresenceNotification
zonal.ZoneId = newZoneId
zonal.CurrentAccessPointId = newApId
zonal.Address = userId
event := new(UserEventType)
*event = ENTERING_EVENT
zonal.UserEventType = event
seconds := time.Now().Unix()
var timestamp TimeStamp
timestamp.Seconds = int32(seconds)
zonal.Timestamp = &timestamp
zonal.CallbackData = subscription.ClientCorrelator
var inlineZonal InlineZonalPresenceNotification
inlineZonal.ZonalPresenceNotification = &zonal
sendZonalPresenceNotification(subscription.CallbackReference.NotifyURL, inlineZonal)
log.Info("Zonal Notify Entering event in zone " + newZoneId + " for user " + userId)
}
}
} else {
if newApId != oldApId {
if zonalSubscriptionTransferringMap[subsId] != "" {
subsIdStr := strconv.Itoa(subsId)
jsonInfo, _ := rc.JSONGetEntry(baseKey+typeZonalSubscription+":"+subsIdStr, ".")
if jsonInfo != "" {
subscription := convertJsonToZonalSubscription(jsonInfo)
var zonal ZonalPresenceNotification
zonal.ZoneId = newZoneId
zonal.CurrentAccessPointId = newApId
zonal.PreviousAccessPointId = oldApId
zonal.Address = userId
event := new(UserEventType)
*event = TRANSFERRING_EVENT
zonal.UserEventType = event
seconds := time.Now().Unix()
var timestamp TimeStamp
timestamp.Seconds = int32(seconds)
zonal.Timestamp = &timestamp
zonal.CallbackData = subscription.ClientCorrelator
var inlineZonal InlineZonalPresenceNotification
inlineZonal.ZonalPresenceNotification = &zonal
sendZonalPresenceNotification(subscription.CallbackReference.NotifyURL, inlineZonal)
log.Info("Zonal Notify Transferring event in zone " + newZoneId + " for user " + userId + " from Ap " + oldApId + " to " + newApId)
}
}
}
}
} else {
if value == oldZoneId {
if zonalSubscriptionLeavingMap[subsId] != "" {
subsIdStr := strconv.Itoa(subsId)
jsonInfo, _ := rc.JSONGetEntry(baseKey+typeZonalSubscription+":"+subsIdStr, ".")
if jsonInfo != "" {
subscription := convertJsonToZonalSubscription(jsonInfo)
var zonal ZonalPresenceNotification
zonal.ZoneId = oldZoneId
zonal.CurrentAccessPointId = oldApId
zonal.Address = userId
event := new(UserEventType)
*event = LEAVING_EVENT
zonal.UserEventType = event
seconds := time.Now().Unix()
var timestamp TimeStamp
timestamp.Seconds = int32(seconds)
zonal.Timestamp = &timestamp
zonal.CallbackData = subscription.ClientCorrelator
var inlineZonal InlineZonalPresenceNotification
inlineZonal.ZonalPresenceNotification = &zonal
sendZonalPresenceNotification(subscription.CallbackReference.NotifyURL, inlineZonal)
log.Info("Zonal Notify Leaving event in zone " + oldZoneId + " for user " + userId)
}
}
}
}
}
}
func usersGet(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
var userData UeUserData
// Retrieve query parameters
u, _ := url.Parse(r.URL.String())
log.Info("url: ", u.RequestURI())
q := u.Query()
userData.queryZoneId = q["zoneId"]
userData.queryApId = q["accessPointId"]
userData.queryAddress = q["address"]
validQueryParams := []string{"zoneId", "accessPointId", "address"}
//look through all query parameters and reject the request if any are invalid
found := false
for queryParam := range q {
found = false
for _, validQueryParam := range validQueryParams {
if queryParam == validQueryParam {
found = true
break
}
}
if !found {
log.Error("Query param not valid: ", queryParam)
w.WriteHeader(http.StatusBadRequest)
return
}
}
// Get user list from DB
var response InlineUserList
var userList UserList
userList.ResourceURL = hostUrl.String() + basePath + "queries/users"
response.UserList = &userList
userData.userList = &userList
keyName := baseKey + typeUser + ":*"
err := rc.ForEachJSONEntry(keyName, populateUserList, &userData)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// Send response
jsonResponse, err := json.Marshal(response)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, string(jsonResponse))
}
func populateUserList(key string, jsonInfo string, userData interface{}) error {
// Get query params & userlist from user data
data := userData.(*UeUserData)
if data == nil || data.userList == nil {
return errors.New("userList not found in userData")
}
// Retrieve user info from DB
var userInfo UserInfo
err := json.Unmarshal([]byte(jsonInfo), &userInfo)
if err != nil {
return err
}
// Ignore entries with no zoneID or AP ID
if userInfo.ZoneId == "" || userInfo.AccessPointId == "" {
return nil
}
//query parameters are combined with OR within the same parameter and AND across different parameters
//example returning users matching zoneId : (zone01 OR zone02) AND accessPointId : (ap1 OR ap2 OR ap3) AND address: (ipAddress1 OR ipAddress2)
foundAMatch := false
// Filter using query params
if len(data.queryZoneId) > 0 {
foundAMatch = false
for _, queryZoneId := range data.queryZoneId {
if userInfo.ZoneId == queryZoneId {
foundAMatch = true
}
}
if !foundAMatch {
return nil
}
}
if len(data.queryApId) > 0 {
foundAMatch = false
for _, queryApId := range data.queryApId {
if userInfo.AccessPointId == queryApId {
foundAMatch = true
}
}
if !foundAMatch {
return nil
}
}
if len(data.queryAddress) > 0 {
foundAMatch = false
for _, queryAddress := range data.queryAddress {
if userInfo.Address == queryAddress {
foundAMatch = true
}
}
if !foundAMatch {
return nil
}
}
// Add user info to list
data.userList.User = append(data.userList.User, userInfo)
return nil
}
func apGet(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
var userData ApUserData
vars := mux.Vars(r)
// Retrieve query parameters
u, _ := url.Parse(r.URL.String())
log.Info("url: ", u.RequestURI())
q := u.Query()
userData.queryInterestRealm = q.Get("interestRealm")
validQueryParams := []string{"interestRealm"}
//look through all query parameters and reject the request if any are invalid
found := false
for queryParam := range q {
found = false
for _, validQueryParam := range validQueryParams {
if queryParam == validQueryParam {
found = true
break
}
}
if !found {
log.Error("Query param not valid: ", queryParam)
w.WriteHeader(http.StatusBadRequest)
return
}
}
// Get user list from DB
var response InlineAccessPointList
var apList AccessPointList
apList.ZoneId = vars["zoneId"]
apList.ResourceURL = hostUrl.String() + basePath + "queries/zones/" + vars["zoneId"] + "/accessPoints"
response.AccessPointList = &apList
userData.apList = &apList
//make sure the zone exists first
jsonZoneInfo, _ := rc.JSONGetEntry(baseKey+typeZone+":"+vars["zoneId"], ".")
if jsonZoneInfo == "" {
w.WriteHeader(http.StatusNotFound)
return
}
keyName := baseKey + typeZone + ":" + vars["zoneId"] + ":*"
err := rc.ForEachJSONEntry(keyName, populateApList, &userData)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// Send response
jsonResponse, err := json.Marshal(response)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, string(jsonResponse))
}
func apByIdGet(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
vars := mux.Vars(r)
var response InlineAccessPointInfo
var apInfo AccessPointInfo
response.AccessPointInfo = &apInfo
jsonApInfo, _ := rc.JSONGetEntry(baseKey+typeZone+":"+vars["zoneId"]+":"+typeAccessPoint+":"+vars["accessPointId"], ".")
if jsonApInfo == "" {
w.WriteHeader(http.StatusNotFound)
return
}
err := json.Unmarshal([]byte(jsonApInfo), &apInfo)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
jsonResponse, err := json.Marshal(response)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, string(jsonResponse))
}
func zonesGet(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
var response InlineZoneList
var zoneList ZoneList
zoneList.ResourceURL = hostUrl.String() + basePath + "queries/zones"
response.ZoneList = &zoneList
keyName := baseKey + typeZone + ":*"
err := rc.ForEachJSONEntry(keyName, populateZoneList, &zoneList)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
jsonResponse, err := json.Marshal(response)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, string(jsonResponse))
}
func zonesByIdGet(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
vars := mux.Vars(r)
var response InlineZoneInfo
var zoneInfo ZoneInfo
response.ZoneInfo = &zoneInfo
jsonZoneInfo, _ := rc.JSONGetEntry(baseKey+typeZone+":"+vars["zoneId"], ".")
if jsonZoneInfo == "" {
w.WriteHeader(http.StatusNotFound)
return
}
err := json.Unmarshal([]byte(jsonZoneInfo), &zoneInfo)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
jsonResponse, err := json.Marshal(response)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, string(jsonResponse))
}
func populateZoneList(key string, jsonInfo string, userData interface{}) error {
zoneList := userData.(*ZoneList)
var zoneInfo ZoneInfo
// Format response
err := json.Unmarshal([]byte(jsonInfo), &zoneInfo)
if err != nil {
return err
}
if zoneInfo.ZoneId != "" {
zoneList.Zone = append(zoneList.Zone, zoneInfo)
}
return nil
}
func populateApList(key string, jsonInfo string, userData interface{}) error {
// Get query params & aplist from user data
data := userData.(*ApUserData)
if data == nil || data.apList == nil {
return errors.New("apList not found in userData")
}
// Retrieve AP info from DB
var apInfo AccessPointInfo
err := json.Unmarshal([]byte(jsonInfo), &apInfo)
if err != nil {
return err
}
// Ignore entries with no AP ID
if apInfo.AccessPointId == "" {
return nil
}
// Filter using query params
if data.queryInterestRealm != "" && apInfo.InterestRealm != data.queryInterestRealm {
return nil
}
// Add AP info to list
data.apList.AccessPoint = append(data.apList.AccessPoint, apInfo)
return nil
}
func userTrackingSubDelete(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
vars := mux.Vars(r)
present, _ := rc.JSONGetEntry(baseKey+typeUserSubscription+":"+vars["subscriptionId"], ".")
if present == "" {
w.WriteHeader(http.StatusNotFound)
return
}
err := rc.JSONDelEntry(baseKey+typeUserSubscription+":"+vars["subscriptionId"], ".")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
deregisterUser(vars["subscriptionId"])
w.WriteHeader(http.StatusNoContent)
}
func userTrackingSubListGet(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
var response InlineNotificationSubscriptionList
var userTrackingSubList NotificationSubscriptionList
userTrackingSubList.ResourceURL = hostUrl.String() + basePath + "subscriptions/userTracking"
response.NotificationSubscriptionList = &userTrackingSubList
keyName := baseKey + typeUserSubscription + "*"
err := rc.ForEachJSONEntry(keyName, populateUserTrackingList, &userTrackingSubList)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
jsonResponse, err := json.Marshal(response)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, string(jsonResponse))
}
func userTrackingSubGet(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
vars := mux.Vars(r)
var response InlineUserTrackingSubscription
var userTrackingSub UserTrackingSubscription
response.UserTrackingSubscription = &userTrackingSub
jsonUserTrackingSub, _ := rc.JSONGetEntry(baseKey+typeUserSubscription+":"+vars["subscriptionId"], ".")
if jsonUserTrackingSub == "" {
w.WriteHeader(http.StatusNotFound)
return
}
err := json.Unmarshal([]byte(jsonUserTrackingSub), &userTrackingSub)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
jsonResponse, err := json.Marshal(response)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, string(jsonResponse))
}
func userTrackingSubPost(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
var response InlineUserTrackingSubscription
var body InlineUserTrackingSubscription
decoder := json.NewDecoder(r.Body)
err := decoder.Decode(&body)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
userTrackingSub := body.UserTrackingSubscription
if userTrackingSub == nil {
log.Error("Body not present")
http.Error(w, "Body not present", http.StatusBadRequest)
return
}
//checking for mandatory properties
if userTrackingSub.CallbackReference == nil || userTrackingSub.CallbackReference.NotifyURL == "" {
log.Error("Mandatory CallbackReference parameter not present")
http.Error(w, "Mandatory CallbackReference parameter not present", http.StatusBadRequest)
return
}
if userTrackingSub.Address == "" {
log.Error("Mandatory Address parameter not present")
http.Error(w, "Mandatory Address parameter not present", http.StatusBadRequest)
return
}
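// Illustrative request body for this endpoint (values are hypothetical; field names
// assume the camelCase JSON tags of the generated data model):
//   {"userTrackingSubscription": {
//      "callbackReference": {"notifyURL": "http://client.example.com/notify"},
//      "address": "10.100.0.1"}}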
newSubsId := nextUserSubscriptionIdAvailable
nextUserSubscriptionIdAvailable++
subsIdStr := strconv.Itoa(newSubsId)
registerUser(userTrackingSub.Address, userTrackingSub.UserEventCriteria, subsIdStr)
userTrackingSub.ResourceURL = hostUrl.String() + basePath + "subscriptions/userTracking/" + subsIdStr
_ = rc.JSONSetEntry(baseKey+typeUserSubscription+":"+subsIdStr, ".", convertUserSubscriptionToJson(userTrackingSub))
response.UserTrackingSubscription = userTrackingSub
jsonResponse, err := json.Marshal(response)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusCreated)
fmt.Fprintf(w, string(jsonResponse))
}
func userTrackingSubPut(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
vars := mux.Vars(r)
var response InlineUserTrackingSubscription
var body InlineUserTrackingSubscription
decoder := json.NewDecoder(r.Body)
err := decoder.Decode(&body)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
userTrackingSub := body.UserTrackingSubscription
if userTrackingSub == nil {
log.Error("Body not present")
http.Error(w, "Body not present", http.StatusBadRequest)
return
}
//checking for mandatory properties
if userTrackingSub.CallbackReference == nil || userTrackingSub.CallbackReference.NotifyURL == "" {
log.Error("Mandatory CallbackReference parameter not present")
http.Error(w, "Mandatory CallbackReference parameter not present", http.StatusBadRequest)
return
}
if userTrackingSub.Address == "" {
log.Error("Mandatory Address parameter not present")
http.Error(w, "Mandatory Address parameter not present", http.StatusBadRequest)
return
}
if userTrackingSub.ResourceURL == "" {
log.Error("Mandatory ResourceURL parameter not present")
http.Error(w, "Mandatory ResourceURL parameter not present", http.StatusBadRequest)
return
}
subsIdParamStr := vars["subscriptionId"]
selfUrl := strings.Split(userTrackingSub.ResourceURL, "/")
subsIdStr := selfUrl[len(selfUrl)-1]
//Body content not matching parameters
if subsIdStr != subsIdParamStr {
log.Error("SubscriptionId in endpoint and in body not matching")
http.Error(w, "SubscriptionId in endpoint and in body not matching", http.StatusBadRequest)
return
}
userTrackingSub.ResourceURL = hostUrl.String() + basePath + "subscriptions/userTracking/" + subsIdStr
subsId, err := strconv.Atoi(subsIdStr)
if err != nil {
log.Error(err)
w.WriteHeader(http.StatusBadRequest)
return
}
if userSubscriptionMap[subsId] == "" {
w.WriteHeader(http.StatusNotFound)
return
}
_ = rc.JSONSetEntry(baseKey+typeUserSubscription+":"+subsIdStr, ".", convertUserSubscriptionToJson(userTrackingSub))
deregisterUser(subsIdStr)
registerUser(userTrackingSub.Address, userTrackingSub.UserEventCriteria, subsIdStr)
response.UserTrackingSubscription = userTrackingSub
jsonResponse, err := json.Marshal(response)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, string(jsonResponse))
}
func populateUserTrackingList(key string, jsonInfo string, userData interface{}) error {
userList := userData.(*NotificationSubscriptionList)
var userInfo UserTrackingSubscription
// Format response
err := json.Unmarshal([]byte(jsonInfo), &userInfo)
if err != nil {
return err
}
userList.UserTrackingSubscription = append(userList.UserTrackingSubscription, userInfo)
return nil
}
func zonalTrafficSubDelete(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
vars := mux.Vars(r)
present, _ := rc.JSONGetEntry(baseKey+typeZonalSubscription+":"+vars["subscriptionId"], ".")
if present == "" {
w.WriteHeader(http.StatusNotFound)
return
}
err := rc.JSONDelEntry(baseKey+typeZonalSubscription+":"+vars["subscriptionId"], ".")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
deregisterZonal(vars["subscriptionId"])
w.WriteHeader(http.StatusNoContent)
}
func zonalTrafficSubListGet(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
var response InlineNotificationSubscriptionList
var zonalTrafficSubList NotificationSubscriptionList
zonalTrafficSubList.ResourceURL = hostUrl.String() + basePath + "subscriptions/zonalTraffic"
response.NotificationSubscriptionList = &zonalTrafficSubList
keyName := baseKey + typeZonalSubscription + "*"
err := rc.ForEachJSONEntry(keyName, populateZonalTrafficList, &zonalTrafficSubList)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
jsonResponse, err := json.Marshal(response)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, string(jsonResponse))
}
func zonalTrafficSubGet(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
vars := mux.Vars(r)
var response InlineZonalTrafficSubscription
var zonalTrafficSub ZonalTrafficSubscription
response.ZonalTrafficSubscription = &zonalTrafficSub
jsonZonalTrafficSub, _ := rc.JSONGetEntry(baseKey+typeZonalSubscription+":"+vars["subscriptionId"], ".")
if jsonZonalTrafficSub == "" {
w.WriteHeader(http.StatusNotFound)
return
}
err := json.Unmarshal([]byte(jsonZonalTrafficSub), &zonalTrafficSub)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
jsonResponse, err := json.Marshal(response)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, string(jsonResponse))
}
func zonalTrafficSubPost(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
var response InlineZonalTrafficSubscription
var body InlineZonalTrafficSubscription
decoder := json.NewDecoder(r.Body)
err := decoder.Decode(&body)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
zonalTrafficSub := body.ZonalTrafficSubscription
if zonalTrafficSub == nil {
log.Error("Body not present")
http.Error(w, "Body not present", http.StatusBadRequest)
return
}
//checking for mandatory properties
if zonalTrafficSub.CallbackReference == nil || zonalTrafficSub.CallbackReference.NotifyURL == "" {
log.Error("Mandatory CallbackReference parameter not present")
http.Error(w, "Mandatory CallbackReference parameter not present", http.StatusBadRequest)
return
}
if zonalTrafficSub.ZoneId == "" {
log.Error("Mandatory ZoneId parameter not present")
http.Error(w, "Mandatory ZoneId parameter not present", http.StatusBadRequest)
return
}
newSubsId := nextZonalSubscriptionIdAvailable
nextZonalSubscriptionIdAvailable++
subsIdStr := strconv.Itoa(newSubsId)
// Duration used to be a string ("" or "0" meant no expiry); it is now numeric.
if zonalTrafficSub.Duration != 0 {
//TODO start a timer mechanism and expire the subscription
log.Info("Non zero duration")
}
//else, lasts forever or until the subscription is deleted
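// Hypothetical expiry sketch (not implemented here): if expiry were added, it could
// use a timer keyed on the subscription ID, e.g.:
//   time.AfterFunc(time.Duration(zonalTrafficSub.Duration)*time.Second, func() {
//       _ = rc.JSONDelEntry(baseKey+typeZonalSubscription+":"+subsIdStr, ".")
//       deregisterZonal(subsIdStr)
//   })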
zonalTrafficSub.ResourceURL = hostUrl.String() + basePath + "subscriptions/zonalTraffic/" + subsIdStr
_ = rc.JSONSetEntry(baseKey+typeZonalSubscription+":"+subsIdStr, ".", convertZonalSubscriptionToJson(zonalTrafficSub))
registerZonal(zonalTrafficSub.ZoneId, zonalTrafficSub.UserEventCriteria, subsIdStr)
response.ZonalTrafficSubscription = zonalTrafficSub
jsonResponse, err := json.Marshal(response)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusCreated)
fmt.Fprintf(w, string(jsonResponse))
}
func zonalTrafficSubPut(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
vars := mux.Vars(r)
var response InlineZonalTrafficSubscription
var body InlineZonalTrafficSubscription
decoder := json.NewDecoder(r.Body)
err := decoder.Decode(&body)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
zonalTrafficSub := body.ZonalTrafficSubscription
if zonalTrafficSub == nil {
log.Error("Body not present")
http.Error(w, "Body not present", http.StatusBadRequest)
return
}
//checking for mandatory properties
if zonalTrafficSub.CallbackReference == nil || zonalTrafficSub.CallbackReference.NotifyURL == "" {
log.Error("Mandatory CallbackReference parameter not present")
http.Error(w, "Mandatory CallbackReference parameter not present", http.StatusBadRequest)
return
}
if zonalTrafficSub.ZoneId == "" {
log.Error("Mandatory ZoneId parameter not present")
http.Error(w, "Mandatory ZoneId parameter not present", http.StatusBadRequest)
return
}
if zonalTrafficSub.ResourceURL == "" {
log.Error("Mandatory ResourceURL parameter not present")
http.Error(w, "Mandatory ResourceURL parameter not present", http.StatusBadRequest)
return
}
subsIdParamStr := vars["subscriptionId"]
selfUrl := strings.Split(zonalTrafficSub.ResourceURL, "/")
subsIdStr := selfUrl[len(selfUrl)-1]
//body content not matching parameters
if subsIdStr != subsIdParamStr {
log.Error("SubscriptionId in endpoint and in body not matching")
http.Error(w, "SubscriptionId in endpoint and in body not matching", http.StatusBadRequest)
return
}
zonalTrafficSub.ResourceURL = hostUrl.String() + basePath + "subscriptions/zonalTraffic/" + subsIdStr
subsId, err := strconv.Atoi(subsIdStr)
if err != nil {
log.Error(err)
w.WriteHeader(http.StatusBadRequest)
return
}
if zonalSubscriptionMap[subsId] == "" {
w.WriteHeader(http.StatusNotFound)
return
}
_ = rc.JSONSetEntry(baseKey+typeZonalSubscription+":"+subsIdStr, ".", convertZonalSubscriptionToJson(zonalTrafficSub))
deregisterZonal(subsIdStr)
registerZonal(zonalTrafficSub.ZoneId, zonalTrafficSub.UserEventCriteria, subsIdStr)
response.ZonalTrafficSubscription = zonalTrafficSub
jsonResponse, err := json.Marshal(response)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, string(jsonResponse))
}
func populateZonalTrafficList(key string, jsonInfo string, userData interface{}) error {
zoneList := userData.(*NotificationSubscriptionList)
var zoneInfo ZonalTrafficSubscription
// Format response
err := json.Unmarshal([]byte(jsonInfo), &zoneInfo)
if err != nil {
return err
}
zoneList.ZonalTrafficSubscription = append(zoneList.ZonalTrafficSubscription, zoneInfo)
return nil
}
func zoneStatusSubDelete(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
vars := mux.Vars(r)
present, _ := rc.JSONGetEntry(baseKey+typeZoneStatusSubscription+":"+vars["subscriptionId"], ".")
if present == "" {
w.WriteHeader(http.StatusNotFound)
return
}
err := rc.JSONDelEntry(baseKey+typeZoneStatusSubscription+":"+vars["subscriptionId"], ".")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
deregisterZoneStatus(vars["subscriptionId"])
w.WriteHeader(http.StatusNoContent)
}
func zoneStatusSubListGet(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
var response InlineNotificationSubscriptionList
var zoneStatusSubList NotificationSubscriptionList
zoneStatusSubList.ResourceURL = hostUrl.String() + basePath + "subscriptions/zoneStatus"
response.NotificationSubscriptionList = &zoneStatusSubList
keyName := baseKey + typeZoneStatusSubscription + "*"
err := rc.ForEachJSONEntry(keyName, populateZoneStatusList, &zoneStatusSubList)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
jsonResponse, err := json.Marshal(response)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, string(jsonResponse))
}
func zoneStatusSubGet(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
vars := mux.Vars(r)
var response InlineZoneStatusSubscription
var zoneStatusSub ZoneStatusSubscription
response.ZoneStatusSubscription = &zoneStatusSub
jsonZoneStatusSub, _ := rc.JSONGetEntry(baseKey+typeZoneStatusSubscription+":"+vars["subscriptionId"], ".")
if jsonZoneStatusSub == "" {
w.WriteHeader(http.StatusNotFound)
return
}
err := json.Unmarshal([]byte(jsonZoneStatusSub), &zoneStatusSub)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
jsonResponse, err := json.Marshal(response)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, string(jsonResponse))
}
func zoneStatusSubPost(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
var response InlineZoneStatusSubscription
var body InlineZoneStatusSubscription
decoder := json.NewDecoder(r.Body)
err := decoder.Decode(&body)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
zoneStatusSub := body.ZoneStatusSubscription
if zoneStatusSub == nil {
log.Error("Body not present")
http.Error(w, "Body not present", http.StatusBadRequest)
return
}
//checking for mandatory properties
if zoneStatusSub.CallbackReference == nil || zoneStatusSub.CallbackReference.NotifyURL == "" {
log.Error("Mandatory CallbackReference parameter not present")
http.Error(w, "Mandatory CallbackReference parameter not present", http.StatusBadRequest)
return
}
if zoneStatusSub.ZoneId == "" {
log.Error("Mandatory ZoneId parameter not present")
http.Error(w, "Mandatory ZoneId parameter not present", http.StatusBadRequest)
return
}
newSubsId := nextZoneStatusSubscriptionIdAvailable
nextZoneStatusSubscriptionIdAvailable++
subsIdStr := strconv.Itoa(newSubsId)
zoneStatusSub.ResourceURL = hostUrl.String() + basePath + "subscriptions/zoneStatus/" + subsIdStr
_ = rc.JSONSetEntry(baseKey+typeZoneStatusSubscription+":"+subsIdStr, ".", convertZoneStatusSubscriptionToJson(zoneStatusSub))
registerZoneStatus(zoneStatusSub.ZoneId, zoneStatusSub.NumberOfUsersZoneThreshold, zoneStatusSub.NumberOfUsersAPThreshold,
zoneStatusSub.OperationStatus, subsIdStr)
response.ZoneStatusSubscription = zoneStatusSub
jsonResponse, err := json.Marshal(response)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusCreated)
fmt.Fprintf(w, string(jsonResponse))
}
func zoneStatusSubPut(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
vars := mux.Vars(r)
var response InlineZoneStatusSubscription
var body InlineZoneStatusSubscription
decoder := json.NewDecoder(r.Body)
err := decoder.Decode(&body)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
zoneStatusSub := body.ZoneStatusSubscription
if zoneStatusSub == nil {
log.Error("Body not present")
http.Error(w, "Body not present", http.StatusBadRequest)
return
}
//checking for mandatory properties
if zoneStatusSub.CallbackReference == nil || zoneStatusSub.CallbackReference.NotifyURL == "" {
log.Error("Mandatory CallbackReference parameter not present")
http.Error(w, "Mandatory CallbackReference parameter not present", http.StatusBadRequest)
return
}
if zoneStatusSub.ZoneId == "" {
log.Error("Mandatory ZoneId parameter not present")
http.Error(w, "Mandatory ZoneId parameter not present", http.StatusBadRequest)
return
}
if zoneStatusSub.ResourceURL == "" {
log.Error("Mandatory ResourceURL parameter not present")
http.Error(w, "Mandatory ResourceURL parameter not present", http.StatusBadRequest)
return
}
subsIdParamStr := vars["subscriptionId"]
selfUrl := strings.Split(zoneStatusSub.ResourceURL, "/")
subsIdStr := selfUrl[len(selfUrl)-1]
//body content not matching parameters
if subsIdStr != subsIdParamStr {
log.Error("SubscriptionId in endpoint and in body not matching")
http.Error(w, "SubscriptionId in endpoint and in body not matching", http.StatusBadRequest)
return
}
zoneStatusSub.ResourceURL = hostUrl.String() + basePath + "subscriptions/zoneStatus/" + subsIdStr
subsId, err := strconv.Atoi(subsIdStr)
if err != nil {
log.Error(err)
w.WriteHeader(http.StatusBadRequest)
return
}
if zoneStatusSubscriptionMap[subsId] == nil {
w.WriteHeader(http.StatusNotFound)
return
}
_ = rc.JSONSetEntry(baseKey+typeZoneStatusSubscription+":"+subsIdStr, ".", convertZoneStatusSubscriptionToJson(zoneStatusSub))
deregisterZoneStatus(subsIdStr)
registerZoneStatus(zoneStatusSub.ZoneId, zoneStatusSub.NumberOfUsersZoneThreshold, zoneStatusSub.NumberOfUsersAPThreshold,
zoneStatusSub.OperationStatus, subsIdStr)
response.ZoneStatusSubscription = zoneStatusSub
jsonResponse, err := json.Marshal(response)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, string(jsonResponse))
}
func populateZoneStatusList(key string, jsonInfo string, userData interface{}) error {
zoneList := userData.(*NotificationSubscriptionList)
var zoneInfo ZoneStatusSubscription
// Format response
err := json.Unmarshal([]byte(jsonInfo), &zoneInfo)
if err != nil {
return err
}
zoneList.ZoneStatusSubscription = append(zoneList.ZoneStatusSubscription, zoneInfo)
return nil
}
func cleanUp() {
log.Info("Terminate all")
rc.DBFlush(baseKey)
nextZonalSubscriptionIdAvailable = 1
nextUserSubscriptionIdAvailable = 1
nextZoneStatusSubscriptionIdAvailable = 1
mutex.Lock()
defer mutex.Unlock()
zonalSubscriptionEnteringMap = map[int]string{}
zonalSubscriptionLeavingMap = map[int]string{}
zonalSubscriptionTransferringMap = map[int]string{}
zonalSubscriptionMap = map[int]string{}
userSubscriptionEnteringMap = map[int]string{}
userSubscriptionLeavingMap = map[int]string{}
userSubscriptionTransferringMap = map[int]string{}
userSubscriptionMap = map[int]string{}
zoneStatusSubscriptionMap = map[int]*ZoneStatusCheck{}
updateStoreName("")
}
func updateStoreName(storeName string) {
if currentStoreName != storeName {
currentStoreName = storeName
_ = httpLog.ReInit(logModuleLocServ, sandboxName, storeName, redisAddr, influxAddr)
}
}
func updateUserInfo(address string, zoneId string, accessPointId string, longitude *float32, latitude *float32) {
var oldZoneId string
var oldApId string
// Get User Info from DB
jsonUserInfo, _ := rc.JSONGetEntry(baseKey+typeUser+":"+address, ".")
userInfo := convertJsonToUserInfo(jsonUserInfo)
// Create new user info if necessary
if userInfo == nil {
userInfo = new(UserInfo)
userInfo.Address = address
userInfo.ResourceURL = hostUrl.String() + basePath + "queries/users?address=" + address
} else {
// Get old zone & AP IDs
oldZoneId = userInfo.ZoneId
oldApId = userInfo.AccessPointId
}
userInfo.ZoneId = zoneId
userInfo.AccessPointId = accessPointId
seconds := time.Now().Unix()
var timeStamp TimeStamp
timeStamp.Seconds = int32(seconds)
userInfo.Timestamp = &timeStamp
// Update position
if longitude == nil || latitude == nil {
userInfo.LocationInfo = nil
} else {
if userInfo.LocationInfo == nil {
userInfo.LocationInfo = new(LocationInfo)
}
//we only support shape == 2 in locationInfo, so we ignore any conditional parameters based on shape
userInfo.LocationInfo.Shape = 2
userInfo.LocationInfo.Longitude = nil
userInfo.LocationInfo.Longitude = append(userInfo.LocationInfo.Longitude, *longitude)
userInfo.LocationInfo.Latitude = nil
userInfo.LocationInfo.Latitude = append(userInfo.LocationInfo.Latitude, *latitude)
userInfo.LocationInfo.Timestamp = &timeStamp
}
// Update User info in DB & Send notifications
_ = rc.JSONSetEntry(baseKey+typeUser+":"+address, ".", convertUserInfoToJson(userInfo))
checkNotificationRegisteredUsers(oldZoneId, zoneId, oldApId, accessPointId, address)
checkNotificationRegisteredZones(oldZoneId, zoneId, oldApId, accessPointId, address)
}
func updateZoneInfo(zoneId string, nbAccessPoints int, nbUnsrvAccessPoints int, nbUsers int) {
// Get Zone Info from DB
jsonZoneInfo, _ := rc.JSONGetEntry(baseKey+typeZone+":"+zoneId, ".")
zoneInfo := convertJsonToZoneInfo(jsonZoneInfo)
// Create new zone info if necessary
if zoneInfo == nil {
zoneInfo = new(ZoneInfo)
zoneInfo.ZoneId = zoneId
zoneInfo.ResourceURL = hostUrl.String() + basePath + "queries/zones/" + zoneId
}
previousNbUsers := zoneInfo.NumberOfUsers
// Update info
if nbAccessPoints != -1 {
zoneInfo.NumberOfAccessPoints = int32(nbAccessPoints)
}
if nbUnsrvAccessPoints != -1 {
zoneInfo.NumberOfUnserviceableAccessPoints = int32(nbUnsrvAccessPoints)
}
if nbUsers != -1 {
zoneInfo.NumberOfUsers = int32(nbUsers)
}
// Update Zone info in DB & Send notifications
_ = rc.JSONSetEntry(baseKey+typeZone+":"+zoneId, ".", convertZoneInfoToJson(zoneInfo))
checkNotificationRegisteredZoneStatus(zoneId, "", int32(-1), int32(nbUsers), int32(-1), previousNbUsers)
}
func updateAccessPointInfo(zoneId string, apId string, conTypeStr string, opStatusStr string, nbUsers int, longitude *float32, latitude *float32) {
// Get AP Info from DB
jsonApInfo, _ := rc.JSONGetEntry(baseKey+typeZone+":"+zoneId+":"+typeAccessPoint+":"+apId, ".")
apInfo := convertJsonToAccessPointInfo(jsonApInfo)
// Create new AP info if necessary
if apInfo == nil {
apInfo = new(AccessPointInfo)
apInfo.AccessPointId = apId
apInfo.ResourceURL = hostUrl.String() + basePath + "queries/zones/" + zoneId + "/accessPoints/" + apId
}
previousNbUsers := apInfo.NumberOfUsers
// Update info
if opStatusStr != "" {
opStatus := convertStringToOperationStatus(opStatusStr)
apInfo.OperationStatus = &opStatus
}
if conTypeStr != "" {
conType := convertStringToConnectionType(conTypeStr)
apInfo.ConnectionType = &conType
}
if nbUsers != -1 {
apInfo.NumberOfUsers = int32(nbUsers)
}
// Update position
if longitude == nil || latitude == nil {
apInfo.LocationInfo = nil
} else {
if apInfo.LocationInfo == nil {
apInfo.LocationInfo = new(LocationInfo)
apInfo.LocationInfo.Accuracy = 1
}
//we only support shape == 2 in locationInfo, so we ignore any conditional parameters based on shape
apInfo.LocationInfo.Shape = 2
apInfo.LocationInfo.Longitude = nil
apInfo.LocationInfo.Longitude = append(apInfo.LocationInfo.Longitude, *longitude)
apInfo.LocationInfo.Latitude = nil
apInfo.LocationInfo.Latitude = append(apInfo.LocationInfo.Latitude, *latitude)
seconds := time.Now().Unix()
var timeStamp TimeStamp
timeStamp.Seconds = int32(seconds)
apInfo.LocationInfo.Timestamp = &timeStamp
}
// Update AP info in DB & Send notifications
_ = rc.JSONSetEntry(baseKey+typeZone+":"+zoneId+":"+typeAccessPoint+":"+apId, ".", convertAccessPointInfoToJson(apInfo))
checkNotificationRegisteredZoneStatus(zoneId, apId, int32(nbUsers), int32(-1), previousNbUsers, int32(-1))
}
func zoneStatusReInit() {
// Reuse the NotificationSubscriptionList response object from the GET-all zoneStatusSubscription handler
var zoneList NotificationSubscriptionList
keyName := baseKey + typeZoneStatusSubscription + "*"
_ = rc.ForEachJSONEntry(keyName, populateZoneStatusList, &zoneList)
maxZoneStatusSubscriptionId := 0
mutex.Lock()
defer mutex.Unlock()
for _, zone := range zoneList.ZoneStatusSubscription {
resourceUrl := strings.Split(zone.ResourceURL, "/")
subscriptionId, err := strconv.Atoi(resourceUrl[len(resourceUrl)-1])
if err != nil {
log.Error(err)
} else {
if subscriptionId > maxZoneStatusSubscriptionId {
maxZoneStatusSubscriptionId = subscriptionId
}
var zoneStatus ZoneStatusCheck
opStatus := zone.OperationStatus
if opStatus != nil {
for i := 0; i < len(opStatus); i++ {
switch opStatus[i] {
case SERVICEABLE:
zoneStatus.Serviceable = true
case UNSERVICEABLE:
zoneStatus.Unserviceable = true
case OPSTATUS_UNKNOWN:
zoneStatus.Unknown = true
default:
}
}
}
zoneStatus.NbUsersInZoneThreshold = zone.NumberOfUsersZoneThreshold
zoneStatus.NbUsersInAPThreshold = zone.NumberOfUsersAPThreshold
zoneStatus.ZoneId = zone.ZoneId
zoneStatusSubscriptionMap[subscriptionId] = &zoneStatus
}
}
nextZoneStatusSubscriptionIdAvailable = maxZoneStatusSubscriptionId + 1
}
func zonalTrafficReInit() {
// Reuse the NotificationSubscriptionList response object from the GET-all zonalTrafficSubscription handler
var zoneList NotificationSubscriptionList
keyName := baseKey + typeZonalSubscription + "*"
_ = rc.ForEachJSONEntry(keyName, populateZonalTrafficList, &zoneList)
maxZonalSubscriptionId := 0
mutex.Lock()
defer mutex.Unlock()
for _, zone := range zoneList.ZonalTrafficSubscription {
resourceUrl := strings.Split(zone.ResourceURL, "/")
subscriptionId, err := strconv.Atoi(resourceUrl[len(resourceUrl)-1])
if err != nil {
log.Error(err)
} else {
if subscriptionId > maxZonalSubscriptionId {
maxZonalSubscriptionId = subscriptionId
}
for i := 0; i < len(zone.UserEventCriteria); i++ {
switch zone.UserEventCriteria[i] {
case ENTERING_EVENT:
zonalSubscriptionEnteringMap[subscriptionId] = zone.ZoneId
case LEAVING_EVENT:
zonalSubscriptionLeavingMap[subscriptionId] = zone.ZoneId
case TRANSFERRING_EVENT:
zonalSubscriptionTransferringMap[subscriptionId] = zone.ZoneId
default:
}
}
zonalSubscriptionMap[subscriptionId] = zone.ZoneId
}
}
nextZonalSubscriptionIdAvailable = maxZonalSubscriptionId + 1
}
func userTrackingReInit() {
// Reuse the NotificationSubscriptionList response object from the GET-all userTrackingSubscription handler
var userList NotificationSubscriptionList
keyName := baseKey + typeUserSubscription + "*"
_ = rc.ForEachJSONEntry(keyName, populateUserTrackingList, &userList)
maxUserSubscriptionId := 0
mutex.Lock()
defer mutex.Unlock()
for _, user := range userList.UserTrackingSubscription {
resourceUrl := strings.Split(user.ResourceURL, "/")
subscriptionId, err := strconv.Atoi(resourceUrl[len(resourceUrl)-1])
if err != nil {
log.Error(err)
} else {
if subscriptionId > maxUserSubscriptionId {
maxUserSubscriptionId = subscriptionId
}
for i := 0; i < len(user.UserEventCriteria); i++ {
switch user.UserEventCriteria[i] {
case ENTERING_EVENT:
userSubscriptionEnteringMap[subscriptionId] = user.Address
case LEAVING_EVENT:
userSubscriptionLeavingMap[subscriptionId] = user.Address
case TRANSFERRING_EVENT:
userSubscriptionTransferringMap[subscriptionId] = user.Address
default:
}
}
userSubscriptionMap[subscriptionId] = user.Address
}
}
nextUserSubscriptionIdAvailable = maxUserSubscriptionId + 1
}
|
[
"\"MEEP_SANDBOX_NAME\"",
"\"MEEP_PUBLIC_URL\"",
"\"MEEP_HOST_URL\""
] |
[] |
[
"MEEP_HOST_URL",
"MEEP_PUBLIC_URL",
"MEEP_SANDBOX_NAME"
] |
[]
|
["MEEP_HOST_URL", "MEEP_PUBLIC_URL", "MEEP_SANDBOX_NAME"]
|
go
| 3 | 0 | |
test/e2e/cluster_test.go
|
//go:build e2e
// +build e2e
/*
Copyright (c) 2021, 2022 Oracle and/or its affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
infrastructurev1beta1 "github.com/oracle/cluster-api-provider-oci/api/v1beta1"
"github.com/oracle/cluster-api-provider-oci/cloud/scope"
"github.com/oracle/oci-go-sdk/v53/common"
"github.com/oracle/oci-go-sdk/v53/core"
"github.com/oracle/oci-go-sdk/v53/networkloadbalancer"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/utils/pointer"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
capi_e2e "sigs.k8s.io/cluster-api/test/e2e"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/controller-runtime/pkg/client"
)
var _ = Describe("Workload cluster creation", func() {
var (
ctx = context.TODO()
specName = "create-workload-cluster"
namespace *corev1.Namespace
cancelWatches context.CancelFunc
result *clusterctl.ApplyClusterTemplateAndWaitResult
clusterName string
clusterNamePrefix string
additionalCleanup func()
)
BeforeEach(func() {
Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName)
Expect(e2eConfig).ToNot(BeNil(), "Invalid argument. e2eConfig can't be nil when calling %s spec", specName)
Expect(clusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. clusterctlConfigPath must be an existing file when calling %s spec", specName)
Expect(bootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. bootstrapClusterProxy can't be nil when calling %s spec", specName)
Expect(os.MkdirAll(artifactFolder, 0755)).To(Succeed(), "Invalid argument. artifactFolder can't be created for %s spec", specName)
Expect(e2eConfig.Variables).To(HaveKey(capi_e2e.KubernetesVersion))
// CLUSTER_NAME and CLUSTER_NAMESPACE allow testing against existing clusters.
// If CLUSTER_NAMESPACE is set, don't generate a new prefix; otherwise the
// correct namespace won't be found and a new cluster will be created.
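// Illustrative usage (hypothetical value): exporting CLUSTER_NAMESPACE=capoci-e2e-abc123
// before running the suite makes it reuse that namespace instead of creating a new one.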
clusterNameSpace := os.Getenv("CLUSTER_NAMESPACE")
if clusterNameSpace == "" {
clusterNamePrefix = fmt.Sprintf("capoci-e2e-%s", util.RandomString(6))
} else {
clusterNamePrefix = clusterNameSpace
}
// Set up a Namespace to host objects for this spec and create a watcher for the namespace events.
var err error
namespace, cancelWatches, err = setupSpecNamespace(ctx, clusterNamePrefix, bootstrapClusterProxy, artifactFolder)
Expect(err).NotTo(HaveOccurred())
result = new(clusterctl.ApplyClusterTemplateAndWaitResult)
additionalCleanup = nil
})
AfterEach(func() {
if result.Cluster == nil {
// this means the cluster failed to come up. We make an attempt to find the cluster to be able to fetch logs for the failed bootstrapping.
_ = bootstrapClusterProxy.GetClient().Get(ctx, types.NamespacedName{Name: clusterName, Namespace: namespace.Name}, result.Cluster)
}
cleanInput := cleanupInput{
SpecName: specName,
Cluster: result.Cluster,
ClusterProxy: bootstrapClusterProxy,
Namespace: namespace,
CancelWatches: cancelWatches,
IntervalsGetter: e2eConfig.GetIntervals,
SkipCleanup: skipCleanup,
AdditionalCleanup: additionalCleanup,
ArtifactFolder: artifactFolder,
}
dumpSpecResourcesAndCleanup(ctx, cleanInput)
})
It("With 1 control-plane nodes and 1 worker nodes", func() {
clusterName = getClusterName(clusterNamePrefix, "simple")
clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
ClusterProxy: bootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()),
ClusterctlConfigPath: clusterctlConfigPath,
KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(),
InfrastructureProvider: clusterctl.DefaultInfrastructureProvider,
Flavor: clusterctl.DefaultFlavor,
Namespace: namespace.Name,
ClusterName: clusterName,
KubernetesVersion: e2eConfig.GetVariable(capi_e2e.KubernetesVersion),
ControlPlaneMachineCount: pointer.Int64Ptr(1),
WorkerMachineCount: pointer.Int64Ptr(1),
},
CNIManifestPath: e2eConfig.GetVariable(capi_e2e.CNIPath),
WaitForClusterIntervals: e2eConfig.GetIntervals(specName, "wait-cluster"),
WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
WaitForMachineDeployments: e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
}, result)
})
It("With 3 control plane nodes spread across failure domains", func() {
clusterName = getClusterName(clusterNamePrefix, "3nodecontrolplane")
clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
ClusterProxy: bootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()),
ClusterctlConfigPath: clusterctlConfigPath,
KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(),
InfrastructureProvider: clusterctl.DefaultInfrastructureProvider,
Flavor: clusterctl.DefaultFlavor,
Namespace: namespace.Name,
ClusterName: clusterName,
KubernetesVersion: e2eConfig.GetVariable(capi_e2e.KubernetesVersion),
ControlPlaneMachineCount: pointer.Int64Ptr(3),
WorkerMachineCount: pointer.Int64Ptr(0),
},
WaitForClusterIntervals: e2eConfig.GetIntervals(specName, "wait-cluster"),
WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
WaitForMachineDeployments: e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
}, result)
validateFailureDomainSpread(namespace.Name, clusterName)
})
It("Antrea as CNI - With 1 control-plane nodes and 1 worker nodes", func() {
clusterName = getClusterName(clusterNamePrefix, "antrea")
clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
ClusterProxy: bootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()),
ClusterctlConfigPath: clusterctlConfigPath,
KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(),
InfrastructureProvider: clusterctl.DefaultInfrastructureProvider,
Flavor: "antrea",
Namespace: namespace.Name,
ClusterName: clusterName,
KubernetesVersion: e2eConfig.GetVariable(capi_e2e.KubernetesVersion),
ControlPlaneMachineCount: pointer.Int64Ptr(1),
WorkerMachineCount: pointer.Int64Ptr(1),
},
WaitForClusterIntervals: e2eConfig.GetIntervals(specName, "wait-cluster"),
WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
WaitForMachineDeployments: e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
}, result)
})
It("Oracle Linux - With 1 control-plane nodes and 1 worker nodes", func() {
clusterName = getClusterName(clusterNamePrefix, "oracle-linux")
clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
ClusterProxy: bootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()),
ClusterctlConfigPath: clusterctlConfigPath,
KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(),
InfrastructureProvider: clusterctl.DefaultInfrastructureProvider,
Flavor: "oracle-linux",
Namespace: namespace.Name,
ClusterName: clusterName,
KubernetesVersion: e2eConfig.GetVariable(capi_e2e.KubernetesVersion),
ControlPlaneMachineCount: pointer.Int64Ptr(1),
WorkerMachineCount: pointer.Int64Ptr(1),
},
WaitForClusterIntervals: e2eConfig.GetIntervals(specName, "wait-cluster"),
WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
WaitForMachineDeployments: e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
}, result)
validateOLImage(namespace.Name, clusterName)
})
It("Cloud Provider OCI testing", func() {
clusterName = getClusterName(clusterNamePrefix, "ccm-testing")
clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
ClusterProxy: bootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()),
ClusterctlConfigPath: clusterctlConfigPath,
KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(),
InfrastructureProvider: clusterctl.DefaultInfrastructureProvider,
Flavor: "ccm-testing",
Namespace: namespace.Name,
ClusterName: clusterName,
KubernetesVersion: e2eConfig.GetVariable("CCM_TEST_KUBERNETES_VERSION"),
ControlPlaneMachineCount: pointer.Int64Ptr(1),
WorkerMachineCount: pointer.Int64Ptr(1),
},
WaitForClusterIntervals: e2eConfig.GetIntervals(specName, "wait-cluster"),
WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
WaitForMachineDeployments: e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
}, result)
k8sClient := bootstrapClusterProxy.GetClient()
ociCluster := &infrastructurev1beta1.OCICluster{}
ociClusterName := client.ObjectKey{
Namespace: namespace.Name,
Name: clusterName,
}
err := k8sClient.Get(ctx, ociClusterName, ociCluster)
Expect(err).NotTo(HaveOccurred())
vcn := ociCluster.Spec.NetworkSpec.Vcn.ID
compartment := ociCluster.Spec.CompartmentId
subnetId := ""
for _, subnet := range ociCluster.Spec.NetworkSpec.Vcn.Subnets {
if subnet.Role == infrastructurev1beta1.ControlPlaneEndpointRole {
subnetId = *subnet.ID
}
}
Expect(subnetId).To(Not(Equal("")))
ccmPath := e2eConfig.GetVariable("CCM_PATH")
b, err := os.ReadFile(ccmPath)
Expect(err).NotTo(HaveOccurred())
ccmCrs := string(b)
ccmCrs = strings.ReplaceAll(ccmCrs, "OCI_COMPARTMENT_ID", compartment)
ccmCrs = strings.ReplaceAll(ccmCrs, "OCI_COMPARTMENT_ID", compartment)
ccmCrs = strings.ReplaceAll(ccmCrs, "VCN_ID", *vcn)
ccmCrs = strings.ReplaceAll(ccmCrs, "SUBNET_ID", subnetId)
workloadClusterProxy := bootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, clusterName)
err = workloadClusterProxy.Apply(ctx, []byte(ccmCrs))
Expect(err).NotTo(HaveOccurred())
Log("Creating the LB service")
clusterClient := workloadClusterProxy.GetClient()
lbServiceName := "test-svc-" + util.RandomString(6)
createLBService(metav1.NamespaceDefault, lbServiceName, clusterClient)
nginxStatefulsetInfo := statefulSetInfo{
name: "nginx-statefulset",
namespace: metav1.NamespaceDefault,
replicas: int32(1),
selector: map[string]string{"app": "nginx"},
storageClassName: "oci-bv",
volumeName: "nginx-volumes",
svcName: "nginx-svc",
svcPort: int32(80),
svcPortName: "nginx-web",
containerName: "nginx",
containerImage: "k8s.gcr.io/nginx-slim:0.8",
containerPort: int32(80),
podTerminationGracePeriod: int64(30),
volMountPath: "/usr/share/nginx/html",
}
By("Deploying StatefulSet on infra")
createStatefulSet(nginxStatefulsetInfo, clusterClient)
By("Deleting LB service")
deleteLBService(metav1.NamespaceDefault, lbServiceName, clusterClient)
By("Deleting retained dynamically provisioned volumes")
deleteStatefulSet(nginxStatefulsetInfo, clusterClient)
deletePVC(nginxStatefulsetInfo, clusterClient)
})
It("Custom networking NSG", func() {
clusterName = getClusterName(clusterNamePrefix, "custom-nsg")
clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
ClusterProxy: bootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()),
ClusterctlConfigPath: clusterctlConfigPath,
KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(),
InfrastructureProvider: clusterctl.DefaultInfrastructureProvider,
Flavor: "custom-networking-nsg",
Namespace: namespace.Name,
ClusterName: clusterName,
KubernetesVersion: e2eConfig.GetVariable(capi_e2e.KubernetesVersion),
ControlPlaneMachineCount: pointer.Int64Ptr(1),
WorkerMachineCount: pointer.Int64Ptr(1),
},
WaitForClusterIntervals: e2eConfig.GetIntervals(specName, "wait-cluster"),
WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
WaitForMachineDeployments: e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
}, result)
k8sClient := bootstrapClusterProxy.GetClient()
ociCluster := &infrastructurev1beta1.OCICluster{}
ociClusterName := client.ObjectKey{
Namespace: namespace.Name,
Name: clusterName,
}
err := k8sClient.Get(ctx, ociClusterName, ociCluster)
Expect(err).NotTo(HaveOccurred())
err = validateCustonNSGNetworking(ctx, *ociCluster, clusterName, namespace.Name, result.MachineDeployments[0].Name)
Expect(err).NotTo(HaveOccurred())
})
It("Custom networking Seclist", func() {
clusterName = getClusterName(clusterNamePrefix, "custom-seclist")
clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
ClusterProxy: bootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()),
ClusterctlConfigPath: clusterctlConfigPath,
KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(),
InfrastructureProvider: clusterctl.DefaultInfrastructureProvider,
Flavor: "custom-networking-seclist",
Namespace: namespace.Name,
ClusterName: clusterName,
KubernetesVersion: e2eConfig.GetVariable(capi_e2e.KubernetesVersion),
ControlPlaneMachineCount: pointer.Int64Ptr(1),
WorkerMachineCount: pointer.Int64Ptr(1),
},
WaitForClusterIntervals: e2eConfig.GetIntervals(specName, "wait-cluster"),
WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
WaitForMachineDeployments: e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
}, result)
k8sClient := bootstrapClusterProxy.GetClient()
ociCluster := &infrastructurev1beta1.OCICluster{}
ociClusterName := client.ObjectKey{
Namespace: namespace.Name,
Name: clusterName,
}
err := k8sClient.Get(ctx, ociClusterName, ociCluster)
Expect(err).NotTo(HaveOccurred())
err = validateCustomNetworkingSeclist(ctx, *ociCluster, clusterName)
Expect(err).NotTo(HaveOccurred())
})
It("Multiple nsg and subnet", func() {
clusterName = getClusterName(clusterNamePrefix, "multi-subnet-nsg")
clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
ClusterProxy: bootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()),
ClusterctlConfigPath: clusterctlConfigPath,
KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(),
InfrastructureProvider: clusterctl.DefaultInfrastructureProvider,
Flavor: "multiple-node-nsg",
Namespace: namespace.Name,
ClusterName: clusterName,
KubernetesVersion: e2eConfig.GetVariable(capi_e2e.KubernetesVersion),
ControlPlaneMachineCount: pointer.Int64Ptr(1),
WorkerMachineCount: pointer.Int64Ptr(1),
},
WaitForClusterIntervals: e2eConfig.GetIntervals(specName, "wait-cluster"),
WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
WaitForMachineDeployments: e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
}, result)
verifyMultipleNsgSubnet(ctx, namespace.Name, clusterName, result.MachineDeployments)
})
When("Bare Metal workload cluster creation", func() {
It("Bare Metal - With 1 control-plane nodes and 1 worker nodes", func() {
clusterName = getClusterName(clusterNamePrefix, "bare-metal")
clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
ClusterProxy: bootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()),
ClusterctlConfigPath: clusterctlConfigPath,
KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(),
InfrastructureProvider: clusterctl.DefaultInfrastructureProvider,
Flavor: "bare-metal",
Namespace: namespace.Name,
ClusterName: clusterName,
KubernetesVersion: e2eConfig.GetVariable(capi_e2e.KubernetesVersion),
ControlPlaneMachineCount: pointer.Int64Ptr(1),
WorkerMachineCount: pointer.Int64Ptr(1),
},
WaitForClusterIntervals: e2eConfig.GetIntervals(specName, "wait-cluster-bare-metal"),
WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane-bare-metal"),
WaitForMachineDeployments: e2eConfig.GetIntervals(specName, "wait-worker-nodes-bare-metal"),
}, result)
})
})
})
func verifyMultipleNsgSubnet(ctx context.Context, namespace string, clusterName string, mcDeployments []*clusterv1.MachineDeployment) {
ociCluster := &infrastructurev1beta1.OCICluster{}
ociClusterName := client.ObjectKey{
Namespace: namespace,
Name: clusterName,
}
err := bootstrapClusterProxy.GetClient().Get(ctx, ociClusterName, ociCluster)
Expect(err).NotTo(HaveOccurred())
arrSubnets := [2]string{}
i := 0
for _, subnet := range ociCluster.Spec.NetworkSpec.Vcn.Subnets {
if subnet.Role == infrastructurev1beta1.WorkerRole {
arrSubnets[i] = *subnet.ID
i++
}
}
i = 0
arrNsgs := [2]string{}
for _, nsg := range ociCluster.Spec.NetworkSpec.Vcn.NetworkSecurityGroups {
if nsg.Role == infrastructurev1beta1.WorkerRole {
arrNsgs[i] = *nsg.ID
i++
}
}
for _, mcDeployment := range mcDeployments {
lister := bootstrapClusterProxy.GetClient()
inClustersNamespaceListOption := client.InNamespace(namespace)
matchClusterListOption := client.MatchingLabels{
clusterv1.ClusterLabelName: clusterName,
}
matchClusterListOption[clusterv1.MachineDeploymentLabelName] = mcDeployment.Name
machineList := &clusterv1.MachineList{}
Expect(lister.List(context.Background(), machineList, inClustersNamespaceListOption, matchClusterListOption)).
To(Succeed(), "Couldn't list machines for the cluster %q", clusterName)
requiredIndex := 0
if mcDeployment.Name == fmt.Sprintf("%s-md-1", clusterName) {
requiredIndex = 1
}
for _, machine := range machineList.Items {
instanceOcid := strings.Split(*machine.Spec.ProviderID, "//")[1]
Log(fmt.Sprintf("Instance OCID is %s", instanceOcid))
exists := false
resp, err := computeClient.ListVnicAttachments(ctx, core.ListVnicAttachmentsRequest{
InstanceId: common.String(instanceOcid),
CompartmentId: common.String(os.Getenv("OCI_COMPARTMENT_ID")),
})
Expect(err).NotTo(HaveOccurred())
for _, attachment := range resp.Items {
if attachment.LifecycleState != core.VnicAttachmentLifecycleStateAttached {
continue
}
if attachment.VnicId == nil {
continue
}
vnic, err := vcnClient.GetVnic(ctx, core.GetVnicRequest{
VnicId: attachment.VnicId,
})
Expect(err).NotTo(HaveOccurred())
if vnic.IsPrimary != nil && *vnic.IsPrimary {
exists = true
Expect(vnic.NsgIds[0]).To(Equal(arrNsgs[requiredIndex]))
Expect(*(vnic.SubnetId)).To(Equal(arrSubnets[requiredIndex]))
}
}
Expect(exists).To(Equal(true))
}
}
}
func validateCustonNSGNetworking(ctx context.Context, ociCluster infrastructurev1beta1.OCICluster, clusterName string, nameSpace string, machineDeployment string) error {
vcnId := ociCluster.Spec.NetworkSpec.Vcn.ID
resp, err := vcnClient.GetVcn(ctx, core.GetVcnRequest{
VcnId: vcnId,
})
Expect(err).NotTo(HaveOccurred())
Expect(resp.CidrBlocks[0]).To(Equal("15.0.0.0/16"))
Expect(*resp.DisplayName).To(Equal(fmt.Sprintf("%s-test", clusterName)))
listResponse, err := vcnClient.ListSubnets(ctx, core.ListSubnetsRequest{
VcnId: vcnId,
CompartmentId: common.String(os.Getenv("OCI_COMPARTMENT_ID")),
})
Expect(err).NotTo(HaveOccurred())
Expect(len(listResponse.Items)).To(Equal(4))
for _, subnet := range ociCluster.Spec.NetworkSpec.Vcn.Subnets {
subnetId := subnet.ID
resp, err := vcnClient.GetSubnet(ctx, core.GetSubnetRequest{
SubnetId: subnetId,
})
Expect(err).NotTo(HaveOccurred())
switch subnet.Role {
case infrastructurev1beta1.ControlPlaneEndpointRole:
Expect(*resp.CidrBlock).To(Equal("15.0.0.0/28"))
Expect(*resp.DisplayName).To(Equal("ep-subnet"))
case infrastructurev1beta1.ControlPlaneRole:
Expect(*resp.CidrBlock).To(Equal("15.0.5.0/28"))
Expect(*resp.DisplayName).To(Equal("cp-mc-subnet"))
case infrastructurev1beta1.WorkerRole:
Expect(*resp.CidrBlock).To(Equal("15.0.10.0/24"))
Expect(*resp.DisplayName).To(Equal("worker-subnet"))
case infrastructurev1beta1.ServiceLoadBalancerRole:
Expect(*resp.CidrBlock).To(Equal("15.0.20.0/24"))
Expect(*resp.DisplayName).To(Equal("svc-lb-subnet"))
default:
return errors.New("invalid subnet role")
}
}
// to make sure that the original spec was not changed, we should compare with the
// original unchanged spec present in the file system
reader, _ := os.Open(e2eConfig.GetVariable("NSG_CLUSTER_PATH"))
ociClusterOriginal := &infrastructurev1beta1.OCICluster{}
err = yaml.NewYAMLOrJSONDecoder(reader, 4096).Decode(ociClusterOriginal)
Expect(err).NotTo(HaveOccurred())
for _, nsg := range ociCluster.Spec.NetworkSpec.Vcn.NetworkSecurityGroups {
nsgId := nsg.ID
resp, err := vcnClient.GetNetworkSecurityGroup(ctx, core.GetNetworkSecurityGroupRequest{
NetworkSecurityGroupId: nsgId,
})
Expect(err).NotTo(HaveOccurred())
switch nsg.Role {
case infrastructurev1beta1.ControlPlaneEndpointRole:
verifyNsg(ctx, "ep-nsg", resp, ociClusterOriginal, infrastructurev1beta1.ControlPlaneEndpointRole)
lbId := ociCluster.Spec.NetworkSpec.APIServerLB.LoadBalancerId
lb, err := lbClient.GetNetworkLoadBalancer(ctx, networkloadbalancer.GetNetworkLoadBalancerRequest{
NetworkLoadBalancerId: lbId,
})
Expect(err).NotTo(HaveOccurred())
Expect(lb.NetworkSecurityGroupIds[0]).To(Equal(*nsgId))
case infrastructurev1beta1.ControlPlaneRole:
verifyNsg(ctx, "cp-mc-nsg", resp, ociClusterOriginal, infrastructurev1beta1.ControlPlaneRole)
Log("Validating control plane machine vnic NSG")
validateVnicNSG(ctx, clusterName, nameSpace, nsgId, "")
case infrastructurev1beta1.WorkerRole:
verifyNsg(ctx, "worker", resp, ociClusterOriginal, infrastructurev1beta1.WorkerRole)
Log("Validating node machine vnic NSG")
validateVnicNSG(ctx, clusterName, nameSpace, nsgId, machineDeployment)
case infrastructurev1beta1.ServiceLoadBalancerRole:
verifyNsg(ctx, "service-lb-nsg", resp, ociClusterOriginal, infrastructurev1beta1.ServiceLoadBalancerRole)
default:
return errors.New("invalid nsg role")
}
}
return nil
}
func validateVnicNSG(ctx context.Context, clusterName string, nameSpace string, nsgId *string, machineDeployment string) {
lister := bootstrapClusterProxy.GetClient()
inClustersNamespaceListOption := client.InNamespace(nameSpace)
matchClusterListOption := client.MatchingLabels{
clusterv1.ClusterLabelName: clusterName,
}
// it's either a machine deployment or the control plane
if machineDeployment != "" {
matchClusterListOption[clusterv1.MachineDeploymentLabelName] = machineDeployment
} else {
matchClusterListOption[clusterv1.MachineControlPlaneLabelName] = ""
}
machineList := &clusterv1.MachineList{}
Expect(lister.List(context.Background(), machineList, inClustersNamespaceListOption, matchClusterListOption)).
To(Succeed(), "Couldn't list machines for the cluster %q", clusterName)
Log(fmt.Sprintf("NSG id is %s", *nsgId))
exists := false
for _, machine := range machineList.Items {
instanceOcid := strings.Split(*machine.Spec.ProviderID, "//")[1]
Log(fmt.Sprintf("Instance OCID is %s", instanceOcid))
resp, err := computeClient.ListVnicAttachments(ctx, core.ListVnicAttachmentsRequest{
InstanceId: common.String(instanceOcid),
CompartmentId: common.String(os.Getenv("OCI_COMPARTMENT_ID")),
})
Expect(err).NotTo(HaveOccurred())
for _, attachment := range resp.Items {
if attachment.LifecycleState != core.VnicAttachmentLifecycleStateAttached {
continue
}
if attachment.VnicId == nil {
continue
}
vnic, err := vcnClient.GetVnic(ctx, core.GetVnicRequest{
VnicId: attachment.VnicId,
})
Expect(err).NotTo(HaveOccurred())
if vnic.IsPrimary != nil && *vnic.IsPrimary {
exists = true
Expect(vnic.NsgIds[0]).To(Equal(*nsgId))
}
}
Expect(exists).To(Equal(true))
}
}
func validateCustomNetworkingSeclist(ctx context.Context, ociCluster infrastructurev1beta1.OCICluster, clusterName string) error {
vcnId := ociCluster.Spec.NetworkSpec.Vcn.ID
resp, err := vcnClient.GetVcn(ctx, core.GetVcnRequest{
VcnId: vcnId,
})
Expect(err).NotTo(HaveOccurred())
Expect(resp.CidrBlocks[0]).To(Equal("10.0.0.0/16"))
Expect(*resp.DisplayName).To(Equal(fmt.Sprintf("%s-test", clusterName)))
listResponse, err := vcnClient.ListSubnets(ctx, core.ListSubnetsRequest{
VcnId: vcnId,
CompartmentId: common.String(os.Getenv("OCI_COMPARTMENT_ID")),
})
Expect(err).NotTo(HaveOccurred())
Expect(len(listResponse.Items)).To(Equal(4))
	// to make sure the original spec was not changed, compare against the original,
	// unchanged spec stored on the file system
	reader, err := os.Open(e2eConfig.GetVariable("SECLIST_CLUSTER_PATH"))
	Expect(err).NotTo(HaveOccurred())
	ociClusterOriginal := &infrastructurev1beta1.OCICluster{}
	err = yaml.NewYAMLOrJSONDecoder(reader, 4096).Decode(ociClusterOriginal)
	Expect(err).NotTo(HaveOccurred())
for _, subnet := range ociCluster.Spec.NetworkSpec.Vcn.Subnets {
subnetId := subnet.ID
resp, err := vcnClient.GetSubnet(ctx, core.GetSubnetRequest{
SubnetId: subnetId,
})
Expect(err).NotTo(HaveOccurred())
switch subnet.Role {
case infrastructurev1beta1.ControlPlaneEndpointRole:
verifySeclistSubnet(ctx, resp, ociClusterOriginal, subnet, "ep-subnet", scope.ControlPlaneEndpointSubnetDefaultCIDR)
case infrastructurev1beta1.ControlPlaneRole:
verifySeclistSubnet(ctx, resp, ociClusterOriginal, subnet, "cp-mc-subnet", scope.ControlPlaneMachineSubnetDefaultCIDR)
case infrastructurev1beta1.WorkerRole:
verifySeclistSubnet(ctx, resp, ociClusterOriginal, subnet, "worker-subnet", scope.WorkerSubnetDefaultCIDR)
case infrastructurev1beta1.ServiceLoadBalancerRole:
verifySeclistSubnet(ctx, resp, ociClusterOriginal, subnet, "svc-lb-subnet", scope.ServiceLoadBalancerDefaultCIDR)
default:
return errors.New("invalid subnet role")
}
}
return nil
}
func verifySeclistSubnet(ctx context.Context, resp core.GetSubnetResponse, ociClusterOriginal *infrastructurev1beta1.OCICluster, subnet *infrastructurev1beta1.Subnet, subnetName string, cidrBlock string) {
Expect(*resp.CidrBlock).To(Equal(cidrBlock))
Expect(*resp.DisplayName).To(Equal(subnetName))
secList := resp.SecurityListIds[0]
r, err := vcnClient.GetSecurityList(ctx, core.GetSecurityListRequest{
SecurityListId: common.String(secList),
})
Expect(err).NotTo(HaveOccurred())
matches := 0
for _, n := range ociClusterOriginal.Spec.NetworkSpec.Vcn.Subnets {
if n.Role == subnet.Role {
matches++
var ingressRules = make([]core.IngressSecurityRule, 0)
for _, irule := range n.SecurityList.IngressRules {
ingressRules = append(ingressRules, convertSecurityListIngressRule(irule))
}
var egressRules = make([]core.EgressSecurityRule, 0)
for _, irule := range n.SecurityList.EgressRules {
egressRules = append(egressRules, convertSecurityListEgressRule(irule))
}
Expect(r.EgressSecurityRules).To(Equal(egressRules))
Expect(r.IngressSecurityRules).To(Equal(ingressRules))
}
}
Expect(matches).To(Equal(1))
}
func verifyNsg(ctx context.Context, displayName string, resp core.GetNetworkSecurityGroupResponse, ociClusterOriginal *infrastructurev1beta1.OCICluster, role infrastructurev1beta1.Role) {
listResponse, err := vcnClient.ListNetworkSecurityGroupSecurityRules(ctx, core.ListNetworkSecurityGroupSecurityRulesRequest{
NetworkSecurityGroupId: resp.Id,
})
Expect(err).NotTo(HaveOccurred())
ingressRules, egressRules := generateSpecFromSecurityRules(listResponse.Items)
matches := 0
for _, n := range ociClusterOriginal.Spec.NetworkSpec.Vcn.NetworkSecurityGroups {
if n.Role == role {
matches++
Expect(ingressRules).To(Equal(n.IngressRules))
Expect(egressRules).To(Equal(n.EgressRules))
}
}
Expect(matches).To(Equal(1))
}
func generateSpecFromSecurityRules(rules []core.SecurityRule) ([]infrastructurev1beta1.IngressSecurityRuleForNSG, []infrastructurev1beta1.EgressSecurityRuleForNSG) {
var ingressRules []infrastructurev1beta1.IngressSecurityRuleForNSG
var egressRules []infrastructurev1beta1.EgressSecurityRuleForNSG
for _, rule := range rules {
		// while comparing values, the boolean value always has to be set
stateless := rule.IsStateless
if stateless == nil {
stateless = common.Bool(false)
}
icmpOptions, tcpOptions, udpOptions := getProtocolOptionsForSpec(rule.IcmpOptions, rule.TcpOptions, rule.UdpOptions)
switch rule.Direction {
case core.SecurityRuleDirectionIngress:
ingressRule := infrastructurev1beta1.IngressSecurityRuleForNSG{
IngressSecurityRule: infrastructurev1beta1.IngressSecurityRule{
Protocol: rule.Protocol,
Source: rule.Source,
IcmpOptions: icmpOptions,
IsStateless: stateless,
SourceType: infrastructurev1beta1.IngressSecurityRuleSourceTypeEnum(rule.SourceType),
TcpOptions: tcpOptions,
UdpOptions: udpOptions,
Description: rule.Description,
},
}
ingressRules = append(ingressRules, ingressRule)
case core.SecurityRuleDirectionEgress:
egressRule := infrastructurev1beta1.EgressSecurityRuleForNSG{
EgressSecurityRule: infrastructurev1beta1.EgressSecurityRule{
Destination: rule.Destination,
Protocol: rule.Protocol,
DestinationType: infrastructurev1beta1.EgressSecurityRuleDestinationTypeEnum(rule.DestinationType),
IcmpOptions: icmpOptions,
IsStateless: stateless,
TcpOptions: tcpOptions,
UdpOptions: udpOptions,
Description: rule.Description,
},
}
egressRules = append(egressRules, egressRule)
}
}
return ingressRules, egressRules
}
func validateFailureDomainSpread(nameSpace string, clusterName string) {
lister := bootstrapClusterProxy.GetClient()
inClustersNamespaceListOption := client.InNamespace(nameSpace)
matchClusterListOption := client.MatchingLabels{
clusterv1.ClusterLabelName: clusterName,
clusterv1.MachineControlPlaneLabelName: "",
}
machineList := &clusterv1.MachineList{}
Expect(lister.List(context.Background(), machineList, inClustersNamespaceListOption, matchClusterListOption)).
To(Succeed(), "Couldn't list machines for the cluster %q", clusterName)
failureDomainCounts := map[string]int{}
ociFailureDomain := map[string]int{}
// Count all control plane machine failure domains.
for _, machine := range machineList.Items {
if machine.Spec.FailureDomain == nil {
continue
}
failureDomainCounts[*machine.Spec.FailureDomain]++
instanceOcid := strings.Split(*machine.Spec.ProviderID, "//")[1]
Log(fmt.Sprintf("Instance OCID is %s", instanceOcid))
resp, err := computeClient.GetInstance(context.Background(), core.GetInstanceRequest{
InstanceId: common.String(instanceOcid),
})
Expect(err).NotTo(HaveOccurred())
if adCount > 1 {
ociFailureDomain[*resp.AvailabilityDomain]++
} else {
ociFailureDomain[*resp.FaultDomain]++
}
}
Expect(len(failureDomainCounts)).To(Equal(3))
Expect(len(ociFailureDomain)).To(Equal(3))
}
func validateOLImage(nameSpace string, clusterName string) {
lister := bootstrapClusterProxy.GetClient()
inClustersNamespaceListOption := client.InNamespace(nameSpace)
matchClusterListOption := client.MatchingLabels{
clusterv1.ClusterLabelName: clusterName,
}
machineList := &clusterv1.MachineList{}
Expect(lister.List(context.Background(), machineList, inClustersNamespaceListOption, matchClusterListOption)).
To(Succeed(), "Couldn't list machines for the cluster %q", clusterName)
Expect(len(machineList.Items)).To(Equal(2))
for _, machine := range machineList.Items {
instanceOcid := strings.Split(*machine.Spec.ProviderID, "//")[1]
Log(fmt.Sprintf("Instance OCID is %s", instanceOcid))
resp, err := computeClient.GetInstance(context.Background(), core.GetInstanceRequest{
InstanceId: common.String(instanceOcid),
})
Expect(err).NotTo(HaveOccurred())
instanceSourceDetails, ok := resp.SourceDetails.(core.InstanceSourceViaImageDetails)
Expect(ok).To(BeTrue())
Expect(*instanceSourceDetails.ImageId).To(Equal(os.Getenv("OCI_ORACLE_LINUX_IMAGE_ID")))
}
}
func getClusterName(prefix, specName string) string {
clusterName := os.Getenv("CLUSTER_NAME")
if clusterName == "" {
clusterName = fmt.Sprintf("%s-%s", prefix, specName)
}
fmt.Fprintf(GinkgoWriter, "INFO: Cluster name is %s\n", clusterName)
return clusterName
}
func getProtocolOptionsForSpec(icmp *core.IcmpOptions, tcp *core.TcpOptions, udp *core.UdpOptions) (*infrastructurev1beta1.IcmpOptions, *infrastructurev1beta1.TcpOptions,
*infrastructurev1beta1.UdpOptions) {
var icmpOptions *infrastructurev1beta1.IcmpOptions
var tcpOptions *infrastructurev1beta1.TcpOptions
var udpOptions *infrastructurev1beta1.UdpOptions
if icmp != nil {
icmpOptions = &infrastructurev1beta1.IcmpOptions{
Type: icmp.Type,
			Code: icmp.Code,
}
}
if tcp != nil {
tcpOptions = &infrastructurev1beta1.TcpOptions{}
if tcp.DestinationPortRange != nil {
tcpOptions.DestinationPortRange = &infrastructurev1beta1.PortRange{}
tcpOptions.DestinationPortRange.Max = tcp.DestinationPortRange.Max
tcpOptions.DestinationPortRange.Min = tcp.DestinationPortRange.Min
}
if tcp.SourcePortRange != nil {
tcpOptions.SourcePortRange = &infrastructurev1beta1.PortRange{}
tcpOptions.SourcePortRange.Max = tcp.SourcePortRange.Max
tcpOptions.SourcePortRange.Min = tcp.SourcePortRange.Min
}
}
if udp != nil {
udpOptions = &infrastructurev1beta1.UdpOptions{}
if udp.DestinationPortRange != nil {
udpOptions.DestinationPortRange = &infrastructurev1beta1.PortRange{}
udpOptions.DestinationPortRange.Max = udp.DestinationPortRange.Max
udpOptions.DestinationPortRange.Min = udp.DestinationPortRange.Min
}
if udp.SourcePortRange != nil {
udpOptions.SourcePortRange = &infrastructurev1beta1.PortRange{}
udpOptions.SourcePortRange.Max = udp.SourcePortRange.Max
udpOptions.SourcePortRange.Min = udp.SourcePortRange.Min
}
}
return icmpOptions, tcpOptions, udpOptions
}
func convertSecurityListIngressRule(rule infrastructurev1beta1.IngressSecurityRule) core.IngressSecurityRule {
var icmpOptions *core.IcmpOptions
var tcpOptions *core.TcpOptions
var udpOptions *core.UdpOptions
icmpOptions, tcpOptions, udpOptions = getProtocolOptions(rule.IcmpOptions, rule.TcpOptions, rule.UdpOptions)
	// while comparing values, the boolean value always has to be set
stateless := rule.IsStateless
if stateless == nil {
stateless = common.Bool(false)
}
return core.IngressSecurityRule{
Protocol: rule.Protocol,
Source: rule.Source,
IcmpOptions: icmpOptions,
IsStateless: stateless,
SourceType: core.IngressSecurityRuleSourceTypeEnum(rule.SourceType),
TcpOptions: tcpOptions,
UdpOptions: udpOptions,
Description: rule.Description,
}
}
func convertSecurityListEgressRule(rule infrastructurev1beta1.EgressSecurityRule) core.EgressSecurityRule {
var icmpOptions *core.IcmpOptions
var tcpOptions *core.TcpOptions
var udpOptions *core.UdpOptions
	// while comparing values, the boolean value always has to be set
stateless := rule.IsStateless
if stateless == nil {
stateless = common.Bool(false)
}
icmpOptions, tcpOptions, udpOptions = getProtocolOptions(rule.IcmpOptions, rule.TcpOptions, rule.UdpOptions)
return core.EgressSecurityRule{
Protocol: rule.Protocol,
Destination: rule.Destination,
IcmpOptions: icmpOptions,
IsStateless: stateless,
DestinationType: core.EgressSecurityRuleDestinationTypeEnum(rule.DestinationType),
TcpOptions: tcpOptions,
UdpOptions: udpOptions,
Description: rule.Description,
}
}
func getProtocolOptions(icmp *infrastructurev1beta1.IcmpOptions, tcp *infrastructurev1beta1.TcpOptions,
udp *infrastructurev1beta1.UdpOptions) (*core.IcmpOptions, *core.TcpOptions, *core.UdpOptions) {
var icmpOptions *core.IcmpOptions
var tcpOptions *core.TcpOptions
var udpOptions *core.UdpOptions
if icmp != nil {
icmpOptions = &core.IcmpOptions{
Type: icmp.Type,
			Code: icmp.Code,
}
}
if tcp != nil {
tcpOptions = &core.TcpOptions{}
if tcp.DestinationPortRange != nil {
tcpOptions.DestinationPortRange = &core.PortRange{}
tcpOptions.DestinationPortRange.Max = tcp.DestinationPortRange.Max
tcpOptions.DestinationPortRange.Min = tcp.DestinationPortRange.Min
}
if tcp.SourcePortRange != nil {
tcpOptions.SourcePortRange = &core.PortRange{}
tcpOptions.SourcePortRange.Max = tcp.SourcePortRange.Max
tcpOptions.SourcePortRange.Min = tcp.SourcePortRange.Min
}
}
if udp != nil {
udpOptions = &core.UdpOptions{}
if udp.DestinationPortRange != nil {
udpOptions.DestinationPortRange = &core.PortRange{}
udpOptions.DestinationPortRange.Max = udp.DestinationPortRange.Max
udpOptions.DestinationPortRange.Min = udp.DestinationPortRange.Min
}
if udp.SourcePortRange != nil {
udpOptions.SourcePortRange = &core.PortRange{}
udpOptions.SourcePortRange.Max = udp.SourcePortRange.Max
udpOptions.SourcePortRange.Min = udp.SourcePortRange.Min
}
}
return icmpOptions, tcpOptions, udpOptions
}
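// Illustrative sketch only (not invoked by any test): the converters above turn a spec-side
// security rule into the OCI SDK type so it can be compared directly against the rules
// returned by the API, as verifySeclistSubnet does. The concrete rule values below are
// hypothetical.
func exampleConvertIngressRule() core.IngressSecurityRule {
	// A simple TCP rule expressed in the cluster spec types.
	specRule := infrastructurev1beta1.IngressSecurityRule{
		Protocol:    common.String("6"), // protocol "6" is TCP
		Source:      common.String("10.0.0.0/16"),
		IsStateless: common.Bool(false),
	}
	// convertSecurityListIngressRule fills in protocol options and normalizes IsStateless.
	return convertSecurityListIngressRule(specRule)
}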
|
[
"\"CLUSTER_NAMESPACE\"",
"\"OCI_COMPARTMENT_ID\"",
"\"OCI_COMPARTMENT_ID\"",
"\"OCI_COMPARTMENT_ID\"",
"\"OCI_COMPARTMENT_ID\"",
"\"OCI_ORACLE_LINUX_IMAGE_ID\"",
"\"CLUSTER_NAME\""
] |
[] |
[
"OCI_COMPARTMENT_ID",
"OCI_ORACLE_LINUX_IMAGE_ID",
"CLUSTER_NAME",
"CLUSTER_NAMESPACE"
] |
[]
|
["OCI_COMPARTMENT_ID", "OCI_ORACLE_LINUX_IMAGE_ID", "CLUSTER_NAME", "CLUSTER_NAMESPACE"]
|
go
| 4 | 0 | |
mirror/github/licenses.py
|
"""
Collect license information for a repository or a list of repositories
"""
import json
import os
import sys
import time
from typing import Any, Dict, List
import click
import requests
subcommand = "licenses"
def get_license(repo_api_url: str) -> Dict[str, Any]:
"""
Gets the license for the repository at the given GitHub API URL.
Args:
repo_api_url
GitHub API URL for a repository. These are of the form:
https://api.github.com/repos/:owner/:name
This URL is allowed to have a trailing slash.
Returns: JSON-serializable dictionary of the form:
{'ending_rate_limit': <rate limit after the query>, 'data': <license info>}
"""
headers = {
'Accept': 'application/vnd.github.v3+json',
'User-Agent': 'simiotics mirror',
}
github_token = os.environ.get('GITHUB_TOKEN')
if github_token is not None and github_token != '':
headers['Authorization'] = f'token {github_token}'
if repo_api_url[-1] == '/':
repo_api_url = repo_api_url[:-1]
license_url = f'{repo_api_url}/license'
    r = requests.get(license_url, headers=headers)
ending_rate_limit_raw = r.headers.get('X-RateLimit-Remaining', '')
try:
ending_rate_limit = int(ending_rate_limit_raw)
    except (TypeError, ValueError):
ending_rate_limit = -1
result: Dict[str, Any] = {
'ending_rate_limit': ending_rate_limit,
'data': r.json(),
}
return result
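def _example_get_license_usage() -> None:
    # Illustrative sketch only (not wired into the CLI below): fetch license metadata for a
    # single repository. The repository URL is just an example; exporting GITHUB_TOKEN first
    # raises the unauthenticated rate limit.
    result = get_license('https://api.github.com/repos/octocat/Hello-World')
    license_info = result['data'].get('license') or {}
    print(license_info.get('spdx_id'), '| remaining rate limit:', result['ending_rate_limit'])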
@click.command(context_settings={
"ignore_unknown_options": True
})
@click.option('--repos', '-r', 'repos_json', required=True,
              help='File with JSON array of GitHub API URLs for repos (if value is "file:<filename>") '
                   'OR comma-separated list of GitHub API URLs of repos')
@click.option('--interval', '-t', type=float, default=0.01, help='Number of seconds to wait between successive license requests')
@click.option('--min-rate-limit', '-l', type=int, default=30, help='Minimum remaining rate limit on the API under which the crawl is interrupted')
@click.option('--outfile', '-o', default=None, help='File to which to write license information as JSON lines, one per repository')
def licenses_handler(repos_json: str, interval: float, min_rate_limit: int, outfile: str) -> None:
"""
Handler for licenses subcommand
Args:
args
argparse namespace representing arguments to "mirror github licenses" command parse from
the command line
Returns: None, prints license information for the repositories in args.repos to stdout or to the
file specified by args.outfile
"""
repos: List[str] = []
    if repos_json.startswith('file:'):
infile = repos_json[len('file:'):]
with open(infile, 'r') as ifp:
repos = json.load(ifp)
else:
repos = repos_json.split(',')
ofp = sys.stdout
if outfile is not None:
ofp = open(outfile, 'w')
for repo in repos:
time.sleep(interval)
result = get_license(repo)
print(json.dumps(result), file=ofp)
if result['ending_rate_limit'] < min_rate_limit:
break
if outfile is not None:
ofp.close()
|
[] |
[] |
[
"GITHUB_TOKEN"
] |
[]
|
["GITHUB_TOKEN"]
|
python
| 1 | 0 | |
python/daisykit/utils/asset_store.py
|
# Copyright 2021 The DaisyKit Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This source file was taken from ncnn library with some modifications.
"""Asset store which provides pretrained assets."""
from __future__ import print_function
import logging
import os
import pathlib
import zipfile
import portalocker
from .download import download, check_sha1
__all__ = ["get_asset_file", "purge"]
_asset_sha1 = {
name: checksum
for checksum, name in [('ccaf99f5ab164bbc548af61e1c1df207e9dcb0f1', 'configs/object_detector_yolox_config.json'), ('b681cb0081900d3be63a9cd84bcb7195123f8078', 'configs/background_matting_config.json'), ('1b3c28c98e2d9e19112bb604da9a5bbdcea1bba3', 'configs/barcode_scanner_config.json'), ('2a3c28c98cc308a0c16242a3b3cdc1bc116bb4f6', 'configs/pushup_counter_config.json'), ('83011c1dbbd1fad3a68b0b716bae9ed12d76e339', 'configs/hand_pose_yolox_mp_config.json'), ('acd2cf1c46c188d58fbcec8c4c2d0aaad4473483', 'configs/face_detector_config.json'), ('3713bad289962ee5e81a9dd761013fb95352969e', 'configs/human_pose_movenet_config.json'), ('069623b785bc2a0850bce39f0f5ef48a2c37f758', 'images/background.jpg'), ('3e57a3fbb29f530411c5fe566c5d8215b05b1913', 'models/facial_landmark/pfld-sim.param'), ('cf8cc7a623ba31cb8c700d59bf7bd622f081d695', 'models/facial_landmark/pfld-sim.bin'), ('37271c3e4dbdb21e7fe1939e7b46ff88fc978dba', 'models/face_detection/rfb/RFB-320.param'), ('91ae3956c62b14176e224d0997b8f693f9913612', 'models/face_detection/rfb/RFB-320.bin'), ('b673647bd428ee5eb8b2467f2986a7f3c6b85a7e', 'models/face_detection/slim/slim_320.param'), ('7865fa597b1d659b35070862512a3862c9b7bd6c', 'models/face_detection/slim/slim_320.bin'), ('1ab6d93570e408e8c85c759ee1c61d67cb9509b6', 'models/face_detection/yolo_fastest_with_mask/yolo-fastest-opt.bin'), ('7451cb1978b7f51a2cc92248e9c103fd456e0f74', 'models/face_detection/yolo_fastest_with_mask/yolo-fastest-opt.param'), ('f267486e12f817da0b810ef03aaad940ab5c5253', 'models/human_pose_detection/ultralight_nano/Ultralight-Nano-SimplePose.param'), ('505bacafeb0a1607a1ab7096e208106ac74f8ead', 'models/human_pose_detection/ultralight_nano/Ultralight-Nano-SimplePose.bin'), ('dc944ed31eaad52d6ef0bf98852aa174ed988796', 'models/human_pose_detection/movenet/lightning.param'), ('78bd88d738dcc7edcc7bc570e1456ae26f9611ec', 'models/human_pose_detection/movenet/thunder.param'), ('1af4dcb8b69f4794df8b281e8c5367ed0ee38290', 'models/human_pose_detection/movenet/thunder.bin'), ('d6b50b69a7e7c9ae8614f34f643c006b35daf3fd', 'models/human_pose_detection/movenet/lightning.bin'), ('be69b7a08c2decc3de52cfc7f7086bc8bc4046f3', 'models/object_detection/yolox-tiny.param'), ('aef43afeef44c045a4173a45d512ff98facfdfcb', 'models/object_detection/yolox-nano.bin'), ('836cb7b908db231c4e575c2ece9d90f031474a37', 'models/object_detection/yolox-nano.param'), ('dd3a085ceaf7efe96541772a1c9ea28b288ded0c', 'models/object_detection/yolox-tiny.bin'), ('c4a952480ea26c1e2097e80f5589df707f31463c', 'models/human_detection/ssd_mobilenetv2.param'), ('88d4e03b3cccc25de4f214ef55d42b52173653c0', 'models/human_detection/ssd_mobilenetv2.bin'), ('ed2c3819ea314f79613cae2cc4edbe509d274702', 'models/action_classification/is_pushup.param'), ('2e37cd61fd083b0d7b9cd6d11bb37b06d95a1fd6', 'models/action_classification/is_pushup.bin'), ('adee101923317578dc8fa41ca680fd9c6f877187', 'models/background_matting/erd/erdnet.param'), ('c54b251a44fb0e542ff77ea3bac79883a63bc8d7', 'models/background_matting/erd/erdnet.bin'), ('8d0ee0cfe95843d72c7da1948ae535899ebd2711', 'models/hand_pose/hand_lite-op.param'), ('c9fe82e4c0fe6ee274757cece1a9269578806282', 'models/hand_pose/yolox_hand_relu.bin'), ('f09f29e615a92bf8f51a6ed35b6fc91cb4778c54', 'models/hand_pose/hand_lite-op.bin'), ('24455837664448b4ad00da95d797c475303932f7', 'models/hand_pose/yolox_hand_swish.bin'), ('e859e4766d5892085678c253b06324e463ab4728', 'models/hand_pose/hand_full-op.bin'), ('cf4497bf1ebd69f9fe404fbad765af29368fbb45', 'models/hand_pose/hand_full-op.param'), 
('5c2d4a808bc5a99c5bde698b1e52052ec0d0eee4', 'models/hand_pose/yolox_hand_relu.param'), ('7fa457c0a355fd56b9fd72806b8ba2738e1b1dda', 'models/hand_pose/yolox_hand_swish.param'), ('62b0166f33214ee3c311028e5b57d2d333da5d05', 'models/hand_pose/human_pose_movenet_config.json')]
}
_split_asset_bins = {}
github_repo_url = "https://github.com/Daisykit-AI/daisykit-assets/raw/master/"
_url_format = "{repo_url}{file_name}"
def merge_file(root, files_in, file_out, remove=True):
with open(file_out, "wb") as fd_out:
for file_in in files_in:
file = os.path.join(root, file_in)
with open(file, "rb") as fd_in:
fd_out.write(fd_in.read())
            if remove:
os.remove(file)
def short_hash(name):
if name not in _asset_sha1:
raise ValueError(
"Pretrained asset for {name} is not available.".format(name=name)
)
return _asset_sha1[name][:8]
def get_asset_file(name, tag=None, root=os.path.join("~", ".daisykit", "assets")):
r"""Return location for the pretrained on local file system.
This function will download from online asset zoo when asset cannot be found or has mismatch.
The root directory will be created if it doesn't exist.
Parameters
----------
    name : str
        Name of the asset.
    tag : str, optional
        SHA-1 checksum string; when given it is appended to the cached file name and used to
        verify the downloaded file instead of the checksum recorded in _asset_sha1.
    root : str, default '~/.daisykit/assets'
        Location for keeping the asset parameters.
Returns
-------
file_path
Path to the requested asset file.
"""
if "DAISYKIT_HOME" in os.environ:
root = os.path.join(os.environ["DAISYKIT_HOME"], "assets")
use_tag = isinstance(tag, str)
if use_tag:
file_name = "{name}-{short_hash}".format(name=name, short_hash=tag)
else:
file_name = "{name}".format(name=name)
root = os.path.expanduser(root)
params_path = os.path.join(root, file_name)
lockfile = os.path.join(root, file_name + ".lock")
# Create folder
pathlib.Path(lockfile).parents[0].mkdir(parents=True, exist_ok=True)
if use_tag:
sha1_hash = tag
else:
sha1_hash = _asset_sha1[name]
with portalocker.Lock(
lockfile, timeout=int(os.environ.get(
"DAISYKIT_ASSET_LOCK_TIMEOUT", 300))
):
if os.path.exists(params_path):
if check_sha1(params_path, sha1_hash):
return params_path
else:
logging.warning(
"Hash mismatch in the content of asset file '%s' detected. "
"Downloading again.",
params_path,
)
else:
logging.info("Asset file not found. Downloading.")
zip_file_path = os.path.join(root, file_name)
if file_name in _split_asset_bins:
file_name_parts = [
"%s.part%02d" % (file_name, i + 1)
for i in range(_split_asset_bins[file_name])
]
for file_name_part in file_name_parts:
file_path = os.path.join(root, file_name_part)
repo_url = os.environ.get("DAISYKIT_REPO", github_repo_url)
if repo_url[-1] != "/":
repo_url = repo_url + "/"
download(
_url_format.format(repo_url=repo_url,
file_name=file_name_part),
path=file_path,
overwrite=True,
)
merge_file(root, file_name_parts, zip_file_path)
else:
repo_url = os.environ.get("DAISYKIT_REPO", github_repo_url)
if repo_url[-1] != "/":
repo_url = repo_url + "/"
download(
_url_format.format(repo_url=repo_url, file_name=file_name),
path=zip_file_path,
overwrite=True,
)
if zip_file_path.endswith(".zip"):
with zipfile.ZipFile(zip_file_path) as zf:
zf.extractall(root)
os.remove(zip_file_path)
# Make sure we write the asset file on networked filesystems
try:
os.sync()
except AttributeError:
pass
if check_sha1(params_path, sha1_hash):
return params_path
else:
raise ValueError(
"Downloaded file has different hash. Please try again.")
def purge(root=os.path.join("~", ".daisykit", "assets")):
r"""Purge all pretrained asset files in local file store.
Parameters
----------
root : str, default '~/.daisykit/assets'
Location for keeping the asset parameters.
"""
root = os.path.expanduser(root)
files = os.listdir(root)
for f in files:
if f.endswith(".params"):
os.remove(os.path.join(root, f))
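def _example_get_asset_file_usage() -> None:
    # Illustrative sketch only: fetch (or reuse the cached copy of) one of the assets listed in
    # _asset_sha1 above. The first call needs network access to the asset repository; the returned
    # path points at the checksum-verified local copy under ~/.daisykit/assets (or $DAISYKIT_HOME/assets).
    config_path = get_asset_file("configs/face_detector_config.json")
    print("face detector config cached at", config_path)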
|
[] |
[] |
[
"DAISYKIT_REPO",
"DAISYKIT_HOME",
"DAISYKIT_ASSET_LOCK_TIMEOUT"
] |
[]
|
["DAISYKIT_REPO", "DAISYKIT_HOME", "DAISYKIT_ASSET_LOCK_TIMEOUT"]
|
python
| 3 | 0 | |
cmd/synthesizer/main.go
|
//-- Package Declaration -----------------------------------------------------------------------------------------------
package main
//-- Imports -----------------------------------------------------------------------------------------------------------
import (
"log"
"math/rand"
"time"
"github.com/JustonDavies/go_browser_forensics/configs"
"github.com/JustonDavies/go_browser_forensics/pkg/browsers"
)
//-- Constants ---------------------------------------------------------------------------------------------------------
//-- Structs -----------------------------------------------------------------------------------------------------------
//-- Exported Functions ------------------------------------------------------------------------------------------------
func main() {
//-- Log nice output ----------
var start = time.Now().Unix()
log.Println(`Starting task...`)
//-- Perform task ----------
var browserz = browsers.Open()
if len(browserz) < 1 {
panic(`unable to open any supported browsers, aborting...`)
} else {
defer browsers.Close(browserz)
rand.Seed(time.Now().UnixNano())
}
browsers.Load(browserz)
browsers.Purge(browserz)
log.Println(`Creating history...`)
for _, item := range configs.ActivityItems {
var browser = browserz[rand.Intn(len(browserz))]
		var historyItem = browsers.History{
			Name:        item.Name,
			URL:         item.URL,
			Visits:      rand.Intn(configs.MaximumVisits),
			VisitWindow: configs.DefaultDuration,
		}
		if err := browser.AddHistory(historyItem); err != nil {
			log.Printf("unable to inject history item for: \n\tURL: '%s' \n\tError: '%s'", historyItem.URL, err)
		}
}
log.Println(`Creating bookmarks...`)
for _, item := range configs.ActivityItems {
if rand.Intn(configs.BookmarkOneInX) == 0 {
var browser = browserz[rand.Intn(len(browserz))]
			var bookmarkItem = browsers.Bookmark{
				Name:         item.Name,
				URL:          item.URL,
				CreateWindow: configs.DefaultDuration,
			}
			if err := browser.AddBookmark(bookmarkItem); err != nil {
				log.Printf("unable to inject bookmark item for: \n\tURL: '%s' \n\tError: '%s'", bookmarkItem.URL, err)
			}
}
}
log.Println(`Committing changes...`)
browsers.Commit(browserz)
//-- Log nice output ----------
log.Printf(`Task complete! It took %d seconds`, time.Now().Unix()-start)
}
//-- Internal Functions ------------------------------------------------------------------------------------------------
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |