filename stringlengths 4-198 | content stringlengths 25-939k | environment list | variablearg list | constarg list | variableargjson stringclasses 1 value | constargjson stringlengths 2-3.9k | lang stringclasses 3 values | constargcount float64 0-129 ⌀ | variableargcount float64 0-0 ⌀ | sentence stringclasses 1 value |
---|---|---|---|---|---|---|---|---|---|---|
tools/build/rgo.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Jonas Byström
# Copyright (c) 2002-2009, Righteous Games
from __future__ import with_statement
import optparse
import os
import sys
import rgohelp
#appnames = ["uicure/curetestapp", "life", "slimevolleyball", "killCutie", "tirefire"]
appnames = ["bound"]
fullname = "Bound"
osname = rgohelp._getosname()
hwname = rgohelp._gethwname()
datename = rgohelp._getdatename()
ismac = (osname == "Mac")
args = []
bindir = "bin"
buildtypes = ["debug", "rc", "final"]
default_build_mode = buildtypes[0]
ziptype = default_build_mode
builddir_ansi = {"debug":"Debug", "rc":"Release Candidate", "final":"Final"}
builddir_unicode = {"debug":"Unicode Debug", "rc":"Unicode Release Candidate", "final":"Unicode Final"}
builddir_types = {"ansi":builddir_ansi, "unicode":builddir_unicode}
own_tt = builddir_types["ansi"]
verbose = False
updates = 0
removes = 0
importscript = "tools/maya/import_chunky.py"
makefilescriptdir = "tools/gcc"
makefilescript = "generate_makefile.py"
showed_result = False
exclude_demacappify=['archive']
def _load_target_app():
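# Restore appnames/fullname from ~/.rgoapp (written by _save_target_app); exit with an error if set_target has never been run.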
try:
home = os.path.expanduser("~")
with open(home+"/.rgoapp", "rt") as appfile:
for line in appfile:
words = eval(line)
if len(words) >= 2:
global appnames
global fullname
appnames = words[:-1]
fullname = words[-1]
except FileNotFoundError:
print("Error: you have not yet set_target, do that first!")
sys.exit(1)
def _save_target_app(words):
if len(words) < 2:
print("Error: insczane method and app names!")
print("Example: should be something like \"KillCutie 'Kill Cutie'\"")
sys.exit(1)
home = os.path.expanduser("~")
with open(home+"/.rgoapp", "wt") as appfile:
appfile.write(str(words) + "\n")
global updates
updates += 1
_load_target_app()
def _buildext():
pass # Yey - STLport gone!
def _buildcode(command, buildtype):
make = rgohelp._getmake(rgohelp.VCBUILD)
ver = rgohelp._getvcver()
projext = "900" if ver == 9 else "10";
if command == "build":
_buildext()
if osname == "Windows": args = [make, "/useenv", "/M4", "life"+projext+".sln", own_tt[buildtype]+"|Win32"]
else: args = [make]
what = "incremental building code"
elif command == "rebuild":
_buildext()
if osname == "Windows": args = [make, "/useenv", "/M4", "/rebuild", "life"+projext+".sln", own_tt[buildtype]+"|Win32"]
else: args = [make, "clean", "all"]
what = "rebuilding code"
elif command == "clean":
if osname == "Windows": args = [make, "/useenv", "/clean", "life"+projext+".sln", own_tt[buildtype]+"|Win32"]
else: args = [make, "clean"]
what = "cleaning code"
print(args)
args = rgohelp.fixbuildcmd(args)
print(args)
if osname == "Windows":
os.chdir("life")
rgohelp._run(args, what)
if osname == "Windows":
os.chdir("..")
def _convertdata(filename):
printout = ['--verbose'] if verbose else []
rgohelp._run([sys.executable, '-OO', importscript] + printout + [filename], "importing "+filename)
def _incremental_build_data(sourcedir):
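# Re-import every Maya .ma scene whose converted artifacts (.class/.mesh/.phys) are missing or older than the .ma/.ini sources.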
import glob
mas = glob.glob(os.path.join(sourcedir, "data/*.ma"))
for ma in mas:
ft = rgohelp._filetime(ma)
basename = os.path.splitext(ma)[0]
ini = basename+".ini"
if not os.path.exists(ini):
print("Warning: file %s missing..." % ini)
continue
ftini = rgohelp._filetime(ini)
if ftini > ft:
ft = ftini
fs = glob.glob(basename+"*")
real_fs = []
for f in fs:
fn = f.lower()
if fn.endswith(".class") or fn.endswith(".mesh") or fn.endswith(".phys"):
real_fs += [f]
fs = real_fs
if not fs:
#print("Converting %s as no converted files exist!" % (basename,))
_convertdata(ma)
for f in fs:
if rgohelp._filetime(f) < ft:
#print("Converting %s since %s has an older timestamp!" % (basename, f))
for f in fs:
os.remove(f)
_convertdata(ma)
break
def _incremental_copy(filelist, targetdir, buildtype, recursive=False):
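# Copy each file (or directory, when recursive) into targetdir, but only when the target copy is missing or older than the source.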
global updates
import shutil
for filename in filelist:
global default_build_mode
if buildtype != default_build_mode and filename.lower().find("test") >= 0:
print("Skipping test binary named '%s'." % filename)
continue
if os.path.isdir(filename) and not recursive:
continue
if not os.path.exists(targetdir):
os.makedirs(targetdir)
targetfile = os.path.join(targetdir, os.path.split(filename)[1])
if not os.path.exists(targetfile) or rgohelp._filetime(filename) > rgohelp._filetime(targetfile):
print("Copying %s -> %s." % (filename, targetfile))
if os.path.isdir(filename):
shutil.copytree(filename, targetfile)
else:
shutil.copy2(filename, targetfile)
updates += 1
def _incremental_copy_code(targetdir, buildtype):
import glob
if osname != "Windows":
if targetdir == bindir:
fl = [] # bin/ is all handled by make, don't do jack.
else:
fl = glob.glob("bin/*")
else:
lgpl_tt = {"debug":"Debug", "rc":"Release", "final":"Release"}
# Gather binaries from makefile.
fl = []
with open("makefile", "rt") as rm:
for line in rm:
obj = line.strip()
if obj:
obj = obj.split()[0]
if obj.startswith("thirdparty/"):
fl += glob.glob(os.path.join(obj, lgpl_tt[buildtype], "*.dll"))
elif list(filter(lambda x: x, [obj.startswith(an) for an in appnames])):
fl += glob.glob(os.path.join(obj, own_tt[buildtype], "*.exe"))
_incremental_copy(fl, targetdir, buildtype)
def _incremental_copy_data(sourcedir, targetdir, buildtype):
import glob
datadir = os.path.join(sourcedir, "data")
fl = glob.glob(datadir+"/*.class") + glob.glob(datadir+"/*.mesh") + glob.glob(datadir+"/*.phys") + \
glob.glob(datadir+"/*.jpg") + glob.glob(datadir+"/*.png") + glob.glob(datadir+"/*.tga") + glob.glob(datadir+"/*.bmp") + \
glob.glob(datadir+"/*.wav") + glob.glob(datadir+"/*.ogg") + glob.glob(datadir+"/*.mp3") + glob.glob(datadir+"/*.xm")
targetdata = os.path.join(targetdir, "data")
_incremental_copy(fl, targetdata, buildtype)
def _cleandata(da_dir):
global removes
import glob
fl = glob.glob(da_dir+"/*.class") + glob.glob(da_dir+"/*.mesh") + glob.glob(da_dir+"/*.phys")
for filename in fl:
os.remove(filename)
removes += 1
return removes
def _cleandata_source(sourcedir):
targetdir=bindir
global removes
removes += _cleandata(os.path.join(sourcedir, "data"))
removes += _cleandir(os.path.join(targetdir, "data"))
def _cleandir(da_dir):
global removes
import glob
fl = glob.glob(da_dir + "/*") + glob.glob(da_dir + '/.*')
for filename in fl:
if os.path.isdir(filename):
removes += _cleandir(filename)
os.rmdir(filename)
removes += 1
else:
if filename.startswith("/"):
print("FATAL INTERNAL ERROR!")
sys.exit(1)
os.remove(filename)
removes += 1
return removes
def _checkplatform():
if sys.platform == 'darwin':
ios = os.environ.get('PD_BUILD_IOS')
if ios == None:
print("You must set env var PD_BUILD_IOS to 0 or 1 depending on your target. Exiting.")
sys.exit(1)
def _printresult(print_no_work=True):
global showed_result, updates, removes, fullname
if showed_result:
return
showed_result = True
if updates+removes: print("%s operation successful, %i resulting files updated(/removed)." % (fullname, updates+removes))
elif print_no_work: print("%s build up-to-date." % fullname)
def _createmakes(force=False):
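# Generate makefiles via tools/gcc/generate_makefile.py (adding --release for non-debug builds); skipped when a makefile already exists unless force is set.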
if os.path.exists("makefile") and not force:
return
os.chdir(makefilescriptdir)
r = [sys.executable, makefilescript]
if default_build_mode != 'debug':
r += ['--release']
rgohelp._run(r, "generating makefiles")
cnt = len(makefilescriptdir.split("/"))
os.chdir("/".join([".."]*cnt))
def _posix_no_lib_exes(targetdir):
if os.name == "nt":
return
# Only executables are executable... Hurm...
import glob
libs = glob.glob(os.path.join(targetdir, "lib*.so*"))
for lib in libs:
rgohelp._run(["chmod", "-x", lib], "changing .so +x status to -x")
def _create_zip(targetdir, targetfile, buildtype):
_posix_no_lib_exes(targetdir)
print("Building compressed archive.")
if sys.platform in ("win32", "darwin"):
targetfile += ".zip" if buildtype == "final" else ".iszip"
rgohelp._zipdir(targetdir, lambda x: True, targetfile)
else:
targetfile += ".tar.gz" if buildtype == "final" else ".tar.isgz"
rgohelp._targzdir(targetdir, targetfile)
return targetfile
def _buildzip(builder, buildtype):
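# Stage the release tree under tmp/, let the builder populate it, archive it, and rename the archive to <name>.<os>.<hw>.<buildtype>.<date>.<ext>.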
rgohelp._verify_base_dir()
bestname = fullname if osname != 'Linux' else fullname.replace(' ','')
subdir = bestname if not ismac else '.'
targetdir = bestname
if buildtype == "rc":
targetdir = "PRE_ALPHA."+targetdir
elif buildtype != "final":
targetdir = "NO_RELEASE."+targetdir
targetdir = 'tmp/'+targetdir
os.makedirs(targetdir)
builder(targetdir, buildtype)
for app in appnames:
if bestname != app and os.path.exists(targetdir+'/'+app):
os.rename(targetdir+'/'+app, targetdir+'/'+bestname)
tmpdirs = ('tmp', '..') if not ismac else (targetdir, '../..')
os.chdir(tmpdirs[0])
targetfile = _create_zip(subdir, bestname, buildtype)
os.chdir(tmpdirs[1])
nicefile = bestname+"."+osname+"."+hwname+"."+buildtype+"."+datename+'.'+targetfile.split('.',1)[1]
os.rename(tmpdirs[0]+'/'+targetfile, nicefile)
_cleandir('tmp')
os.rmdir('tmp')
print("Built and zipped into %s." % nicefile)
def _copybin(targetdir, buildtype):
import glob
fl = glob.glob("bin/*")
_incremental_copy(fl, targetdir, buildtype, True)
fl = glob.glob("bin/data/*")
_incremental_copy(fl, os.path.join(targetdir, "data"), buildtype)
def _builddata(sourcedir, targetdir, buildtype):
_incremental_build_data(sourcedir)
_incremental_copy_data(sourcedir, targetdir, buildtype)
def _rebuild(sourcedir, targetdir, buildtype):
rgohelp._verify_base_dir()
if rgohelp._hasdevenv(verbose=True):
_createmakes(force=True)
_cleandir(targetdir)
_buildcode("rebuild", buildtype)
_incremental_copy_code(targetdir, buildtype)
else:
_cleandir(targetdir+"/data")
_cleandata_source(targetdir)
_builddata(sourcedir+"/data", targetdir+"/data", buildtype)
def _macappify(exe, name):
global updates
os.chdir("bin")
import glob
fl = glob.glob("*")
fs = []
for f in fl:
if os.path.isfile(f):
fs += [f]
for i in fs:
for o in fs:
#print("install_name_tool -change %s @executable_path/%s %s" % (o, o, i))
os.system("install_name_tool -change %s @executable_path/%s %s" % (o, o, i))
import shutil
shutil.copytree("../tools/build/macosx", name+".app")
for f in fs:
os.rename(f, os.path.join(name+".app/Contents/MacOS", f))
updates += 1
try:
os.rename("data", name+".app/Contents/Resources/data")
updates += 1
shutil.copy("../"+name+"/Icons/Main.icns", exe+".app/Contents/Resources")
updates += 1
except:
pass
plist = ".app/Contents/Info.plist"
r = open(name+plist, "rt")
w = open(name+plist+".tmp", "wt")
for line in r:
w.write(line.replace("@EXE_NAME@", exe).replace("@BUNDLE_NAME@", name))
r.close()
w.close()
os.remove(name+plist)
os.rename(name+plist+".tmp", name+plist)
updates += 1
os.chdir("..")
def _demacappify(wildcard):
try:
os.mkdir("bin")
except:
pass
os.chdir("bin")
import glob
import shutil
apps = glob.glob(wildcard)
for app in apps:
fl = glob.glob(os.path.join(app, "Contents/MacOS/*"))
fl += glob.glob(os.path.join(app, "Contents/Resources/data"))
for f in fl:
os.rename(f, os.path.split(f)[1])
shutil.rmtree(app)
os.chdir("..")
def _include_data_files(fn):
fn = fn.lower()
return fn.endswith(".class") or \
fn.endswith(".mesh") or \
fn.endswith(".phys") or \
fn.endswith(".wav") or \
fn.endswith(".png") or \
fn.endswith(".jpg") or \
fn.endswith(".tga") or \
fn.endswith(".tif") or \
fn.endswith(".tiff") or \
fn.endswith(".bmp") or \
fn.endswith(".xm")
def _prepare_run():
os.chdir(bindir)
pre = "./"
post = ""
if os.name == "nt":
pre = ""
post = ".exe"
if not os.path.exists(appnames[0]+post):
reason = ("binaries (%s) not compiled" % (bindir+'/'+appnames[0]+post)) if rgohelp._hasdevenv() else "missing C++ build environment"
print("Could not run %s due to %s." % (appnames[0], reason))
sys.exit(2)
return pre, post
def _bgrun(name):
_printresult(False)
pre, post = _prepare_run()
import subprocess
subprocess.Popen(pre+name+post, shell=True)
os.chdir("..")
def _fgrun(name, app=""):
_printresult(False)
pre, post = _prepare_run()
app = [app] if app else []
rgohelp._run(app+[pre+name+post], "run")
os.chdir("..")
def _getmethods():
methods = [(m, callable(eval(m))) for m in dir(sys.modules["__main__"])]
methods,_ = zip(*filter(lambda x: x[1], methods))
methods = list(filter(lambda n: not (n.startswith('_') or n.startswith('get')), methods))
return methods
def _runlocal(script, args):
if os.path.exists(appnames[0]+'/script/'+script+'.py'):
os.chdir(appnames[0]+'/data')
rgohelp._run([sys.executable, '-OO', '../script/'+script+'.py'], ' '.join([script]+args))
os.chdir('../..')
return True
return False
#-------------------- High-level build stuff below. --------------------
def macappify():
global appnames
global fullname
_macappify(appnames[0], fullname)
def demacappify():
_demacappify("*.app")
def cleandata():
global appnames
_cleandata_source(appnames[0])
def builddata():
global appnames
_builddata(appnames[0], bindir, default_build_mode)
def zipdata():
global appnames, updates
datadir = appnames[0] + '/data'
os.chdir(datadir)
rgohelp._zipdir('', _include_data_files, "data.pk3")
os.chdir('../..')
updates += 1
# Replace bin/data too.
_cleandata_source(bindir)
import glob
fl = [datadir+"/data.pk3"] + glob.glob(datadir+"/*.ogg") + glob.glob(datadir+"/*.mp3") # Music goes outside of the .zip.
targetdata = os.path.join(bindir, "data")
_incremental_copy(fl, targetdata, default_build_mode)
def buildcode():
targetdir=bindir
buildtype=default_build_mode
if rgohelp._hasdevenv(verbose=True):
_createmakes()
_buildcode("build", buildtype)
_incremental_copy_code(targetdir, buildtype)
def copycode():
targetdir=bindir
buildtype=default_build_mode
_createmakes()
_incremental_copy_code(targetdir, buildtype)
def clean():
targetdir=bindir
buildtype=default_build_mode
rgohelp._verify_base_dir()
cleandata()
if rgohelp._hasdevenv(verbose=True):
global removes
removes += _cleandir(targetdir)
_buildcode("clean", buildtype)
def run():
copycode()
_fgrun(appnames[0])
def go():
builddata()
copycode()
run()
def archive():
if default_build_mode != "final":
print("Warning: archiving should probably be run on final (=release) artifacts.")
_buildzip(_copybin, ziptype)
sys.exit(0)
def set_target():
global args
appdata = args
args = []
_save_target_app(appdata)
def _main():
usage = "usage: %prog [options] <filespec>\n" + \
"Runs some type of build command. Try build, rebuild, clean, builddata, or something like that."
parser = optparse.OptionParser(usage=usage, version="%prog 0.2")
parser.add_option("-m", "--buildmode", dest="buildmode", default="debug", help="Pick one of the build modes: %s. Default is debug." % ", ".join(buildtypes))
parser.add_option("-c", "--chartype", dest="chartype", default="ansi", help="Pick char type: ansi/unicode (i.e. char/wchar_t). Default is ansi.")
parser.add_option("-a", "--demacappify", dest="demacappify", default=ismac, action="store_true", help="Quietly try to de-Mac-.App'ify the target before building; default is %s." % str(ismac))
parser.add_option("-v", "--verbose", dest="verbose", default=False, action="store_true", help="Verbose mode; default is False.")
parser.add_option("-e", "--pause-on-error", dest="pause_on_error", default=False, action="store_true", help="Pause on error; default is False.")
global args
options, args = parser.parse_args()
if len(args) < 1:
print("Need arg! Pick one of:\n %s\n" % "\n ".join(_getmethods()))
sys.exit(1)
if not options.buildmode in buildtypes:
print("Unknown build mode!")
sys.exit(1)
global default_build_mode
default_build_mode = options.buildmode
global ziptype
ziptype = default_build_mode
global own_tt
own_tt = builddir_types[options.chartype]
global verbose
verbose = options.verbose
rgohelp.pause_on_error = options.pause_on_error
if options.demacappify and not any(a in exclude_demacappify for a in args):
demacappify()
_checkplatform()
if args[0] != "set_target":
_load_target_app()
while args:
try:
arg = args[0]
args = args[1:]
if not _runlocal(arg, args):
exec(arg+"()")
except NameError as e:
print("Error: no such command %s!" % arg)
suggestions = []
import difflib
for name in _getmethods():
match = difflib.SequenceMatcher(None, arg, name).ratio()
if match > 0.5:
suggestions += [(match, name)]
if suggestions:
suggestions = sorted(suggestions, reverse=True)
if suggestions[0][0] > 0.9:
print("Did you mean %s?" % suggestions[0][1])
else:
suggestions = list(map(lambda mn: mn[1], suggestions))
print("Perhaps: %s?" % ", ".join(suggestions))
sys.exit(1)
_printresult()
if __name__ == "__main__":
_main()
| [] | [] | ["PD_BUILD_IOS"] | [] | ["PD_BUILD_IOS"] | python | 1 | 0 | |
assumerole/identity.py
|
import configparser
import os
import boto3
import json
import time
import botocore.exceptions
from botocore.session import Session
def check_aws_config_file():
config = None
aws_config_file = os.environ['HOME'] + "/.aws/config"
if os.path.isdir(os.environ['HOME'] + "/.aws"):
if os.path.isfile(aws_config_file):
config = configparser.ConfigParser()
config.read(aws_config_file)
if not os.path.isdir(os.environ['HOME'] + "/.aws/cached_tokens"): # pragma: no cover
os.makedirs(os.environ['HOME'] + "/.aws/cached_tokens")
else: # pragma: no cover
print(aws_config_file + " not found. Exiting")
else: # pragma: no cover
print("~/.aws folder not found. Exiting")
return config
def set_profile(config, aws_profile_name, expire_duration_hours=8):
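# Assume the role configured for the given profile via STS (prompting for an MFA code when mfa_serial is set) and cache the returned credentials in ~/.aws/cached_tokens/<profile>.txt.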
list_aws_profile = config.sections()
if "profile " + aws_profile_name in list_aws_profile:
session = "dev"
aws_config = Session(profile=aws_profile_name).get_scoped_config()
# Construct assume role request
assert "role_arn" in aws_config, f"{aws_profile_name} does not have role_arn."
rq = {
"RoleArn": aws_config["role_arn"],
"RoleSessionName": session,
"DurationSeconds": (expire_duration_hours*3600)
}
expire_time = int(time.time()) + (expire_duration_hours*3600)
# Add MFA token if needed
if "mfa_serial" in aws_config: # pragma: no cover
print("\n Enter MFA Code:")
mfa_code = input()
rq["SerialNumber"] = aws_config["mfa_serial"]
rq["TokenCode"] = mfa_code
# Get auth token
try:
sts = boto3.client("sts")
sts_response = sts.assume_role(**rq)
sts_response['Credentials']["Expiration"] = expire_time
cached_folder = os.environ['HOME'] + "/.aws/cached_tokens"
with open(cached_folder + "/" + aws_profile_name + ".txt", "w") as fp:
fp.write(json.dumps(sts_response))
fp.close()
assume_role_status = True
except botocore.exceptions.ClientError as ex:
print(ex.response)
print("\nProfile {0} not set correctly. Please retry with correct credentials\n".format(aws_profile_name))
assume_role_status = False
else:
print("aws profile not found\n")
assume_role_status = False
return assume_role_status
def check_cached_token(aws_profile_name):
cached_folder = os.environ['HOME'] + "/.aws/cached_tokens"
aws_cached_file = cached_folder + "/" + aws_profile_name + ".txt"
if os.path.isfile(aws_cached_file):
with open(aws_cached_file, "r") as fp:
cached_string = fp.read()
fp.close()
try:
cached_config = json.loads(cached_string)
except json.decoder.JSONDecodeError: # pragma: no cover
cached_config = {}
else:
cached_config = {}
expiration = cached_config.get("Credentials", {}).get("Expiration", -1)
if expiration != -1:
if int(time.time()) <= expiration:
token_expired = False
else: # pragma: no cover
token_expired = True
else: # pragma: no cover
token_expired = True
return token_expired
def set_cached_token(aws_profile_name):
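# Load the cached STS credentials for the profile and build both a copy of os.environ with the AWS_* variables set and a ';'-joined shell command that exports them.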
cached_folder = os.environ['HOME'] + "/.aws/cached_tokens"
aws_cached_file = cached_folder + "/" + aws_profile_name + ".txt"
if os.path.isfile(aws_cached_file):
with open(aws_cached_file, "r") as fp:
cached_string = fp.read()
fp.close()
try:
cached_config = json.loads(cached_string)
except json.decoder.JSONDecodeError: # pragma: no cover
cached_config = {}
else:
cached_config = {}
my_env = os.environ.copy()
set_command = ""
variable_mapping = dict()
variable_mapping["AWS_ACCESS_KEY_ID"] = "AccessKeyId"
variable_mapping["AWS_SECRET_ACCESS_KEY"] = "SecretAccessKey"
variable_mapping["AWS_SESSION_TOKEN"] = "SessionToken"
variable_mapping["AWS_SECURITY_TOKEN"] = "SessionToken"
if cached_config != {} and cached_config.get("Credentials", {}) != {}:
cached_credentials = cached_config["Credentials"]
for target, source in variable_mapping.items():
my_env[target] = cached_credentials.get(source, "")
my_env['ASSUMED_ROLE'] = aws_profile_name
set_cmd = list()
for target, source in variable_mapping.items():
set_cmd.append("unset {0}".format(target))
set_cmd.append("export {0}=\"{1}\"".format(target, cached_credentials.get(source, "")))
set_cmd.append("unset ASSUMED_ROLE")
set_cmd.append("export ASSUMED_ROLE='{0}'".format(aws_profile_name))
set_command = ';'.join(set_cmd)
return my_env, set_command
def assume_role(aws_profile_name, force_refresh=False, expire_duration_hours=8):
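# High-level entry point: refresh the token when the cache is expired (or force_refresh is set), then expose the cached credentials via os.environ and return them with the export command.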
config = check_aws_config_file()
os_env = os.environ.copy()
command = ""
if config is not None:
if check_cached_token(aws_profile_name) or force_refresh:
if not set_profile(config, aws_profile_name, expire_duration_hours):
return os_env, command
os_env, command = set_cached_token(aws_profile_name)
os.environ = os_env
return os_env, command
| [] | [] | ["HOME"] | [] | ["HOME"] | python | 1 | 0 | |
tests/highlevel/debugmode-py-light.py
|
from seamless.highlevel import Context, Cell
import traceback
ctx = Context()
def func(a, b):
aa = a**2
bb = b**2
return aa+bb
ctx.tf = func
ctx.tf.a = 10
ctx.tf.b = 20
try:
ctx.tf.debug.enable("light")
except Exception:
traceback.print_exc(limit=0)
ctx.compute()
print()
try:
#import os; os.environ.pop("HOSTCWD", None)
ctx.tf.debug.enable("light")
except Exception:
traceback.print_exc(limit=0)
print()
#ctx.tf.code.mount("debugmount/debugmode-py-light-code.py", authority="cell")
ctx.code = ctx.tf.code.pull()
ctx.code.mount("debugmount/debugmode-py-light-code.py", authority="cell")
ctx.translate()
ctx.tf.debug.enable("light")
import traceback
print("Error 1")
try:
ctx.translate(force=True)
except Exception:
traceback.print_exc()
print()
print("Error 2")
try:
ctx.set_graph({})
except Exception:
traceback.print_exc()
print()
print("Error 3")
try:
del ctx.tf
except Exception:
traceback.print_exc()
print()
print("START")
ctx.tf.a = 11
ctx.compute()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
pkg/scaleway/conn.go
|
package scaleway
import (
"fmt"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/scaleway/scaleway-sdk-go/scw"
"github.com/sirupsen/logrus"
"log"
"os"
"strconv"
"time"
)
func CreateSessionWithZone(zone scw.Zone) *scw.Client {
region, zoneErr := zone.Region()
if zoneErr != nil {
logrus.Fatalf("Unknown zone %s: %s", zone.String(), zoneErr.Error())
}
client, err := scw.NewClient(
scw.WithDefaultZone(zone),
scw.WithDefaultRegion(region),
scw.WithAuth(os.Getenv("SCW_ACCESS_KEY"), os.Getenv("SCW_SECRET_KEY")),
)
if err != nil {
logrus.Errorf("Can't connect to Scaleway: %s", err)
os.Exit(1)
}
return client
}
func CreateSessionWithRegion(region scw.Region) *scw.Client {
client, err := scw.NewClient(
scw.WithDefaultRegion(region),
scw.WithAuth(os.Getenv("SCW_ACCESS_KEY"), os.Getenv("SCW_SECRET_KEY")),
)
if err != nil {
logrus.Errorf("Can't connect to Scaleway: %s", err)
os.Exit(1)
}
return client
}
func CreateMinIOSession(scwSession *scw.Client) *minio.Client {
region, _ := scwSession.GetDefaultRegion()
endpoint := fmt.Sprintf("s3.%s.scw.cloud", region)
access, _ := scwSession.GetAccessKey()
secret, _ := scwSession.GetSecretKey()
minioClient, err := minio.New(endpoint, &minio.Options{
Creds: credentials.NewStaticV4(access, secret, ""),
Region: string(region),
})
if err != nil {
log.Fatalln(err)
}
return minioClient
}
func volumeTimeout() time.Duration {
env, ok := os.LookupEnv("SCW_VOLUME_TIMEOUT")
if ok {
timeout, err := strconv.Atoi(env)
if err != nil {
logrus.Errorf("Can't parse VOLUME_TIMEOUT variable. Set to default (2 hours)")
return 2
}
return time.Duration(timeout)
}
return 2
}
func GetRegionfromZone(zone string) string {
scwZone := scw.Zone(zone)
scwRegion, err := scwZone.Region()
if err != nil {
logrus.Errorf("Can't get region for zone %s: %s", zone, err.Error())
return ""
}
return scwRegion.String()
}
| ["\"SCW_ACCESS_KEY\"", "\"SCW_SECRET_KEY\"", "\"SCW_ACCESS_KEY\"", "\"SCW_SECRET_KEY\""] | [] | ["SCW_SECRET_KEY", "SCW_ACCESS_KEY"] | [] | ["SCW_SECRET_KEY", "SCW_ACCESS_KEY"] | go | 2 | 0 | |
api/search/views.py
|
import concurrent
from concurrent.futures import ThreadPoolExecutor, wait, as_completed
from django.shortcuts import render
from django.template import RequestContext
from django.urls import reverse
from django.views.decorators.csrf import csrf_exempt
# Create your views here.
# desired operation below
# m -> x?
# d1 -> a b
# d2 -> c
# d3 ->d e
#
#
#
# 1. x
# 2. x a
# 3. x a c
# 4. x a c d
# 5. x a c e
# 6. x b
# 7. x b c
# 8. x b c d
# 9. x b c e
import json
from django.http import HttpResponse, HttpResponseRedirect
from pymongo import MongoClient
from elasticsearch import Elasticsearch
from api.mainquery.views import Dimension
import os
from api.search.models import AnnotatedArticle
from django.template.response import TemplateResponse
import pymongo
import collections
from nltk import FreqDist
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import string
import requests
import xmltodict
from collections import Counter
import sys
@csrf_exempt
def startSearch(request):
if request.method == "POST":
print("post")
main_query = request.POST.get("main_query")
dimensions = request.POST.get("dimensions")
print("main: ", main_query)
print("dimensions: ", dimensions)
dimensions_json = json.loads(dimensions)["dimensions"]
resp = Search.search_annotated_articles(main_query, dimensions_json)
resp["dimensions"]=dimensions_json
resp["mainquery"]=main_query
data = {}
data["keyword_pairs"] = json.dumps(resp)
request.session['keyword_pairs'] = data
return HttpResponseRedirect(reverse("summaryPage"))
def highlight(text, start, end):
return text[:start] + '<span style="background-color: #FFFF00">' + text[start:end] + '</span>' + text[end:]
def annotations(request, articleId):
if request.method == "GET":
annotationId = request.GET.get("annotationId")
mongo_client = MongoClient(
host='mongodb:27017', # <-- IP and port go here
serverSelectionTimeoutMS=3000, # 3 second timeout
username='root',
password='mongoadmin',
)
db = mongo_client["mentisparchment_docker"]
title = ""
authors = ""
keywords = ""
abstract = ""
column = db["annotated_article_ids"]
query = {"id": str(articleId)}
articles = column.find(query)
for item in articles:
list_item = dict(item)
title = list_item["title"]
authors = list_item["author_list"]
keywords = list_item["top_three_keywords"]
abstract = list_item["abstract"]
break
authors_str = "; ".join(authors)
# Check whether an annotation id is given.
if annotationId is None:
return render(request, "html/article.html",
{"title": title, "authors": authors_str, "abstract": abstract})
else:
pm_id = ""
column = db["annotation_to_article"]
query = {"annotation_id": int(annotationId)}
annotation_to_article = column.find(query)
for item in annotation_to_article:
list_item = dict(item)
pm_id = list_item["pm_id"]
break
# Check whether the given annotation is related to the given article.
if pm_id != "" and pm_id == str(articleId):
print("Such annotation exists for such article")
start = 0
end = 0
column = db["annotation"]
query = {"id": int(annotationId)}
annotations = column.find(query)
for item in annotations:
list_item = dict(item)
start = list_item["target"]["selector"]["start"]
end = list_item["target"]["selector"]["end"]
# Find which part of the article this annotation is from.
if start < len(title):
title = highlight(title, start, end)
elif start < len(title + authors_str):
offset = len(title)
authors_str = highlight(authors_str, start - offset, end - offset)
else:
offset = len(title + authors_str)
abstract = highlight(abstract, start - offset, end - offset)
return render(request, "html/article.html",
{"title": title, "authors": authors_str, "keywords": keywords,
"abstract": abstract})
else:
print("This annotation is not related to this article")
return render(request, "html/article.html",
{"title": title, "authors": authors_str, "keywords": keywords, "abstract": abstract})
class Search:
@staticmethod
def search_annotated_articles(main_query, dimensions_json):
helper = SearchHelper(main_query)
helper.create_search_combinations(dimensions_json)
helper.create_search_keys()
articles = helper.start_query()
del helper
return articles
class SearchHelper(object):
mongo_client = ""
db = ""
annotation_column = ""
annotation_detail_column = ""
articles = []
article_details = {}
search_result_list = []
articles_by_term = {}
def __init__(self, main_query):
self.main_query = main_query.lower()
self.dimensions = []
self.combinations = []
# we will use this later while parsing the articles
self.all_terms = []
self.search_result_list = []
self.articles_by_term = {}
self.mongo_client = MongoClient(
host='mongodb:27017', # <-- IP and port go here
username='root',
password='mongoadmin',
maxPoolSize=2000
)
self.db = self.mongo_client["mentisparchment_docker"]
self.annotation_column = self.db["annotation"]
self.annotation_detail_column = self.db["annotated_article_ids"]
def start_annotations(self, combination):
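# Intersect the cached article-id lists of the combination's keywords, fetch the article details in a worker thread, and append a SearchResult (marked empty_result when nothing matches).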
common_article_list = []
print("new combination", combination)
# split the combination list
combination_line = combination.split(")")
urls = []
if len(combination_line) > 1:
for each_keyword_combination in combination_line:
if len(each_keyword_combination) > 0:
urls.append(self.articles_by_term[each_keyword_combination])
common_article_list = list(set.intersection(*map(set, urls)))
elif len(combination_line) == 1:
common_article_list = self.articles_by_term[combination_line[0]]
print("common article list created for ", combination, " total article list ", len(common_article_list))
if len(common_article_list) > 0:
article_details_futures = []
articles = []
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
article_details_futures.append(executor.submit(self.get_article_details, common_article_list))
for x in as_completed(article_details_futures):
articles = x.result()
print("articles created for ", combination)
if len(articles) > 0:
search_result = SearchResult(combination)
search_result.add_articles(articles)
SearchResult.summary_articles(search_result, articles)
print("articles summarized for ", combination)
self.search_result_list.append(search_result)
del search_result
del articles
else:
search_result = SearchResult(combination)
search_result.empty_result=True
self.search_result_list.append(search_result)
common_article_list.clear()
def start_query(self):
search_result_list = []
response = {}
response["keyword_pairs"] = []
for keyword in self.all_terms:
# query annotations by keyword retrieve article id
# query elastic by keyword retrieve article id
# combine them together without duplicate
# append combined list into articles_by_term
article_list_from_annotation = self.get_article_ids_from_annotations(keyword)
article_list_from_elastic = self.get_article_ids_from_elastic_with_proximity_and_fuzzy(keyword)
article_list_from_annotation_as_set = set(article_list_from_annotation)
article_list_from_elastic_as_set = set(article_list_from_elastic)
list_elastic_items_not_in_list_annotation = list(
article_list_from_elastic_as_set - article_list_from_annotation_as_set)
combined = article_list_from_annotation + list_elastic_items_not_in_list_annotation
del article_list_from_annotation
del article_list_from_elastic
del article_list_from_elastic_as_set
self.articles_by_term[keyword] = combined
if len(self.combinations) > 0:
with concurrent.futures.ThreadPoolExecutor(max_workers=len(self.combinations)) as executor:
futures = []
for combination in self.combinations:
futures.append(executor.submit(self.start_annotations, combination))
dict_futures = []
work_num = 0
if len(self.search_result_list) > 0:
work_num = len(self.search_result_list)
else:
work_num = 1
#sort search result list
self.search_result_list.sort(key=lambda x: x.number_of_article, reverse=False)
with concurrent.futures.ThreadPoolExecutor(max_workers=work_num) as executor:
while self.search_result_list:
dict_futures.append(executor.submit(self.search_result_list.pop().generate_dict_value, response))
for x in as_completed(dict_futures):
print("dict value created")
return response
def create_search_combinations(self, dimensions_json):
for dimension in dimensions_json:
dimension_obj = Dimension()
for keyword in dimension['keywords']:
dimension_obj.add_keyword(keyword.lower())
self.dimensions.append(dimension_obj)
self.start_parsing()
def start_parsing(self):
dimension_number = len(self.dimensions)
for i in range(dimension_number):
self.start_keyword_pairing(dimension_number, i)
if len(self.main_query) > 0:
self.combinations.append(self.main_query)
print("All search combinations: ", self.combinations)
def start_keyword_pairing(self, dimension_number, current_index):
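# Emit ')'-separated combinations: each keyword of this dimension (prefixed with the main query when one is given), alone and paired with keywords from later dimensions, extended recursively by iterate_keyword_pairing.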
# iterate for all keyword for each dimension
for keyword in self.dimensions[current_index].keywords:
if len(self.main_query) > 0:
keyword = self.main_query + ")" + keyword
self.combinations.append(keyword)
current_keyword_pairing = ""
# other_dimension_index means the index from another dimensions
for other_dimension_index in range(dimension_number):
# a->c is already generated; c->a (or earlier pairs such as a->b) must not be walked again.
if other_dimension_index == current_index or other_dimension_index < current_index:
pass
else:
# other_dimension_keyword means the keyword from another dimensions
for other_dimension_keyword in self.dimensions[other_dimension_index].keywords:
another_inside_str = ""
# check it is last element
if current_index != dimension_number - 1:
current_keyword_pairing = keyword + ")" + other_dimension_keyword
self.combinations.append(current_keyword_pairing)
# iterate through 6th dimension!
self.iterate_keyword_pairing(current_keyword_pairing, dimension_number, current_index,
keyword,
other_dimension_keyword,
other_dimension_index, 1)
def iterate_keyword_pairing(self, current_keyword_pairing, dimension_number, current_index, keyword,
other_dimension_keyword,
other_dimension_index,
index):
# 6th dimension hardcoded!
if other_dimension_index != dimension_number - index and index != 6:
for next_keyword in self.dimensions[other_dimension_index + index].keywords:
new_keyword_pairing = current_keyword_pairing + ")" + next_keyword
self.combinations.append(new_keyword_pairing)
# new_keyword_pairing becomes another inside str
self.iterate_keyword_pairing(new_keyword_pairing, dimension_number, current_index, keyword,
other_dimension_keyword,
other_dimension_index, index + 1)
def elastic_search(self, main_query):
es = Elasticsearch(hosts=["es01"])
res = es.search(
index="test5",
body={
"query": {
"match": {
"abstract": main_query
}
}
}
)
print("Got %d Hits:" % res['hits']['total']['value'])
for hit in res['hits']['hits']:
print("%(_created)s %(title)s by %(authors)s (%(keywords)s): %(abstract)s" % hit["_source"])
# We don't need to re-query everything for each key again and again; instead we reuse
# this list while retrieving the articles.
def create_search_keys(self):
self.all_terms.append(self.main_query)
for dimension_line in self.dimensions:
for keyword in dimension_line.keywords:
self.all_terms.append(keyword)
# takes article ids from mongodb with its keyword
def get_article_ids_from_annotations(self, keyword):
query = {}
article_id_list = []
query["body.items.value"] = keyword
document = self.annotation_column.find(query)
for x in document:
list_item = dict(x)
target_id_str = list_item["target"]["id"].split("/")
if target_id_str[len(target_id_str) - 1] not in article_id_list:
article_id_list.append(target_id_str[len(target_id_str) - 1])
return article_id_list
# returns a dict that consist all articles in the mongodb
def retrieve_all_articles(self):
mongo_query = {}
document = self.annotation_detail_column.find(mongo_query)
all_papers = {}
for x in document:
list_item = dict(x)
all_papers[list_item["id"]] = list_item
return all_papers
# takes article details from mongodb with its keyword
def article_details_query(self, article_id):
mongo_query = {}
mongo_query["id"] = article_id
document = self.annotation_detail_column.find(mongo_query)
for x in document:
list_item = dict(x)
article = Article(pm_id=list_item["id"],
title=list_item["id"],
journal_issn="",
journal_name=list_item["journal_name"],
abstract="",
pubmed_link=list_item["pubmed_link"],
author_list=list_item["author_list"],
instutation_list=list_item["institution_list"],
article_date=list_item["article_date"],
top_three_keywords=list_item["top_three_keywords"])
del list_item
del document
return article
# create collection of details of articles
def get_article_details(self, article_list):
articles = []
all_articles = self.retrieve_all_articles()
# create all articles from given list
for article_id in article_list:
list_item = all_articles[article_id]
if article_id == list_item["id"]:
article = Article(pm_id=list_item["id"],
title=list_item["title"],
journal_issn="",
journal_name=list_item["journal_name"],
abstract="",
pubmed_link=list_item["pubmed_link"],
article_link=os.environ.get("ARTICLE_URL", " ") + "/articles/" + list_item["id"],
author_list=list_item["author_list"],
instutation_list=list_item["institution_list"],
article_date=list_item["article_date"],
top_three_keywords=list_item["top_three_keywords"],
article_type=list_item["article_type"])
articles.append(article)
return articles
def get_article_ids_from_elastic(self, keyword):
es = Elasticsearch(hosts=["es01"])
res = es.search(
index="test5",
body={
"query": {
"multi_match":
{"query": keyword,
"fields": ["abstract", "keywords"]
}
}
}
)
result = []
for hit in res['hits']['hits']:
result.append(hit["_source"]['article_id'])
del res
return result
def find_stored_article_number(self):
mongo_query = {}
document = self.annotation_detail_column.find(mongo_query)
unique_papers = []
for x in document:
list_item = dict(x)
if list_item["id"] not in unique_papers:
unique_papers.append(list_item["id"])
return len(unique_papers)
def find_annotation_size(self):
mongo_query = {}
document = self.annotation_column.find(mongo_query)
return document.count()
def get_article_ids_from_elastic_with_proximity(self, keyword):
slop_option=4
es = Elasticsearch(hosts=["es01"])
res = es.search(
index="test5",
body={
"query": {
"match_phrase": {
"abstract": {"query": keyword,
"slop": slop_option
}
}
}
}
)
result = []
for hit in res['hits']['hits']:
result.append(hit["_source"]['article_id'])
del res
res = es.search(
index="test5",
body={
"query": {
"match_phrase": {
"keywords": {"query": keyword,
"slop": slop_option
}
}
}
}
)
for hit in res['hits']['hits']:
result.append(hit["_source"]['article_id'])
del res
return result
# This method searches the keyword using Elasticsearch's fuzzy and proximity features.
# Proximity means several other words may appear between the sub-keywords.
# Fuzzy means the user may have mistyped the keyword.
def get_article_ids_from_elastic_with_proximity_and_fuzzy(self,keyword):
es = Elasticsearch(hosts=["es01"])
body= self.fuzzy_proximity_search_creator("abstract",keyword)
json_res=body
res = es.search(
index="test5",
body=json_res
)
result = []
for hit in res['hits']['hits']:
result.append(hit["_source"]['article_id'])
del res
body = self.fuzzy_proximity_search_creator("keywords", keyword)
json_res = body
res = es.search(
index="test5",
body=json_res
)
for hit in res['hits']['hits']:
result.append(hit["_source"]['article_id'])
del res
return result
def fuzzy_proximity_search_creator(self,type,keyword):
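# Compose a span_near query (slop 5, in order) whose clauses are fuzzy span_multi matches, one per whitespace-separated sub-keyword, and return it as a JSON string.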
clauses=[]
slop_option = 5
fuzziness=2
in_order="true"
keyword_list=keyword.split(" ")
for subkeyword in keyword_list:
clause={
"span_multi":{
"match":{
"fuzzy":{
type:{
"value":subkeyword,
"fuzziness":fuzziness
}
}
}
}
}
clauses.append(clause)
resp = {
"query": {
"bool": {
"must": [
{
"span_near": {
"clauses": clauses,
"slop": slop_option,
"in_order":in_order
}
}
]
}
}
}
return json.dumps(resp)
# it is similar class with annotate_abstract
class Article(object):
def __init__(self, pm_id, title, journal_issn, journal_name, abstract, pubmed_link, article_link, author_list,
instutation_list, article_date, top_three_keywords, article_type):
self.pm_id = pm_id
self.title = title
self.journal_issn = journal_issn
self.journal_name = journal_name
self.abstract = abstract
self.pubmed_link = pubmed_link
self.article_link=article_link
self.author_list = author_list
self.instutation_list = instutation_list
self.article_date = article_date
self.top_three_keywords = top_three_keywords
self.article_type = article_type
self.json_val = self.toJSON()
def toJSON(self):
return {
"pm_id": self.pm_id,
"title": self.title,
"authors": self.author_list,
"pubmed_link": self.pubmed_link,
"article_link":self.article_link,
"article_type": self.article_type,
"article_date": self.article_date
}
class Author(object):
name_surname: ""
institute = ""
def __init__(self, name, Institute):
self.name_surname = name
self.institute = Institute
class Institute(object):
name: ""
location: ""
def __init__(self, name, location):
self.name = name
self.location = location
class TopKeyword(object):
top_keyword = ""
def __init__(self, top_keyword):
self.top_keyword = top_keyword
class SearchResult(object):
keyword = ""
number_of_article = 0
articles = []
top_keywords = []
top_authors = []
result_change_time_years = []
result_change_time_numbers = []
pm_ids = []
authors = []
def __init__(self, keyword):
self.keyword = keyword
self.number_of_article = 0
self.articles = []
self.top_keywords = []
self.top_authors = []
self.result_change_time_years = []
self.result_change_time_numbers = []
self.pm_ids = []
self.authors = []
self.empty_result=False
def change_article_number(self, article_number):
self.number_of_article = article_number
def add_article(self, article):
self.articles.append(article)
def add_articles(self, articles):
for article in articles:
self.articles.append(article)
def add_author(self, author):
self.authors.append(author)
def add_top_authors(self, authors):
for author in authors:
self.top_authors.append(author)
def add_top_keyword(self, keyword):
self.top_keywords.append(keyword)
def add_top_keywords(self, keywords):
for keyword in keywords:
self.top_keywords.append(keyword)
def add_year(self, year):
self.result_change_time_years.append(year)
def add_years(self, years):
for year in years:
self.result_change_time_years.append(year)
def add_number_of_year(self, number):
self.result_change_time_numbers.append(number)
def add_number_publication_per_years(self, numbers):
for number in numbers:
self.result_change_time_numbers.append(number)
def generate_json_value(self):
s1 = "{"
s3 = "}"
str = f'"value":{self.keyword.replace(")", " ")},"papers_number":{self.number_of_article},"top_authors":{self.top_authors},"top_keywords":{self.top_keywords},"publication_year":{self.result_change_time_years},"publication_year_values":{self.result_change_time_numbers}'
return s1 + str + s3
def generate_dict_value(self, response):
dict = {}
json_articles = []
for article in self.articles:
json_articles.append(article.json_val)
dict["value"] = self.keyword.replace(")", ",")
dict["papers_number"] = self.number_of_article
dict["top_authors"] = self.top_authors
dict["top_keywords"] = self.top_keywords
dict["publication_year"] = self.result_change_time_years
dict["publication_year_values"] = self.result_change_time_numbers
dict["articles"] = json_articles
dict["empty_result"] = self.empty_result
del json_articles
response["keyword_pairs"].append(dict)
del dict
# collects the articles and prepares them for the search result operation
@staticmethod
def summary_articles(search_result, articles):
top_keywords = SearchResult.get_top_keywords_of_articles(articles)
# top_authors =[]
top_authors = SearchResult.get_top_authors_of_articles(articles)
time_change_dict = SearchResult.get_time_change_of_articles(articles)
time_change_list = list(time_change_dict.items())
years = [i[0] for i in time_change_list]
number_publication_per_year = [i[1] for i in time_change_list]
total_articles = len(articles)
search_result.change_article_number(total_articles)
search_result.add_years(years)
search_result.add_number_publication_per_years(number_publication_per_year)
search_result.add_top_authors(top_authors)
search_result.add_top_keywords(top_keywords)
# finds the 3 top keywords among the articles
@staticmethod
def get_top_keywords_of_articles(articles):
abstracts = ""
top_keywords = []
top_3_keywords = {}
for article in articles:
# abstracts += article.abstract
for keyword in article.top_three_keywords:
if keyword in top_3_keywords:
val = top_3_keywords[keyword] + len(article.abstract)
top_3_keywords.update({keyword: val})
else:
top_3_keywords[keyword] = len(article.abstract)
return sorted(top_3_keywords, key=top_3_keywords.get, reverse=True)[:3]
# finds the 3 top authors among the articles
@staticmethod
def get_top_authors_of_articles(articles):
all_authors = []
for article in articles:
for author in article.author_list:
all_authors.append(author)
most_common_authors = [word for word, word_count in Counter(all_authors).most_common(3)]
del all_authors
return most_common_authors
# calculates the publication dates among the articles and sorts them max to min
@staticmethod
def get_time_change_of_articles(articles):
dates = {}
for article in articles:
if len(article.article_date) > 0:
if article.article_date not in dates:
dates[article.article_date] = 1
else:
dates[article.article_date] += 1
return dict(sorted(dates.items(), key=lambda item: int(item[0]), reverse=False))
def page(request):
return render(request, 'html/index.html')
def summaryPage(request):
args = request.session.get('keyword_pairs')
return render(request, 'html/summary-page.html', args)
def findArticleNumber():
helper = SearchHelper("")
return helper.find_stored_article_number()
# how many article stored into mongodb
def findStoredArticleNumber(request):
dict= findArticleNumber()
# return render(request,json.dumps(dict))
return HttpResponse(json.dumps(dict), content_type="application/json")
def findAnnotationNumber():
helper = SearchHelper("")
return helper.find_annotation_size()
| [] | [] | ["ARTICLE_URL"] | [] | ["ARTICLE_URL"] | python | 1 | 0 | |
src/endpoints.go
|
package main
import (
"fmt"
"io/ioutil"
"os"
"strconv"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
var (
k8sInterface kubernetes.Interface
namespace string
service string
servicePort string
syncInterval time.Duration
)
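// configureEndpoints lazily initialises the in-cluster client, namespace, target service name/port and sync interval from the pod environment; it returns false when anything mandatory is missing or malformed.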
func configureEndpoints() bool {
success := true
if k8sInterface == nil {
// creates the in-cluster config
config, err := rest.InClusterConfig()
if err != nil {
fmt.Printf("\x1b[31m[FATAL]\x1b[0m %s\n", err)
success = false
}
// creates the clientset
clientSet, err := kubernetes.NewForConfig(config)
if err != nil {
fmt.Printf("\x1b[31m[FATAL]\x1b[0m %s\n", err)
success = false
}
k8sInterface = kubernetes.Interface(clientSet)
}
if namespace == "" {
// Get the current namespace
namespaceFile, err := os.Open("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
if err != nil {
fmt.Printf("\x1b[31m[FATAL]\x1b[0m %s\n", err)
success = false
}
namespaceData, err := ioutil.ReadAll(namespaceFile)
if err != nil {
fmt.Printf("\x1b[31m[FATAL]\x1b[0m %s\n", err)
success = false
}
namespace = string(namespaceData)
}
if service == "" {
// Get the service to multicast to
service = os.Getenv("SERVICE_NAME")
if service == "" {
fmt.Print("\x1b[31m[FATAL]\x1b[0m ${SERVICE_NAME} is empty -- a k8s service name must be provided\n")
success = false
}
}
if servicePort == "" {
// Get the service to multicast to
servicePort = os.Getenv("SERVICE_PORT")
if service == "" {
fmt.Print("\x1b[31m[FATAL]\x1b[0m ${SERVICE_PORT} is empty -- a k8s service port must be provided (name or number)\n")
success = false
}
}
if syncInterval == 0 {
// Get the k8s sync interval
interval := os.Getenv("SYNC_INTERVAL")
if interval == "" {
fmt.Print("\x1b[31m[FATAL]\x1b[0m ${SYNC_INTERVAL} is empty -- a sync timeout must be provided\n")
success = false
}
var err error
syncInterval, err = time.ParseDuration(interval)
if err != nil {
fmt.Printf("\x1b[31m[FATAL]\x1b[0m %s\n", err)
success = false
}
}
return success
}
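// maintainEndpoints polls the service's endpoints every syncInterval and reports churn: newly seen ip:port addresses are sent to newAddresses, vanished ones to deadAddresses.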
func maintainEndpoints(
newAddresses chan string,
deadAddresses chan string,
) {
activeAddresses := make(map[string]bool)
endpointsAddresses := make(map[string]bool)
for {
// Endpoints map -- this is authoritative source from kubernetes
endpoints, err := k8sInterface.CoreV1().Endpoints(namespace).Get(service, metav1.GetOptions{})
if err != nil {
fmt.Printf("\x1b[33m[ERROR]\x1b[0m %s\n", err)
time.Sleep(syncInterval)
continue
}
// Clear the map
for key := range endpointsAddresses {
delete(endpointsAddresses, key)
}
// Populate the map
for index, subset := range endpoints.Subsets {
// Find matching port
servicePortNumber := ""
for _, ports := range subset.Ports {
portNumber := strconv.Itoa(int(ports.Port))
if ports.Name == servicePort ||
portNumber == servicePort {
servicePortNumber = portNumber
}
}
if servicePortNumber == "" {
fmt.Printf("\x1b[93m[WARNG]\x1b[0m No port found matching \"%s\" in endpoints %s subset %d\n",
servicePort,
endpoints.Name,
index,
)
continue
}
// Compile addresses from IP & Port
for _, addresses := range subset.Addresses {
address := addresses.IP + ":" + servicePortNumber
endpointsAddresses[address] = true
}
}
// For each address from k8s -- look for new addresses
for endpointAddress := range endpointsAddresses {
if activeAddresses[endpointAddress] {
// -- already tracking this address -- skip
continue
} else {
// -- haven't seen this address before
activeAddresses[endpointAddress] = true
newAddresses <- endpointAddress
}
}
// For each already tracked address -- look for missing addresses
for activeAddress := range activeAddresses {
if endpointsAddresses[activeAddress] {
// -- this address is still active -- skip
continue
} else {
// -- this address is dead
delete(activeAddresses, activeAddress)
deadAddresses <- activeAddress
}
}
// Wait for the sync interval in time
time.Sleep(syncInterval)
}
}
| ["\"SERVICE_NAME\"", "\"SERVICE_PORT\"", "\"SYNC_INTERVAL\""] | [] | ["SERVICE_NAME", "SERVICE_PORT", "SYNC_INTERVAL"] | [] | ["SERVICE_NAME", "SERVICE_PORT", "SYNC_INTERVAL"] | go | 3 | 0 | |
test-robust-acc-label.py
|
'''Train CIFAR10 with PyTorch
Vary the factor settings and evaluate each epoch-76 model.
Measure benign accuracy and robust accuracy (per label).
Plot the results.
Compute the feature center of each label, then the distances between label feature centers.
'''
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import random
import os
import argparse
from models import *
# from utils import progress_bar
# from network import create_network
import cifar10my2
import cifar10my3
from sklearn import datasets
from sklearn.manifold import TSNE
from mpl_toolkits.mplot3d import Axes3D
from time import time
import numpy as np
import matplotlib.pyplot as plt
from models.wideresnet import WideResNet
from models.densenet import DenseNet121
from models.preactresnet import create_network
from torch.autograd import Variable
from time import time
from torch.utils.tensorboard import SummaryWriter
from torchsummaryX import summary
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--gpu', default='0', type=str, help='GPUs id')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
# Model factors
parser.add_argument('--depth', type=int, default=34, metavar='N',
help='model depth (default: 34)')
parser.add_argument('--widen_factor', type=int, default=10, metavar='N',
help='model widen_factor (default: 10)')
parser.add_argument('--droprate', type=float, default=0.0, metavar='N',
help='model droprate (default: 0.0)')
# draw imgs
parser.add_argument('--factors', default='model', type=str, metavar='N',
choices=['widen_factor', 'depth', 'droprate', 'epsilon', 'model'],
help='tensorboard draw img factors')
# PGD attack
parser.add_argument('--epsilon', default=0.031, type=float, help='perturbation')
parser.add_argument('--num-steps', default=20, type=int, help='perturb number of steps')
parser.add_argument('--step-size', default=0.003, type=float, help='perturb step size')
parser.add_argument('--random', default=True, help='random initialization for PGD')
# test on dataset
parser.add_argument('--dataset', default='CIFAR10', choices=['CIFAR10', 'CIFAR100', 'STL10', 'Imagnette', 'SVHN'],
help='train model on dataset')
args = parser.parse_args()
print(args)
# Select which GPUs to use
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
best_acc = 0 # best test accuracy
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
# Data
print('==> Preparing data..')
transform_test = transforms.Compose([
transforms.ToTensor(),
# Commented out for the models provided by TRADES
# transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
# bs = 20
bs = 100
if args.dataset == 'CIFAR10':
testset = cifar10my3.CIFAR10MY(root='../data', train=False, download=True, transform=transform_test, args=args)
testloader = torch.utils.data.DataLoader(testset, batch_size=bs, shuffle=False, num_workers=2)
elif args.dataset == 'CIFAR100':
testset = cifar10my3.CIFAR100MY(root='../data', train=False, download=True, transform=transform_test, args=args)
testloader = torch.utils.data.DataLoader(testset, batch_size=bs, shuffle=False, num_workers=2)
cudnn.benchmark = True
def seed_everything(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
def loadmodel(i, factor):
# Model
# ckpt_list = ['model-wideres-epoch75.pt', 'model-wideres-epoch76.pt', 'model-wideres-epoch100.pt']
ckpt_list = ['model-wideres-epoch76.pt']
print('==> Building model..')
# path = '../Fair-AT/model-cifar-wideResNet/wideresnet/'
# ckpt = '/hot-data/niuzh/Mycode/pytorch-cifar-master/checkpoint/model_cifar_wrn.pt'
# ST
# ckpt = '/hot-data/niuzh/Mycode/Fair-AT/model-cifar-wideResNet/wideresnet/ST' \
# '/e0.031_depth34_widen10_drop0.0/'
# ckpt = '/hot-data/niuzh/Mycode/Fair-AT/model-cifar-wideResNet/wideresnet' \
# '/ST-ori/e0.031_depth34_widen10_drop0.0/'
# Fair ST
# ckpt = '/hot-data/niuzh/Mycode/Fair-AT/model-cifar-wideResNet/wideresnet/' \
# 'ST_fair_v1/e0.031_depth34_widen10_drop0.0/'
# TRADES AT
# ckpt = path + 'TRADES/e0.031_depth34_widen10_drop0.0/'
# ckpt = '../Fair-AT/model-cifar-wideResNet/wideresnet/TRADES/e0.031_depth34_widen10_drop0.0/'
# ckpt = '../Fair-AT/model-cifar-wideResNet/preactresnet/TRADES/e0.031_depth34_widen10_drop0.0'
# ckpt += 'model-wideres-epoch76.pt'
# ckpt = path + 'ST_fair_v1a_T0.1_L1/e0.031_depth34_widen10_drop0.0/'
# ckpt = path + 'TRADES_fair_v1a_T0.1_L1/e0.031_depth34_widen10_drop0.0/'
# ICML
# ckpt_list = ['trade_10_1.0.pt', 'trade_60_1.0.pt', 'trade_120_1.0.pt']
# ckpt = '../Robust-Fair/cifar10/models-wideresnet/fair1/'
# Fair AT
# ckpt = '../Fair-AT/model-cifar-wideResNet/wideresnet/TRADES/e0.031_depth34_widen10_drop0.0/'
# ckpt = '../Fair-AT/model-cifar-wideResNet/preactresnet/TRADES/e0.031_depth34_widen10_drop0.0/'
# ckpt += 'model-wideres-epoch76.pt'
# ckpt += ckpt_list[i]
ckpt = '/data/niuzh/model/cifar10_rst_adv.pt.ckpt'
checkpoint = torch.load(ckpt)
net = nn.DataParallel(WideResNet(depth=factor[1], widen_factor=factor[2], dropRate=factor[3])).cuda()
net.load_state_dict(checkpoint['state_dict'])
# net.load_state_dict(checkpoint)
net.eval()
print(ckpt)
return net
def loadmodel_preactresnte(i, factor):
# Model
# ckpt_list = ['model-wideres-epoch10.pt', 'model-wideres-epoch11.pt', 'model-wideres-epoch12.pt']
print('==> Building model..')
# AT preactresnet
# ckpt = '../Fair-AT/model-cifar-wideResNet/preactresnet/TRADES/e0.031_depth34_widen10_drop0.0/model-wideres-epoch76.pt'
# ckpt = '../Fair-AT/model-cifar-wideResNet/preactresnet/TRADES_fair_v1a_T0.1_L1/e0.031_depth34_widen10_drop0.0/'
# ICML-21
# ckpt_list = ['trade_10_1.0.pt', 'trade_60_1.0.pt', 'trade_120_1.0.pt']
# ckpt_list = ['trade_120_1.0.pt']
# ckpt = '../Robust-Fair/cifar10/models-preactresnet/fair1/'
# net = create_network().cuda()
# Fair-AT
# ckpt_list = ['model-wideres-epoch75.pt', 'model-wideres-epoch76.pt', 'model-wideres-epoch100.pt']
# ckpt = '../Fair-AT/model-cifar-wideResNet/preactresnet/TRADES_fair_v1a_T0.1_L1/e0.031_depth34_widen10_drop0.0/'
# ckpt = '../Fair-AT/model-cifar-wideResNet/preactresnet/TRADES_fair_v1a_T0.1_L1-fl1/e0.031_depth34_widen10_drop0.0/'
# ckpt = '../Fair-AT/model-cifar-wideResNet/preactresnet/TRADES/e0.031_depth34_widen10_drop0.0/'
# ckpt_list = ['model-wideres-epoch75.pt', 'model-wideres-epoch76.pt', 'model-wideres-epoch100.pt']
# AT with OPT save
# ckpt = '../Fair-AT/model-cifar-wideResNet/preactresnet/TRADES/AT-opt/'
# ckpt_list = ['ckpt-epoch75.pt', 'ckpt-epoch76.pt', 'ckpt-epoch100.pt']
# rm label AT
# ckpt = '../Fair-AT/model-cifar-wideResNet/preactresnet/TRADES/rmlabel_seed2/rmlabel' + str(label) + '/'
# ckpt_list = ['model-wideres-epoch76.pt', 'model-wideres-epoch100.pt']
# Fine-Tune model
# ckpt = '../Fair-AT/model-cifar-wideResNet/preactresnet/TRADES/fine-tune/'
# ckpt_list = ['ckpt-ft-epoch76.pt', 'ckpt-ft-epoch100.pt', 'ckpt-ft-epoch120.pt']
# FC Fine-Tune model
# ckpt = '../Fair-AT/model-cifar-wideResNet/preactresnet/TRADES/fine-tune-FC/resum_100/'
# ckpt_list = ['ckpt-ft-epoch100.pt', 'ckpt-ft-epoch120.pt', 'ckpt-ft-epoch140.pt']
# AT only on selected labels
# ckpt = '../Fair-AT/model-cifar-wideResNet/preactresnet/TRADES/svlabel_seed1/svlabel_35/'
# ckpt_list = ['model-wideres-epoch76.pt', 'model-wideres-epoch100.pt']
# CIFAR 100, TRADES
# ckpt = '../Fair-AT/model-cifar-wideResNet/preactresnet/TRADES_CIFAR100/'
# imagnette
# ckpt = '../Fair-AT/model-cifar-wideResNet/preactresnet/ST_Imagnette/kplabel_seed1/percent_1.0/'
# SVHN
ckpt = '../Fair-AT/model-cifar-wideResNet/preactresnet/ST_SVHN/kplabel_seed1/percent_0.01/'
ckpt_list = ['model-wideres-epoch100.pt', 'model-wideres-epoch100.pt']
# ckpt_list = ['model-wideres-epoch76.pt', 'model-wideres-epoch100.pt']
if args.dataset in ('CIFAR10', 'STL10', 'Imagnette', 'SVHN'):
num_classes = 10
elif args.dataset == 'CIFAR100':
num_classes = 100
net = nn.DataParallel(create_network(num_classes)).cuda()
ckpt += ckpt_list[i]
# print(net)
net.load_state_dict(torch.load(ckpt))
# for AT-opt & Fine-tune model
# checkpoint = torch.load(ckpt)
# net.load_state_dict(checkpoint['net'])
net.eval()
print(ckpt)
return net
# Fair model from ICML 21
# def loadmodel_robustfair(i, factor):
# # Model
# ckpt_list = ['trade_120_1.0.pt']
# print('==> Building model..')
# ckpt = '../Robust-Fair/cifar10/models/'
# ckpt += ckpt_list[i]
# net = create_network().cuda()
# # net = nn.DataParallel(WideResNet(depth=factor[1], widen_factor=factor[2], dropRate=factor[3])).cuda()
# # net.load_state_dict(torch.load(path + ckpt))
# net.load_state_dict(torch.load(ckpt))
# net.eval()
# print(ckpt)
# return net
# PGD Attack
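# _pgd_whitebox runs an L-infinity PGD attack on a batch: starting from X (optionally plus
# uniform random noise), it takes num_steps signed-gradient steps of size step_size,
# projecting back into the epsilon ball and clamping to [0, 1] after each step.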
def _pgd_whitebox(model, X, y, epsilon, num_steps=args.num_steps, step_size=args.step_size):
rep, out = model(X)
N, C, H, W = rep.size()
rep = rep.reshape([N, -1])
out = out.data.max(1)[1]
X_pgd = Variable(X.data, requires_grad=True)
if args.random:
random_noise = torch.FloatTensor(*X_pgd.shape).uniform_(-epsilon, epsilon).cuda()
X_pgd = Variable(X_pgd.data + random_noise, requires_grad=True)
for _ in range(num_steps):
opt = optim.SGD([X_pgd], lr=1e-3)
opt.zero_grad()
with torch.enable_grad():
loss = nn.CrossEntropyLoss()(model(X_pgd)[1], y)
loss.backward()
eta = step_size * X_pgd.grad.data.sign()
X_pgd = Variable(X_pgd.data + eta, requires_grad=True)
eta = torch.clamp(X_pgd.data - X.data, -epsilon, epsilon)
X_pgd = Variable(X.data + eta, requires_grad=True)
X_pgd = Variable(torch.clamp(X_pgd, 0, 1.0), requires_grad=True)
rep_pgd, out_pgd = model(X_pgd)
out_pgd = out_pgd.data.max(1)[1]
rep_pgd = rep_pgd.reshape([N, -1])
# return error counts (not raw predictions), since test() accumulates them per class
err_natural = (out != y.data).float().sum()
err_robust = (out_pgd != y.data).float().sum()
return err_natural, err_robust, rep, rep_pgd
# input: tensorboard, model, model_name
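# test evaluates the model class by class: it accumulates natural/robust error counts and
# representation sums per label, turns them into per-label accuracies, then normalizes the
# per-label representation centers and sums the positive pairwise cosine similarities for
# both the clean and the adversarial centers.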
def test(writer, net, model_name, epsilon):
global best_acc
global best_epoch
accs_batch = []
acc_robust_label = []
acc_natural_label = []
count = 0
robust_err_total_label = 0
natural_err_total_label = 0
tmprep, _ = net(torch.zeros([20, 3, 32, 32]).cuda())
_, C, H, W = tmprep.size()
# center of the rep
if args.dataset == 'CIFAR10':
label_test = 1000
rep_label = torch.zeros([10, C * H * W]).cuda()
rep_robust_label = torch.zeros([10, C * H * W]).cuda()
elif args.dataset == 'CIFAR100':
label_test = 100
rep_label = torch.zeros([100, C * H * W]).cuda()
rep_robust_label = torch.zeros([100, C * H * W]).cuda()
rep_all = torch.zeros([C * H * W]).cuda()
rep_pgd_all = torch.zeros([C * H * W]).cuda()
i = 0
with torch.no_grad():
# for inputs, targets in testloader:
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.cuda(), targets.cuda()
X, y = Variable(inputs, requires_grad=True), Variable(targets)
err_natural, err_robust, rep, rep_pgd = _pgd_whitebox(net, X, y, epsilon=epsilon)
robust_err_total_label += err_robust
natural_err_total_label += err_natural
# accumulate representations
rep_all = rep_all + rep.sum(dim=0)
rep_pgd_all = rep_pgd_all + rep_pgd.sum(dim=0)
count = bs + count
# compute the error for each class
if count % label_test == 0:
rep_label[i] = rep_all / label_test  # compute the rep center
rep_robust_label[i] = rep_pgd_all/label_test
# reset the accumulators
rep_all = torch.zeros([C * H * W]).cuda()
rep_pgd_all = torch.zeros([C * H * W]).cuda()
i += 1
robust_acc = (1 - robust_err_total_label / label_test).cpu().numpy()
natural_acc = (1 - natural_err_total_label / label_test).cpu().numpy()
acc_robust_label.append(robust_acc)
acc_natural_label.append(natural_acc)
robust_err_total_label = 0
natural_err_total_label = 0
# print the accuracy for each label
print('acc_natural_label:')
for i in acc_natural_label:
print('{:.3f}'.format(i))
print('acc_robust_label:')
for i in acc_robust_label:
print('{:.3f}'.format(i))
# normalize the per-label rep centers and compute their cosine similarity
rep_norm = nn.functional.normalize(rep_label, dim=1)
logits = torch.mm(rep_norm, torch.transpose(rep_norm, 0, 1)) # [10,HW]*[HW,10]=[10,10]
logits = logits - torch.diag_embed(torch.diag(logits))  # zero out the 1s on the diagonal
# logits = logits.abs().sum().cpu().numpy()
# only count cosine similarities greater than 0
zero = torch.zeros_like(logits)
logits1 = torch.where(logits < 0, zero, logits)
logits1 = logits1.sum().cpu().numpy()
print('Sum distance of each label rep: {:.2f}'.format(logits1))
rep_robust = nn.functional.normalize(rep_robust_label, dim=1)
logits_robust = torch.mm(rep_robust, torch.transpose(rep_robust, 0, 1)) # [10,HW]*[HW,10]=[10,10]
logits_robust = logits_robust - torch.diag_embed(torch.diag(logits_robust))  # zero out the 1s on the diagonal
# logits_robust = logits_robust.abs().sum().cpu().numpy()
# only count values greater than 0
zero = torch.zeros_like(logits_robust)
logits2 = torch.where(logits_robust < 0, zero, logits_robust)
logits2 = logits2.sum().cpu().numpy()
print('Sum distance of robust label rep: {:.2f}'.format(logits2))
return logits1, logits2
def main():
start = time()
seed_everything(1)
writer = SummaryWriter(comment='test_comment', filename_suffix="test_suffix")
# load model
# select the corresponding model according to the factor under test
print('factors:', args.factors)
logits = [0, 0, 0]
logits_robust = [0, 0, 0]
model_num = 2
if args.factors == 'model':
for i in range(model_num):
print("Test: " + str(i))
factor = [args.epsilon, args.depth, args.widen_factor, args.droprate]
# net = loadmodel(i, factor)
net = loadmodel_preactresnte(i, factor)
# test robust fair model
# net = loadmodel_robustfair(i, factor)
logits[i], logits_robust[i] = test(writer, net, 'model_name', factor[0])
else:
raise Exception('this should never happen')
# sum of the dis of the center rep
for m in range(model_num):
print('%.2f' % logits[m])
for m in range(model_num):
print('%.2f' % logits_robust[m])
writer.close()
end = time()
print('Time: {:.3f} min'.format((end - start) / 60))
if __name__ == '__main__':
main()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES",
"PYTHONHASHSEED"
] |
[]
|
["CUDA_VISIBLE_DEVICES", "PYTHONHASHSEED"]
|
python
| 2 | 0 | |
app/manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lenquete.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
chain/sync.go
|
package chain
import (
"bytes"
"context"
"errors"
"fmt"
"os"
"sort"
"strings"
"sync"
"time"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/Gurpartap/async"
"github.com/hashicorp/go-multierror"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
logging "github.com/ipfs/go-log/v2"
"github.com/libp2p/go-libp2p-core/connmgr"
"github.com/libp2p/go-libp2p-core/peer"
cbg "github.com/whyrusleeping/cbor-gen"
"github.com/whyrusleeping/pubsub"
"go.opencensus.io/stats"
"go.opencensus.io/trace"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
blst "github.com/supranational/blst/bindings/go"
// named blockadt here to make it clear that these are the types used by
// messages, regardless of specs-actors version.
blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/beacon"
"github.com/filecoin-project/lotus/chain/exchange"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
bstore "github.com/filecoin-project/lotus/lib/blockstore"
"github.com/filecoin-project/lotus/lib/sigs"
"github.com/filecoin-project/lotus/lib/sigs/bls"
"github.com/filecoin-project/lotus/metrics"
)
// Blocks that are more than MaxHeightDrift epochs above
// the theoretical max height based on systime are quickly rejected
const MaxHeightDrift = 5
var (
// LocalIncoming is the _local_ pubsub (unrelated to libp2p pubsub) topic
// where the Syncer publishes candidate chain heads to be synced.
LocalIncoming = "incoming"
log = logging.Logger("chain")
concurrentSyncRequests = exchange.ShufflePeersPrefix
syncRequestBatchSize = 8
syncRequestRetries = 5
)
// Syncer is in charge of running the chain synchronization logic. As such, it
// is tasked with these functions, amongst others:
//
// * Fast-forwards the chain as it learns of new TipSets from the network via
// the SyncManager.
// * Applies the fork choice rule to select the correct side when confronted
// with a fork in the network.
// * Requests block headers and messages from other peers when not available
// in our BlockStore.
// * Tracks blocks marked as bad in a cache.
// * Keeps the BlockStore and ChainStore consistent with our view of the world,
// the latter of which in turn informs other components when a reorg has been
// committed.
//
// The Syncer does not run workers itself. It's mainly concerned with
// ensuring a consistent state of chain consensus. The reactive and network-
// interfacing processes are part of other components, such as the SyncManager
// (which owns the sync scheduler and sync workers), ChainExchange, the HELLO
// protocol, and the gossipsub block propagation layer.
//
// {hint/concept} The fork-choice rule as it currently stands is: "pick the
// chain with the heaviest weight, so long as it hasn’t deviated one finality
// threshold from our head (900 epochs, parameter determined by spec-actors)".
type Syncer struct {
// The interface for accessing and putting tipsets into local storage
store *store.ChainStore
// handle to the random beacon for verification
beacon beacon.Schedule
// the state manager handles making state queries
sm *stmgr.StateManager
// The known Genesis tipset
Genesis *types.TipSet
// TipSets known to be invalid
bad *BadBlockCache
// handle to the block sync service
Exchange exchange.Client
self peer.ID
syncmgr SyncManager
connmgr connmgr.ConnManager
incoming *pubsub.PubSub
receiptTracker *blockReceiptTracker
verifier ffiwrapper.Verifier
tickerCtxCancel context.CancelFunc
checkptLk sync.Mutex
checkpt types.TipSetKey
ds dtypes.MetadataDS
}
type SyncManagerCtor func(syncFn SyncFunc) SyncManager
// NewSyncer creates a new Syncer object.
func NewSyncer(ds dtypes.MetadataDS, sm *stmgr.StateManager, exchange exchange.Client, syncMgrCtor SyncManagerCtor, connmgr connmgr.ConnManager, self peer.ID, beacon beacon.Schedule, verifier ffiwrapper.Verifier) (*Syncer, error) {
gen, err := sm.ChainStore().GetGenesis()
if err != nil {
return nil, xerrors.Errorf("getting genesis block: %w", err)
}
gent, err := types.NewTipSet([]*types.BlockHeader{gen})
if err != nil {
return nil, err
}
cp, err := loadCheckpoint(ds)
if err != nil {
return nil, xerrors.Errorf("error loading mpool config: %w", err)
}
s := &Syncer{
ds: ds,
checkpt: cp,
beacon: beacon,
bad: NewBadBlockCache(),
Genesis: gent,
Exchange: exchange,
store: sm.ChainStore(),
sm: sm,
self: self,
receiptTracker: newBlockReceiptTracker(),
connmgr: connmgr,
verifier: verifier,
incoming: pubsub.New(50),
}
if build.InsecurePoStValidation {
log.Warn("*********************************************************************************************")
log.Warn(" [INSECURE-POST-VALIDATION] Insecure test validation is enabled. If you see this outside of a test, it is a severe bug! ")
log.Warn("*********************************************************************************************")
}
s.syncmgr = syncMgrCtor(s.Sync)
return s, nil
}
func (syncer *Syncer) Start() {
tickerCtx, tickerCtxCancel := context.WithCancel(context.Background())
syncer.syncmgr.Start()
syncer.tickerCtxCancel = tickerCtxCancel
go syncer.runMetricsTricker(tickerCtx)
}
func (syncer *Syncer) runMetricsTricker(tickerCtx context.Context) {
genesisTime := time.Unix(int64(syncer.Genesis.MinTimestamp()), 0)
ticker := build.Clock.Ticker(time.Duration(build.BlockDelaySecs) * time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C:
sinceGenesis := build.Clock.Now().Sub(genesisTime)
expectedHeight := int64(sinceGenesis.Seconds()) / int64(build.BlockDelaySecs)
stats.Record(tickerCtx, metrics.ChainNodeHeightExpected.M(expectedHeight))
case <-tickerCtx.Done():
return
}
}
}
func (syncer *Syncer) Stop() {
syncer.syncmgr.Stop()
syncer.tickerCtxCancel()
}
// InformNewHead informs the syncer about a new potential tipset
// This should be called when connecting to new peers, and additionally
// when receiving new blocks from the network
func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool {
defer func() {
if err := recover(); err != nil {
log.Errorf("panic in InformNewHead: ", err)
}
}()
ctx := context.Background()
if fts == nil {
log.Errorf("got nil tipset in InformNewHead")
return false
}
if syncer.IsEpochBeyondCurrMax(fts.TipSet().Height()) {
log.Errorf("Received block with impossibly large height %d", fts.TipSet().Height())
return false
}
for _, b := range fts.Blocks {
if reason, ok := syncer.bad.Has(b.Cid()); ok {
log.Warnf("InformNewHead called on block marked as bad: %s (reason: %s)", b.Cid(), reason)
return false
}
if err := syncer.ValidateMsgMeta(b); err != nil {
log.Warnf("invalid block received: %s", err)
return false
}
}
syncer.incoming.Pub(fts.TipSet().Blocks(), LocalIncoming)
if from == syncer.self {
// TODO: this is kindof a hack...
log.Debug("got block from ourselves")
if err := syncer.Sync(ctx, fts.TipSet()); err != nil {
log.Errorf("failed to sync our own block %s: %+v", fts.TipSet().Cids(), err)
return false
}
return true
}
// TODO: IMPORTANT(GARBAGE) this needs to be put in the 'temporary' side of
// the blockstore
if err := syncer.store.PersistBlockHeaders(fts.TipSet().Blocks()...); err != nil {
log.Warn("failed to persist incoming block header: ", err)
return false
}
syncer.Exchange.AddPeer(from)
hts := syncer.store.GetHeaviestTipSet()
bestPweight := hts.ParentWeight()
targetWeight := fts.TipSet().ParentWeight()
if targetWeight.LessThan(bestPweight) {
var miners []string
for _, blk := range fts.TipSet().Blocks() {
miners = append(miners, blk.Miner.String())
}
log.Debugw("incoming tipset does not appear to be better than our best chain, ignoring for now", "miners", miners, "bestPweight", bestPweight, "bestTS", hts.Cids(), "incomingWeight", targetWeight, "incomingTS", fts.TipSet().Cids())
return false
}
syncer.syncmgr.SetPeerHead(ctx, from, fts.TipSet())
return true
}
// IncomingBlocks spawns a goroutine that subscribes to the local eventbus to
// receive new block headers as they arrive from the network, and sends them to
// the returned channel.
//
// These blocks have not necessarily been incorporated to our view of the chain.
func (syncer *Syncer) IncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) {
sub := syncer.incoming.Sub(LocalIncoming)
out := make(chan *types.BlockHeader, 10)
go func() {
defer syncer.incoming.Unsub(sub, LocalIncoming)
for {
select {
case r := <-sub:
hs := r.([]*types.BlockHeader)
for _, h := range hs {
select {
case out <- h:
case <-ctx.Done():
return
}
}
case <-ctx.Done():
return
}
}
}()
return out, nil
}
// ValidateMsgMeta performs structural and content hash validation of the
// messages within this block. If validation passes, it stores the messages in
// the underlying IPLD block store.
func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error {
if msgc := len(fblk.BlsMessages) + len(fblk.SecpkMessages); msgc > build.BlockMessageLimit {
return xerrors.Errorf("block %s has too many messages (%d)", fblk.Header.Cid(), msgc)
}
// TODO: IMPORTANT(GARBAGE). These message puts and the msgmeta
// computation need to go into the 'temporary' side of the blockstore when
// we implement that
// We use a temporary bstore here to avoid writing intermediate pieces
// into the blockstore.
blockstore := bstore.NewTemporary()
cst := cbor.NewCborStore(blockstore)
var bcids, scids []cid.Cid
for _, m := range fblk.BlsMessages {
c, err := store.PutMessage(blockstore, m)
if err != nil {
return xerrors.Errorf("putting bls message to blockstore after msgmeta computation: %w", err)
}
bcids = append(bcids, c)
}
for _, m := range fblk.SecpkMessages {
c, err := store.PutMessage(blockstore, m)
if err != nil {
return xerrors.Errorf("putting bls message to blockstore after msgmeta computation: %w", err)
}
scids = append(scids, c)
}
// Compute the root CID of the combined message trie.
smroot, err := computeMsgMeta(cst, bcids, scids)
if err != nil {
return xerrors.Errorf("validating msgmeta, compute failed: %w", err)
}
// Check that the message trie root matches with what's in the block.
if fblk.Header.Messages != smroot {
return xerrors.Errorf("messages in full block did not match msgmeta root in header (%s != %s)", fblk.Header.Messages, smroot)
}
// Finally, flush.
return vm.Copy(context.TODO(), blockstore, syncer.store.Blockstore(), smroot)
}
func (syncer *Syncer) LocalPeer() peer.ID {
return syncer.self
}
func (syncer *Syncer) ChainStore() *store.ChainStore {
return syncer.store
}
func (syncer *Syncer) InformNewBlock(from peer.ID, blk *types.FullBlock) bool {
// TODO: search for other blocks that could form a tipset with this block
// and then send that tipset to InformNewHead
fts := &store.FullTipSet{Blocks: []*types.FullBlock{blk}}
return syncer.InformNewHead(from, fts)
}
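// copyBlockstore copies every block reachable via AllKeysChan from one blockstore to another.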
func copyBlockstore(ctx context.Context, from, to bstore.Blockstore) error {
ctx, span := trace.StartSpan(ctx, "copyBlockstore")
defer span.End()
cids, err := from.AllKeysChan(ctx)
if err != nil {
return err
}
// TODO: should probably expose better methods on the blockstore for this operation
var blks []blocks.Block
for c := range cids {
b, err := from.Get(c)
if err != nil {
return err
}
blks = append(blks, b)
}
if err := to.PutMany(blks); err != nil {
return err
}
return nil
}
// TODO: this function effectively accepts unchecked input from the network,
// either validate it here, or ensure that it's validated elsewhere (maybe make
// sure the blocksync code checks it?)
// maybe this code should actually live in blocksync??
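// zipTipSetAndMessages reassembles a FullTipSet by attaching to each block header the BLS and
// secpk messages selected by its inclusion indices (bmi, smi), and checks that the recomputed
// message root matches the root recorded in the header.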
func zipTipSetAndMessages(bs cbor.IpldStore, ts *types.TipSet, allbmsgs []*types.Message, allsmsgs []*types.SignedMessage, bmi, smi [][]uint64) (*store.FullTipSet, error) {
if len(ts.Blocks()) != len(smi) || len(ts.Blocks()) != len(bmi) {
return nil, fmt.Errorf("msgincl length didnt match tipset size")
}
fts := &store.FullTipSet{}
for bi, b := range ts.Blocks() {
if msgc := len(bmi[bi]) + len(smi[bi]); msgc > build.BlockMessageLimit {
return nil, fmt.Errorf("block %q has too many messages (%d)", b.Cid(), msgc)
}
var smsgs []*types.SignedMessage
var smsgCids []cid.Cid
for _, m := range smi[bi] {
smsgs = append(smsgs, allsmsgs[m])
smsgCids = append(smsgCids, allsmsgs[m].Cid())
}
var bmsgs []*types.Message
var bmsgCids []cid.Cid
for _, m := range bmi[bi] {
bmsgs = append(bmsgs, allbmsgs[m])
bmsgCids = append(bmsgCids, allbmsgs[m].Cid())
}
mrcid, err := computeMsgMeta(bs, bmsgCids, smsgCids)
if err != nil {
return nil, err
}
if b.Messages != mrcid {
return nil, fmt.Errorf("messages didnt match message root in header for ts %s", ts.Key())
}
fb := &types.FullBlock{
Header: b,
BlsMessages: bmsgs,
SecpkMessages: smsgs,
}
fts.Blocks = append(fts.Blocks, fb)
}
return fts, nil
}
// computeMsgMeta computes the root CID of the combined arrays of message CIDs
// of both types (BLS and Secpk).
func computeMsgMeta(bs cbor.IpldStore, bmsgCids, smsgCids []cid.Cid) (cid.Cid, error) {
// block headers use adt0
store := blockadt.WrapStore(context.TODO(), bs)
bmArr := blockadt.MakeEmptyArray(store)
smArr := blockadt.MakeEmptyArray(store)
for i, m := range bmsgCids {
c := cbg.CborCid(m)
if err := bmArr.Set(uint64(i), &c); err != nil {
return cid.Undef, err
}
}
for i, m := range smsgCids {
c := cbg.CborCid(m)
if err := smArr.Set(uint64(i), &c); err != nil {
return cid.Undef, err
}
}
bmroot, err := bmArr.Root()
if err != nil {
return cid.Undef, err
}
smroot, err := smArr.Root()
if err != nil {
return cid.Undef, err
}
mrcid, err := store.Put(store.Context(), &types.MsgMeta{
BlsMessages: bmroot,
SecpkMessages: smroot,
})
if err != nil {
return cid.Undef, xerrors.Errorf("failed to put msgmeta: %w", err)
}
return mrcid, nil
}
// FetchTipSet tries to load the provided tipset from the store, and falls back
// to the network (client) by querying the supplied peer if not found
// locally.
//
// {hint/usage} This is used from the HELLO protocol, to fetch the greeting
// peer's heaviest tipset if we don't have it.
func (syncer *Syncer) FetchTipSet(ctx context.Context, p peer.ID, tsk types.TipSetKey) (*store.FullTipSet, error) {
if fts, err := syncer.tryLoadFullTipSet(tsk); err == nil {
return fts, nil
}
// fall back to the network.
return syncer.Exchange.GetFullTipSet(ctx, p, tsk)
}
// tryLoadFullTipSet queries the tipset in the ChainStore, and returns a full
// representation of it containing FullBlocks. If ALL blocks are not found
// locally, it errors entirely with blockstore.ErrNotFound.
func (syncer *Syncer) tryLoadFullTipSet(tsk types.TipSetKey) (*store.FullTipSet, error) {
ts, err := syncer.store.LoadTipSet(tsk)
if err != nil {
return nil, err
}
fts := &store.FullTipSet{}
for _, b := range ts.Blocks() {
bmsgs, smsgs, err := syncer.store.MessagesForBlock(b)
if err != nil {
return nil, err
}
fb := &types.FullBlock{
Header: b,
BlsMessages: bmsgs,
SecpkMessages: smsgs,
}
fts.Blocks = append(fts.Blocks, fb)
}
return fts, nil
}
// Sync tries to advance our view of the chain to `maybeHead`. It does nothing
// if our current head is heavier than the requested tipset, or if we're already
// at the requested head, or if the head is the genesis.
//
// Most of the heavy-lifting logic happens in syncer#collectChain. Refer to the
// godocs on that method for a more detailed view.
func (syncer *Syncer) Sync(ctx context.Context, maybeHead *types.TipSet) error {
ctx, span := trace.StartSpan(ctx, "chain.Sync")
defer span.End()
if span.IsRecordingEvents() {
span.AddAttributes(
trace.StringAttribute("tipset", fmt.Sprint(maybeHead.Cids())),
trace.Int64Attribute("height", int64(maybeHead.Height())),
)
}
hts := syncer.store.GetHeaviestTipSet()
if hts.ParentWeight().GreaterThan(maybeHead.ParentWeight()) {
return nil
}
if syncer.Genesis.Equals(maybeHead) || hts.Equals(maybeHead) {
return nil
}
if err := syncer.collectChain(ctx, maybeHead, hts); err != nil {
span.AddAttributes(trace.StringAttribute("col_error", err.Error()))
span.SetStatus(trace.Status{
Code: 13,
Message: err.Error(),
})
return xerrors.Errorf("collectChain failed: %w", err)
}
// At this point we have accepted and synced to the new `maybeHead`
// (`StageSyncComplete`).
if err := syncer.store.PutTipSet(ctx, maybeHead); err != nil {
span.AddAttributes(trace.StringAttribute("put_error", err.Error()))
span.SetStatus(trace.Status{
Code: 13,
Message: err.Error(),
})
return xerrors.Errorf("failed to put synced tipset to chainstore: %w", err)
}
peers := syncer.receiptTracker.GetPeers(maybeHead)
if len(peers) > 0 {
syncer.connmgr.TagPeer(peers[0], "new-block", 40)
for _, p := range peers[1:] {
syncer.connmgr.TagPeer(p, "new-block", 25)
}
}
return nil
}
func isPermanent(err error) bool {
return !errors.Is(err, ErrTemporal)
}
func (syncer *Syncer) ValidateTipSet(ctx context.Context, fts *store.FullTipSet, useCache bool) error {
ctx, span := trace.StartSpan(ctx, "validateTipSet")
defer span.End()
span.AddAttributes(trace.Int64Attribute("height", int64(fts.TipSet().Height())))
ts := fts.TipSet()
if ts.Equals(syncer.Genesis) {
return nil
}
var futures []async.ErrorFuture
for _, b := range fts.Blocks {
b := b // rebind to a scoped variable
futures = append(futures, async.Err(func() error {
if err := syncer.ValidateBlock(ctx, b, useCache); err != nil {
if isPermanent(err) {
syncer.bad.Add(b.Cid(), NewBadBlockReason([]cid.Cid{b.Cid()}, err.Error()))
}
return xerrors.Errorf("validating block %s: %w", b.Cid(), err)
}
if err := syncer.sm.ChainStore().AddToTipSetTracker(b.Header); err != nil {
return xerrors.Errorf("failed to add validated header to tipset tracker: %w", err)
}
return nil
}))
}
for _, f := range futures {
if err := f.AwaitContext(ctx); err != nil {
return err
}
}
return nil
}
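// minerIsValid checks that the miner address has a power claim registered in the power actor
// state at the given base tipset.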
func (syncer *Syncer) minerIsValid(ctx context.Context, maddr address.Address, baseTs *types.TipSet) error {
act, err := syncer.sm.LoadActor(ctx, power.Address, baseTs)
if err != nil {
return xerrors.Errorf("failed to load power actor: %w", err)
}
powState, err := power.Load(syncer.store.Store(ctx), act)
if err != nil {
return xerrors.Errorf("failed to load power actor state: %w", err)
}
_, exist, err := powState.MinerPower(maddr)
if err != nil {
return xerrors.Errorf("failed to look up miner's claim: %w", err)
}
if !exist {
return xerrors.New("miner isn't valid")
}
return nil
}
var ErrTemporal = errors.New("temporal error")
func blockSanityChecks(h *types.BlockHeader) error {
if h.ElectionProof == nil {
return xerrors.Errorf("block cannot have nil election proof")
}
if h.Ticket == nil {
return xerrors.Errorf("block cannot have nil ticket")
}
if h.BlockSig == nil {
return xerrors.Errorf("block had nil signature")
}
if h.BLSAggregate == nil {
return xerrors.Errorf("block had nil bls aggregate signature")
}
return nil
}
// ValidateBlock should match up with 'Semantical Validation' in validation.md in the spec
func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, useCache bool) (err error) {
defer func() {
// b.Cid() could panic for empty blocks that are used in tests.
if rerr := recover(); rerr != nil {
err = xerrors.Errorf("validate block panic: %w", rerr)
return
}
}()
if useCache {
isValidated, err := syncer.store.IsBlockValidated(ctx, b.Cid())
if err != nil {
return xerrors.Errorf("check block validation cache %s: %w", b.Cid(), err)
}
if isValidated {
return nil
}
}
validationStart := build.Clock.Now()
defer func() {
stats.Record(ctx, metrics.BlockValidationDurationMilliseconds.M(metrics.SinceInMilliseconds(validationStart)))
log.Infow("block validation", "took", time.Since(validationStart), "height", b.Header.Height, "age", time.Since(time.Unix(int64(b.Header.Timestamp), 0)))
}()
ctx, span := trace.StartSpan(ctx, "validateBlock")
defer span.End()
if err := blockSanityChecks(b.Header); err != nil {
return xerrors.Errorf("incoming header failed basic sanity checks: %w", err)
}
h := b.Header
baseTs, err := syncer.store.LoadTipSet(types.NewTipSetKey(h.Parents...))
if err != nil {
return xerrors.Errorf("load parent tipset failed (%s): %w", h.Parents, err)
}
winPoStNv := syncer.sm.GetNtwkVersion(ctx, baseTs.Height())
lbts, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, syncer.sm, baseTs, h.Height)
if err != nil {
return xerrors.Errorf("failed to get lookback tipset for block: %w", err)
}
prevBeacon, err := syncer.store.GetLatestBeaconEntry(baseTs)
if err != nil {
return xerrors.Errorf("failed to get latest beacon entry: %w", err)
}
// fast checks first
nulls := h.Height - (baseTs.Height() + 1)
if tgtTs := baseTs.MinTimestamp() + build.BlockDelaySecs*uint64(nulls+1); h.Timestamp != tgtTs {
return xerrors.Errorf("block has wrong timestamp: %d != %d", h.Timestamp, tgtTs)
}
now := uint64(build.Clock.Now().Unix())
if h.Timestamp > now+build.AllowableClockDriftSecs {
return xerrors.Errorf("block was from the future (now=%d, blk=%d): %w", now, h.Timestamp, ErrTemporal)
}
if h.Timestamp > now {
log.Warn("Got block from the future, but within threshold", h.Timestamp, build.Clock.Now().Unix())
}
msgsCheck := async.Err(func() error {
if err := syncer.checkBlockMessages(ctx, b, baseTs); err != nil {
return xerrors.Errorf("block had invalid messages: %w", err)
}
return nil
})
minerCheck := async.Err(func() error {
if err := syncer.minerIsValid(ctx, h.Miner, baseTs); err != nil {
return xerrors.Errorf("minerIsValid failed: %w", err)
}
return nil
})
baseFeeCheck := async.Err(func() error {
baseFee, err := syncer.store.ComputeBaseFee(ctx, baseTs)
if err != nil {
return xerrors.Errorf("computing base fee: %w", err)
}
if types.BigCmp(baseFee, b.Header.ParentBaseFee) != 0 {
return xerrors.Errorf("base fee doesn't match: %s (header) != %s (computed)",
b.Header.ParentBaseFee, baseFee)
}
return nil
})
pweight, err := syncer.store.Weight(ctx, baseTs)
if err != nil {
return xerrors.Errorf("getting parent weight: %w", err)
}
if types.BigCmp(pweight, b.Header.ParentWeight) != 0 {
return xerrors.Errorf("parrent weight different: %s (header) != %s (computed)",
b.Header.ParentWeight, pweight)
}
stateRootCheck := async.Err(func() error {
stateroot, precp, err := syncer.sm.TipSetState(ctx, baseTs)
if err != nil {
return xerrors.Errorf("get tipsetstate(%d, %s) failed: %w", h.Height, h.Parents, err)
}
if stateroot != h.ParentStateRoot {
msgs, err := syncer.store.MessagesForTipset(baseTs)
if err != nil {
log.Error("failed to load messages for tipset during tipset state mismatch error: ", err)
} else {
log.Warn("Messages for tipset with mismatching state:")
for i, m := range msgs {
mm := m.VMMessage()
log.Warnf("Message[%d]: from=%s to=%s method=%d params=%x", i, mm.From, mm.To, mm.Method, mm.Params)
}
}
return xerrors.Errorf("parent state root did not match computed state (%s != %s)", stateroot, h.ParentStateRoot)
}
if precp != h.ParentMessageReceipts {
return xerrors.Errorf("parent receipts root did not match computed value (%s != %s)", precp, h.ParentMessageReceipts)
}
return nil
})
// Stuff that needs worker address
waddr, err := stmgr.GetMinerWorkerRaw(ctx, syncer.sm, lbst, h.Miner)
if err != nil {
return xerrors.Errorf("GetMinerWorkerRaw failed: %w", err)
}
winnerCheck := async.Err(func() error {
if h.ElectionProof.WinCount < 1 {
return xerrors.Errorf("block is not claiming to be a winner")
}
eligible, err := stmgr.MinerEligibleToMine(ctx, syncer.sm, h.Miner, baseTs, lbts)
if err != nil {
return xerrors.Errorf("determining if miner has min power failed: %w", err)
}
if !eligible {
return xerrors.New("block's miner is ineligible to mine")
}
rBeacon := *prevBeacon
if len(h.BeaconEntries) != 0 {
rBeacon = h.BeaconEntries[len(h.BeaconEntries)-1]
}
buf := new(bytes.Buffer)
if err := h.Miner.MarshalCBOR(buf); err != nil {
return xerrors.Errorf("failed to marshal miner address to cbor: %w", err)
}
vrfBase, err := store.DrawRandomness(rBeacon.Data, crypto.DomainSeparationTag_ElectionProofProduction, h.Height, buf.Bytes())
if err != nil {
return xerrors.Errorf("could not draw randomness: %w", err)
}
if err := VerifyElectionPoStVRF(ctx, waddr, vrfBase, h.ElectionProof.VRFProof); err != nil {
return xerrors.Errorf("validating block election proof failed: %w", err)
}
slashed, err := stmgr.GetMinerSlashed(ctx, syncer.sm, baseTs, h.Miner)
if err != nil {
return xerrors.Errorf("failed to check if block miner was slashed: %w", err)
}
if slashed {
return xerrors.Errorf("received block was from slashed or invalid miner")
}
mpow, tpow, _, err := stmgr.GetPowerRaw(ctx, syncer.sm, lbst, h.Miner)
if err != nil {
return xerrors.Errorf("failed getting power: %w", err)
}
j := h.ElectionProof.ComputeWinCount(mpow.QualityAdjPower, tpow.QualityAdjPower)
if h.ElectionProof.WinCount != j {
return xerrors.Errorf("miner claims wrong number of wins: miner: %d, computed: %d", h.ElectionProof.WinCount, j)
}
return nil
})
blockSigCheck := async.Err(func() error {
if err := sigs.CheckBlockSignature(ctx, h, waddr); err != nil {
return xerrors.Errorf("check block signature failed: %w", err)
}
return nil
})
beaconValuesCheck := async.Err(func() error {
if os.Getenv("LOTUS_IGNORE_DRAND") == "_yes_" {
return nil
}
if err := beacon.ValidateBlockValues(syncer.beacon, h, baseTs.Height(), *prevBeacon); err != nil {
return xerrors.Errorf("failed to validate blocks random beacon values: %w", err)
}
return nil
})
tktsCheck := async.Err(func() error {
buf := new(bytes.Buffer)
if err := h.Miner.MarshalCBOR(buf); err != nil {
return xerrors.Errorf("failed to marshal miner address to cbor: %w", err)
}
if h.Height > build.UpgradeSmokeHeight {
buf.Write(baseTs.MinTicket().VRFProof)
}
beaconBase := *prevBeacon
if len(h.BeaconEntries) != 0 {
beaconBase = h.BeaconEntries[len(h.BeaconEntries)-1]
}
vrfBase, err := store.DrawRandomness(beaconBase.Data, crypto.DomainSeparationTag_TicketProduction, h.Height-build.TicketRandomnessLookback, buf.Bytes())
if err != nil {
return xerrors.Errorf("failed to compute vrf base for ticket: %w", err)
}
err = VerifyElectionPoStVRF(ctx, waddr, vrfBase, h.Ticket.VRFProof)
if err != nil {
return xerrors.Errorf("validating block tickets failed: %w", err)
}
return nil
})
wproofCheck := async.Err(func() error {
if err := syncer.VerifyWinningPoStProof(ctx, winPoStNv, h, *prevBeacon, lbst, waddr); err != nil {
return xerrors.Errorf("invalid election post: %w", err)
}
return nil
})
await := []async.ErrorFuture{
minerCheck,
tktsCheck,
blockSigCheck,
beaconValuesCheck,
wproofCheck,
winnerCheck,
msgsCheck,
baseFeeCheck,
stateRootCheck,
}
var merr error
for _, fut := range await {
if err := fut.AwaitContext(ctx); err != nil {
merr = multierror.Append(merr, err)
}
}
if merr != nil {
mulErr := merr.(*multierror.Error)
mulErr.ErrorFormat = func(es []error) string {
if len(es) == 1 {
return fmt.Sprintf("1 error occurred:\n\t* %+v\n\n", es[0])
}
points := make([]string, len(es))
for i, err := range es {
points[i] = fmt.Sprintf("* %+v", err)
}
return fmt.Sprintf(
"%d errors occurred:\n\t%s\n\n",
len(es), strings.Join(points, "\n\t"))
}
return mulErr
}
if useCache {
if err := syncer.store.MarkBlockAsValidated(ctx, b.Cid()); err != nil {
return xerrors.Errorf("caching block validation %s: %w", b.Cid(), err)
}
}
return nil
}
func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, nv network.Version, h *types.BlockHeader, prevBeacon types.BeaconEntry, lbst cid.Cid, waddr address.Address) error {
if build.InsecurePoStValidation {
if len(h.WinPoStProof) == 0 {
return xerrors.Errorf("[INSECURE-POST-VALIDATION] No winning post proof given")
}
if string(h.WinPoStProof[0].ProofBytes) == "valid proof" {
return nil
}
return xerrors.Errorf("[INSECURE-POST-VALIDATION] winning post was invalid")
}
buf := new(bytes.Buffer)
if err := h.Miner.MarshalCBOR(buf); err != nil {
return xerrors.Errorf("failed to marshal miner address: %w", err)
}
rbase := prevBeacon
if len(h.BeaconEntries) > 0 {
rbase = h.BeaconEntries[len(h.BeaconEntries)-1]
}
rand, err := store.DrawRandomness(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, h.Height, buf.Bytes())
if err != nil {
return xerrors.Errorf("failed to get randomness for verifying winning post proof: %w", err)
}
mid, err := address.IDFromAddress(h.Miner)
if err != nil {
return xerrors.Errorf("failed to get ID from miner address %s: %w", h.Miner, err)
}
sectors, err := stmgr.GetSectorsForWinningPoSt(ctx, nv, syncer.verifier, syncer.sm, lbst, h.Miner, rand)
if err != nil {
return xerrors.Errorf("getting winning post sector set: %w", err)
}
ok, err := ffiwrapper.ProofVerifier.VerifyWinningPoSt(ctx, proof2.WinningPoStVerifyInfo{
Randomness: rand,
Proofs: h.WinPoStProof,
ChallengedSectors: sectors,
Prover: abi.ActorID(mid),
})
if err != nil {
return xerrors.Errorf("failed to verify election post: %w", err)
}
if !ok {
log.Errorf("invalid winning post (block: %s, %x; %v)", h.Cid(), rand, sectors)
return xerrors.Errorf("winning post was invalid")
}
return nil
}
// TODO: We should extract this somewhere else and make the message pool and miner use the same logic
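// checkBlockMessages validates the messages carried by a block: it verifies the BLS aggregate
// signature, runs syntactic checks and sender/nonce checks against the parent state, enforces
// the block gas limit, verifies secpk signatures, and finally confirms that the recomputed
// message root matches the header before flushing the messages to the blockstore.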
func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock, baseTs *types.TipSet) error {
{
var sigCids []cid.Cid // this is what we get for people not wanting the marshalcbor method on the cid type
var pubks [][]byte
for _, m := range b.BlsMessages {
sigCids = append(sigCids, m.Cid())
pubk, err := syncer.sm.GetBlsPublicKey(ctx, m.From, baseTs)
if err != nil {
return xerrors.Errorf("failed to load bls public to validate block: %w", err)
}
pubks = append(pubks, pubk)
}
if err := syncer.verifyBlsAggregate(ctx, b.Header.BLSAggregate, sigCids, pubks); err != nil {
return xerrors.Errorf("bls aggregate signature was invalid: %w", err)
}
}
nonces := make(map[address.Address]uint64)
stateroot, _, err := syncer.sm.TipSetState(ctx, baseTs)
if err != nil {
return err
}
st, err := state.LoadStateTree(syncer.store.Store(ctx), stateroot)
if err != nil {
return xerrors.Errorf("failed to load base state tree: %w", err)
}
pl := vm.PricelistByEpoch(baseTs.Height())
var sumGasLimit int64
checkMsg := func(msg types.ChainMsg) error {
m := msg.VMMessage()
// Phase 1: syntactic validation, as defined in the spec
minGas := pl.OnChainMessage(msg.ChainLength())
if err := m.ValidForBlockInclusion(minGas.Total(), syncer.sm.GetNtwkVersion(ctx, b.Header.Height)); err != nil {
return err
}
// ValidForBlockInclusion checks if any single message does not exceed BlockGasLimit
// So below is overflow safe
sumGasLimit += m.GasLimit
if sumGasLimit > build.BlockGasLimit {
return xerrors.Errorf("block gas limit exceeded")
}
// Phase 2: (Partial) semantic validation:
// the sender exists and is an account actor, and the nonces make sense
if _, ok := nonces[m.From]; !ok {
// `GetActor` does not validate that this is an account actor.
act, err := st.GetActor(m.From)
if err != nil {
return xerrors.Errorf("failed to get actor: %w", err)
}
if !builtin.IsAccountActor(act.Code) {
return xerrors.New("Sender must be an account actor")
}
nonces[m.From] = act.Nonce
}
if nonces[m.From] != m.Nonce {
return xerrors.Errorf("wrong nonce (exp: %d, got: %d)", nonces[m.From], m.Nonce)
}
nonces[m.From]++
return nil
}
// Validate message arrays in a temporary blockstore.
tmpbs := bstore.NewTemporary()
tmpstore := blockadt.WrapStore(ctx, cbor.NewCborStore(tmpbs))
bmArr := blockadt.MakeEmptyArray(tmpstore)
for i, m := range b.BlsMessages {
if err := checkMsg(m); err != nil {
return xerrors.Errorf("block had invalid bls message at index %d: %w", i, err)
}
c, err := store.PutMessage(tmpbs, m)
if err != nil {
return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err)
}
k := cbg.CborCid(c)
if err := bmArr.Set(uint64(i), &k); err != nil {
return xerrors.Errorf("failed to put bls message at index %d: %w", i, err)
}
}
smArr := blockadt.MakeEmptyArray(tmpstore)
for i, m := range b.SecpkMessages {
if err := checkMsg(m); err != nil {
return xerrors.Errorf("block had invalid secpk message at index %d: %w", i, err)
}
// `From` being an account actor is only validated inside the `vm.ResolveToKeyAddr` call
// in `StateManager.ResolveToKeyAddress` here (and not in `checkMsg`).
kaddr, err := syncer.sm.ResolveToKeyAddress(ctx, m.Message.From, baseTs)
if err != nil {
return xerrors.Errorf("failed to resolve key addr: %w", err)
}
if err := sigs.Verify(&m.Signature, kaddr, m.Message.Cid().Bytes()); err != nil {
return xerrors.Errorf("secpk message %s has invalid signature: %w", m.Cid(), err)
}
c, err := store.PutMessage(tmpbs, m)
if err != nil {
return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err)
}
k := cbg.CborCid(c)
if err := smArr.Set(uint64(i), &k); err != nil {
return xerrors.Errorf("failed to put secpk message at index %d: %w", i, err)
}
}
bmroot, err := bmArr.Root()
if err != nil {
return err
}
smroot, err := smArr.Root()
if err != nil {
return err
}
mrcid, err := tmpstore.Put(ctx, &types.MsgMeta{
BlsMessages: bmroot,
SecpkMessages: smroot,
})
if err != nil {
return err
}
if b.Header.Messages != mrcid {
return fmt.Errorf("messages didnt match message root in header")
}
// Finally, flush.
return vm.Copy(ctx, tmpbs, syncer.store.Blockstore(), mrcid)
}
func (syncer *Syncer) verifyBlsAggregate(ctx context.Context, sig *crypto.Signature, msgs []cid.Cid, pubks [][]byte) error {
_, span := trace.StartSpan(ctx, "syncer.verifyBlsAggregate")
defer span.End()
span.AddAttributes(
trace.Int64Attribute("msgCount", int64(len(msgs))),
)
msgsS := make([]blst.Message, len(msgs))
for i := 0; i < len(msgs); i++ {
msgsS[i] = msgs[i].Bytes()
}
if len(msgs) == 0 {
return nil
}
valid := new(bls.Signature).AggregateVerifyCompressed(sig.Data, pubks,
msgsS, []byte(bls.DST))
if !valid {
return xerrors.New("bls aggregate signature failed to verify")
}
return nil
}
type syncStateKey struct{}
func extractSyncState(ctx context.Context) *SyncerState {
v := ctx.Value(syncStateKey{})
if v != nil {
return v.(*SyncerState)
}
return nil
}
// collectHeaders collects the headers from the blocks between any two tipsets.
//
// `incoming` is the heaviest/projected/target tipset we have learned about, and
// `known` is usually an anchor tipset we already have in our view of the chain
// (which could be the genesis).
//
// collectHeaders checks if portions of the chain are in our ChainStore; falling
// down to the network to retrieve the missing parts. If during the process, any
// portion we receive is in our denylist (bad list), we short-circuit.
//
// {hint/usage}: This is used by collectChain, which is in turn called from the
// main Sync method (Syncer#Sync), so it's a pretty central method.
//
// {hint/logic}: The logic of this method is as follows:
//
// 1. Check that the from tipset is not linked to a parent block known to be
// bad.
// 2. Check the consistency of beacon entries in the from tipset. We check
// total equality of the BeaconEntries in each block.
// 3. Traverse the chain backwards, for each tipset:
// 3a. Load it from the chainstore; if found, it moves on to its parent.
// 3b. Query our peers via client in batches, requesting up to a
// maximum of 500 tipsets every time.
//
// Once we've concluded, if we find a mismatching tipset at the height where the
// anchor tipset should be, we are facing a fork, and we invoke Syncer#syncFork
// to resolve it. Refer to the godocs there.
//
// All throughout the process, we keep checking if the received blocks are in
// the deny list, and short-circuit the process if so.
func (syncer *Syncer) collectHeaders(ctx context.Context, incoming *types.TipSet, known *types.TipSet) ([]*types.TipSet, error) {
ctx, span := trace.StartSpan(ctx, "collectHeaders")
defer span.End()
ss := extractSyncState(ctx)
span.AddAttributes(
trace.Int64Attribute("incomingHeight", int64(incoming.Height())),
trace.Int64Attribute("knownHeight", int64(known.Height())),
)
// Check if the parents of the from block are in the denylist.
// i.e. if a fork of the chain has been requested that we know to be bad.
for _, pcid := range incoming.Parents().Cids() {
if reason, ok := syncer.bad.Has(pcid); ok {
newReason := reason.Linked("linked to %s", pcid)
for _, b := range incoming.Cids() {
syncer.bad.Add(b, newReason)
}
return nil, xerrors.Errorf("chain linked to block marked previously as bad (%s, %s) (reason: %s)", incoming.Cids(), pcid, reason)
}
}
{
// ensure consistency of beacon entries
targetBE := incoming.Blocks()[0].BeaconEntries
sorted := sort.SliceIsSorted(targetBE, func(i, j int) bool {
return targetBE[i].Round < targetBE[j].Round
})
if !sorted {
syncer.bad.Add(incoming.Cids()[0], NewBadBlockReason(incoming.Cids(), "wrong order of beacon entries"))
return nil, xerrors.Errorf("wrong order of beacon entries")
}
for _, bh := range incoming.Blocks()[1:] {
if len(targetBE) != len(bh.BeaconEntries) {
// cannot mark bad, I think @Kubuxu
return nil, xerrors.Errorf("tipset contained different number for beacon entires")
}
for i, be := range bh.BeaconEntries {
if targetBE[i].Round != be.Round || !bytes.Equal(targetBE[i].Data, be.Data) {
// cannot mark bad, I think @Kubuxu
return nil, xerrors.Errorf("tipset contained different beacon entires")
}
}
}
}
blockSet := []*types.TipSet{incoming}
// Parent of the new (possibly better) tipset that we need to fetch next.
at := incoming.Parents()
// we want to sync all the blocks until the height above our
// best tipset so far
untilHeight := known.Height() + 1
ss.SetHeight(blockSet[len(blockSet)-1].Height())
var acceptedBlocks []cid.Cid
loop:
for blockSet[len(blockSet)-1].Height() > untilHeight {
for _, bc := range at.Cids() {
if reason, ok := syncer.bad.Has(bc); ok {
newReason := reason.Linked("change contained %s", bc)
for _, b := range acceptedBlocks {
syncer.bad.Add(b, newReason)
}
return nil, xerrors.Errorf("chain contained block marked previously as bad (%s, %s) (reason: %s)", incoming.Cids(), bc, reason)
}
}
// If, for some reason, we have a suffix of the chain locally, handle that here
ts, err := syncer.store.LoadTipSet(at)
if err == nil {
acceptedBlocks = append(acceptedBlocks, at.Cids()...)
blockSet = append(blockSet, ts)
at = ts.Parents()
continue
}
if !xerrors.Is(err, bstore.ErrNotFound) {
log.Warn("loading local tipset: %s", err)
}
// NB: GetBlocks validates that the blocks are in-fact the ones we
// requested, and that they are correctly linked to one another. It does
// not validate any state transitions.
window := 500
if gap := int(blockSet[len(blockSet)-1].Height() - untilHeight); gap < window {
window = gap
}
blks, err := syncer.Exchange.GetBlocks(ctx, at, window)
if err != nil {
// Most likely our peers aren't fully synced yet, but forwarded
// new block message (ideally we'd find better peers)
log.Errorf("failed to get blocks: %+v", err)
span.AddAttributes(trace.StringAttribute("error", err.Error()))
// This error will only be logged above.
return nil, xerrors.Errorf("failed to get blocks: %w", err)
}
log.Info("Got blocks: ", blks[0].Height(), len(blks))
// Check that the fetched segment of the chain matches what we already
// have. Since we fetch from the head backwards our reassembled chain
// is sorted in reverse here: we have a child -> parent order, our last
// tipset then should be child of the first tipset retrieved.
// FIXME: The reassembly logic should be part of the `client`
// service, the consumer should not be concerned with the
// `MaxRequestLength` limitation, it should just be able to request
// a segment of arbitrary length. The same burden is put on
// `syncFork()`, which needs to be aware of this as well.
// A successful `GetBlocks()` call is guaranteed to fetch at least
// one tipset, so the access `blks[0]` is safe.
if !blockSet[len(blockSet)-1].IsChildOf(blks[0]) {
return nil, xerrors.Errorf("retrieved segments of the chain are not connected at heights %d/%d",
blockSet[len(blockSet)-1].Height(), blks[0].Height())
}
for _, b := range blks {
if b.Height() < untilHeight {
break loop
}
for _, bc := range b.Cids() {
if reason, ok := syncer.bad.Has(bc); ok {
newReason := reason.Linked("change contained %s", bc)
for _, b := range acceptedBlocks {
syncer.bad.Add(b, newReason)
}
return nil, xerrors.Errorf("chain contained block marked previously as bad (%s, %s) (reason: %s)", incoming.Cids(), bc, reason)
}
}
blockSet = append(blockSet, b)
}
acceptedBlocks = append(acceptedBlocks, at.Cids()...)
ss.SetHeight(blks[len(blks)-1].Height())
at = blks[len(blks)-1].Parents()
}
base := blockSet[len(blockSet)-1]
if base.Equals(known) {
blockSet = blockSet[:len(blockSet)-1]
base = blockSet[len(blockSet)-1]
}
if base.IsChildOf(known) {
// common case: receiving blocks that are building on top of our best tipset
return blockSet, nil
}
knownParent, err := syncer.store.LoadTipSet(known.Parents())
if err != nil {
return nil, xerrors.Errorf("failed to load next local tipset: %w", err)
}
if base.IsChildOf(knownParent) {
// common case: receiving a block thats potentially part of the same tipset as our best block
return blockSet, nil
}
// We have now ascertained that this is *not* a 'fast forward'
log.Warnf("(fork detected) synced header chain (%s - %d) does not link to our best block (%s - %d)", incoming.Cids(), incoming.Height(), known.Cids(), known.Height())
fork, err := syncer.syncFork(ctx, base, known)
if err != nil {
if xerrors.Is(err, ErrForkTooLong) || xerrors.Is(err, ErrForkCheckpoint) {
// TODO: we're marking this block bad in the same way that we mark invalid blocks bad. Maybe distinguish?
log.Warn("adding forked chain to our bad tipset cache")
for _, b := range incoming.Blocks() {
syncer.bad.Add(b.Cid(), NewBadBlockReason(incoming.Cids(), "fork past finality"))
}
}
return nil, xerrors.Errorf("failed to sync fork: %w", err)
}
blockSet = append(blockSet, fork...)
return blockSet, nil
}
var ErrForkTooLong = fmt.Errorf("fork longer than threshold")
var ErrForkCheckpoint = fmt.Errorf("fork would require us to diverge from checkpointed block")
// syncFork tries to obtain the chain fragment that links a fork into a common
// ancestor in our view of the chain.
//
// If the fork is too long (build.ForkLengthThreshold), or would cause us to diverge from the checkpoint (ErrForkCheckpoint),
// we add the entire subchain to the denylist. Else, we find the common ancestor, and add the missing chain
// fragment until the fork point to the returned []TipSet.
func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, known *types.TipSet) ([]*types.TipSet, error) {
chkpt := syncer.GetCheckpoint()
if known.Key() == chkpt {
return nil, ErrForkCheckpoint
}
// TODO: Does this mean we always ask for ForkLengthThreshold blocks from the network, even if we just need, like, 2?
// Would it not be better to ask in smaller chunks, given that an ~ForkLengthThreshold is very rare?
tips, err := syncer.Exchange.GetBlocks(ctx, incoming.Parents(), int(build.ForkLengthThreshold))
if err != nil {
return nil, err
}
nts, err := syncer.store.LoadTipSet(known.Parents())
if err != nil {
return nil, xerrors.Errorf("failed to load next local tipset: %w", err)
}
for cur := 0; cur < len(tips); {
if nts.Height() == 0 {
if !syncer.Genesis.Equals(nts) {
return nil, xerrors.Errorf("somehow synced chain that linked back to a different genesis (bad genesis: %s)", nts.Key())
}
return nil, xerrors.Errorf("synced chain forked at genesis, refusing to sync; incoming: %s", incoming.Cids())
}
if nts.Equals(tips[cur]) {
return tips[:cur+1], nil
}
if nts.Height() < tips[cur].Height() {
cur++
} else {
// We will be forking away from nts, check that it isn't checkpointed
if nts.Key() == chkpt {
return nil, ErrForkCheckpoint
}
nts, err = syncer.store.LoadTipSet(nts.Parents())
if err != nil {
return nil, xerrors.Errorf("loading next local tipset: %w", err)
}
}
}
return nil, ErrForkTooLong
}
func (syncer *Syncer) syncMessagesAndCheckState(ctx context.Context, headers []*types.TipSet) error {
ss := extractSyncState(ctx)
ss.SetHeight(headers[len(headers)-1].Height())
return syncer.iterFullTipsets(ctx, headers, func(ctx context.Context, fts *store.FullTipSet) error {
log.Debugw("validating tipset", "height", fts.TipSet().Height(), "size", len(fts.TipSet().Cids()))
if err := syncer.ValidateTipSet(ctx, fts, true); err != nil {
log.Errorf("failed to validate tipset: %+v", err)
return xerrors.Errorf("message processing failed: %w", err)
}
stats.Record(ctx, metrics.ChainNodeWorkerHeight.M(int64(fts.TipSet().Height())))
ss.SetHeight(fts.TipSet().Height())
return nil
})
}
// fills out each of the given tipsets with messages and calls the callback with it
func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipSet, cb func(context.Context, *store.FullTipSet) error) error {
ss := extractSyncState(ctx)
ctx, span := trace.StartSpan(ctx, "iterFullTipsets")
defer span.End()
span.AddAttributes(trace.Int64Attribute("num_headers", int64(len(headers))))
for i := len(headers) - 1; i >= 0; {
fts, err := syncer.store.TryFillTipSet(headers[i])
if err != nil {
return err
}
if fts != nil {
if err := cb(ctx, fts); err != nil {
return err
}
i--
continue
}
batchSize := concurrentSyncRequests * syncRequestBatchSize
if i < batchSize {
batchSize = i + 1
}
ss.SetStage(api.StageFetchingMessages)
startOffset := i + 1 - batchSize
bstout, batchErr := syncer.fetchMessages(ctx, headers[startOffset:startOffset+batchSize], startOffset)
ss.SetStage(api.StageMessages)
if batchErr != nil {
return xerrors.Errorf("failed to fetch messages: %w", batchErr)
}
for bsi := 0; bsi < len(bstout); bsi++ {
// temp storage so we don't persist data we don't want to
bs := bstore.NewTemporary()
blks := cbor.NewCborStore(bs)
this := headers[i-bsi]
bstip := bstout[len(bstout)-(bsi+1)]
fts, err := zipTipSetAndMessages(blks, this, bstip.Bls, bstip.Secpk, bstip.BlsIncludes, bstip.SecpkIncludes)
if err != nil {
log.Warnw("zipping failed", "error", err, "bsi", bsi, "i", i,
"height", this.Height(),
"next-height", i+batchSize)
return xerrors.Errorf("message processing failed: %w", err)
}
if err := cb(ctx, fts); err != nil {
return err
}
if err := persistMessages(ctx, bs, bstip); err != nil {
return err
}
if err := copyBlockstore(ctx, bs, syncer.store.Blockstore()); err != nil {
return xerrors.Errorf("message processing failed: %w", err)
}
}
i -= batchSize
}
return nil
}
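// fetchMessages requests the compacted messages for the given headers from our peers in
// parallel batches of syncRequestBatchSize, retrying each request up to syncRequestRetries
// times before reporting a batch error.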
func (syncer *Syncer) fetchMessages(ctx context.Context, headers []*types.TipSet, startOffset int) ([]*exchange.CompactedMessages, error) {
batchSize := len(headers)
batch := make([]*exchange.CompactedMessages, batchSize)
var wg sync.WaitGroup
var mx sync.Mutex
var batchErr error
start := build.Clock.Now()
for j := 0; j < batchSize; j += syncRequestBatchSize {
wg.Add(1)
go func(j int) {
defer wg.Done()
nreq := syncRequestBatchSize
if j+nreq > batchSize {
nreq = batchSize - j
}
failed := false
for offset := 0; !failed && offset < nreq; {
nextI := j + offset
lastI := j + nreq
var requestErr error
var requestResult []*exchange.CompactedMessages
for retry := 0; requestResult == nil && retry < syncRequestRetries; retry++ {
if retry > 0 {
log.Infof("fetching messages at %d (retry %d)", startOffset+nextI, retry)
} else {
log.Infof("fetching messages at %d", startOffset+nextI)
}
result, err := syncer.Exchange.GetChainMessages(ctx, headers[nextI:lastI])
if err != nil {
requestErr = multierror.Append(requestErr, err)
} else {
requestResult = result
}
}
mx.Lock()
if requestResult != nil {
copy(batch[j+offset:], requestResult)
offset += len(requestResult)
} else {
log.Errorf("error fetching messages at %d: %s", nextI, requestErr)
batchErr = multierror.Append(batchErr, requestErr)
failed = true
}
mx.Unlock()
}
}(j)
}
wg.Wait()
if batchErr != nil {
return nil, batchErr
}
log.Infof("fetching messages for %d tipsets at %d done; took %s", batchSize, startOffset, build.Clock.Since(start))
return batch, nil
}
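// persistMessages writes the BLS and secpk messages of a compacted-messages response into the
// given blockstore, rejecting secpk messages whose signature type is not secp256k1.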
func persistMessages(ctx context.Context, bs bstore.Blockstore, bst *exchange.CompactedMessages) error {
_, span := trace.StartSpan(ctx, "persistMessages")
defer span.End()
for _, m := range bst.Bls {
//log.Infof("putting BLS message: %s", m.Cid())
if _, err := store.PutMessage(bs, m); err != nil {
log.Errorf("failed to persist messages: %+v", err)
return xerrors.Errorf("BLS message processing failed: %w", err)
}
}
for _, m := range bst.Secpk {
if m.Signature.Type != crypto.SigTypeSecp256k1 {
return xerrors.Errorf("unknown signature type on message %s: %q", m.Cid(), m.Signature.Type)
}
//log.Infof("putting secp256k1 message: %s", m.Cid())
if _, err := store.PutMessage(bs, m); err != nil {
log.Errorf("failed to persist messages: %+v", err)
return xerrors.Errorf("secp256k1 message processing failed: %w", err)
}
}
return nil
}
// collectChain tries to advance our view of the chain to the purported head.
//
// It goes through various stages:
//
// 1. StageHeaders: we proceed in the sync process by requesting block headers
// from our peers, moving back from their heads, until we reach a tipset
// that we have in common (such a common tipset must exist, though it may
// simply be the genesis block).
//
// If the common tipset is our head, we treat the sync as a "fast-forward",
// else we must drop part of our chain to connect to the peer's head
// (referred to as "forking").
//
// 2. StagePersistHeaders: now that we've collected the missing headers,
// augmented by those on the other side of a fork, we persist them to the
// BlockStore.
//
// 3. StageMessages: having acquired the headers and found a common tipset,
// we then move forward, requesting the full blocks, including the messages.
func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet, hts *types.TipSet) error {
ctx, span := trace.StartSpan(ctx, "collectChain")
defer span.End()
ss := extractSyncState(ctx)
ss.Init(hts, ts)
headers, err := syncer.collectHeaders(ctx, ts, hts)
if err != nil {
ss.Error(err)
return err
}
span.AddAttributes(trace.Int64Attribute("syncChainLength", int64(len(headers))))
if !headers[0].Equals(ts) {
log.Errorf("collectChain headers[0] should be equal to sync target. Its not: %s != %s", headers[0].Cids(), ts.Cids())
}
ss.SetStage(api.StagePersistHeaders)
toPersist := make([]*types.BlockHeader, 0, len(headers)*int(build.BlocksPerEpoch))
for _, ts := range headers {
toPersist = append(toPersist, ts.Blocks()...)
}
if err := syncer.store.PersistBlockHeaders(toPersist...); err != nil {
err = xerrors.Errorf("failed to persist synced blocks to the chainstore: %w", err)
ss.Error(err)
return err
}
toPersist = nil
ss.SetStage(api.StageMessages)
if err := syncer.syncMessagesAndCheckState(ctx, headers); err != nil {
err = xerrors.Errorf("collectChain syncMessages: %w", err)
ss.Error(err)
return err
}
ss.SetStage(api.StageSyncComplete)
log.Debugw("new tipset", "height", ts.Height(), "tipset", types.LogCids(ts.Cids()))
return nil
}
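// VerifyElectionPoStVRF verifies the election PoSt VRF proof evrf against the given worker address and randomness.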
func VerifyElectionPoStVRF(ctx context.Context, worker address.Address, rand []byte, evrf []byte) error {
return gen.VerifyVRF(ctx, worker, rand, evrf)
}
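// State returns a snapshot of the state of every sync worker.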
func (syncer *Syncer) State() []SyncerStateSnapshot {
return syncer.syncmgr.State()
}
// MarkBad manually adds a block to the "bad blocks" cache.
func (syncer *Syncer) MarkBad(blk cid.Cid) {
syncer.bad.Add(blk, NewBadBlockReason([]cid.Cid{blk}, "manually marked bad"))
}
// UnmarkBad manually removes a block from the "bad blocks" cache.
func (syncer *Syncer) UnmarkBad(blk cid.Cid) {
syncer.bad.Remove(blk)
}
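// UnmarkAllBad purges the "bad blocks" cache entirely.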
func (syncer *Syncer) UnmarkAllBad() {
syncer.bad.Purge()
}
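// CheckBadBlockCache reports whether blk is in the "bad blocks" cache and, if so, the recorded reason.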
func (syncer *Syncer) CheckBadBlockCache(blk cid.Cid) (string, bool) {
bbr, ok := syncer.bad.Has(blk)
return bbr.String(), ok
}
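// getLatestBeaconEntry walks back through at most 20 parent tipsets from ts and returns the most recent beacon entry it finds.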
func (syncer *Syncer) getLatestBeaconEntry(_ context.Context, ts *types.TipSet) (*types.BeaconEntry, error) {
cur := ts
for i := 0; i < 20; i++ {
cbe := cur.Blocks()[0].BeaconEntries
if len(cbe) > 0 {
return &cbe[len(cbe)-1], nil
}
if cur.Height() == 0 {
return nil, xerrors.Errorf("made it back to genesis block without finding beacon entry")
}
next, err := syncer.store.LoadTipSet(cur.Parents())
if err != nil {
return nil, xerrors.Errorf("failed to load parents when searching back for latest beacon entry: %w", err)
}
cur = next
}
return nil, xerrors.Errorf("found NO beacon entries in the 20 latest tipsets")
}
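// IsEpochBeyondCurrMax reports whether epoch lies further in the future than the current wall-clock epoch plus MaxHeightDrift.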
func (syncer *Syncer) IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool {
g, err := syncer.store.GetGenesis()
if err != nil {
return false
}
now := uint64(build.Clock.Now().Unix())
return epoch > (abi.ChainEpoch((now-g.Timestamp)/build.BlockDelaySecs) + MaxHeightDrift)
}
|
[
"\"LOTUS_IGNORE_DRAND\""
] |
[] |
[
"LOTUS_IGNORE_DRAND"
] |
[]
|
["LOTUS_IGNORE_DRAND"]
|
go
| 1 | 0 | |
src/MyThink_MIC5_Decoder8.py
|
import os
import glob
import torch
import numpy as np
from PIL import Image
from skimage import io
from alisuretool.Tools import Tools
from torch.utils.data import DataLoader
from src.MyTrain_MIC5_Decoder8 import BASNet, DatasetUSOD
def one_decoder():
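"""Run the trained BASNet decoder over the DUTS-TR images and, for each image,
save a copy of the input, the CAMs, the pseudo label and the decoder saliency
maps into a directory named after the predicted class index."""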
# --------- 1. get path ---------
has_mask = True
more_obj = False
# model_dir = './saved_models/my_train_mic5_decoder8_aug_mask_norm_5bce_d2/120_train_3.043.pth'
# prediction_dir = Tools.new_dir('./test_data/my_train_mic5_decoder8_aug_mask_norm_5bce_d2_120_image_decoder')
model_dir = './saved_models/my_train_mic5_decoder8_aug_mask_norm_5bce_d3/115_train_3.046.pth'
prediction_dir = Tools.new_dir('./test_data/my_train_mic5_decoder8_aug_mask_norm_5bce_d3_115_image_decoder')
# --------- 2. data loader ---------
image_dir = '/mnt/4T/Data/SOD/DUTS/DUTS-TR/DUTS-TR-Image/'
img_name_list = glob.glob(image_dir + '*.jpg')
test_dataset = DatasetUSOD(img_name_list=img_name_list, is_train=False)
test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=8)
# --------- 3. model define ---------
Tools.print("...load BASNet...")
net = BASNet(3, clustering_num_list=[128, 256, 512], pretrained=False, has_mask=has_mask, more_obj=more_obj)
if torch.cuda.is_available():
net.cuda()
net.load_state_dict(torch.load(model_dir), strict=False)
# --------- 4. inference for each image ---------
net.eval()
for i_test, (inputs_test, _) in enumerate(test_dataloader):
Tools.print("inference: {} {}".format(i_test, img_name_list[i_test]))
inputs_test = inputs_test.type(torch.FloatTensor).cuda()
return_m, return_d = net(inputs_test)
top_k_value, top_k_index = torch.topk(return_m["m1"]["smc_logits"], 1, 1)
smc_result = top_k_index.cpu().detach().numpy()[0][0]
img_name = img_name_list[i_test]
result_path = os.path.join(prediction_dir, str(smc_result))
result_path = Tools.new_dir(result_path)
# 1
result_name = os.path.join(result_path, os.path.split(img_name)[1])
im_data = io.imread(img_name)
io.imsave(result_name, im_data)
# 2
cam1 = return_d["label"]["cam_norm_1_up"].squeeze().cpu().data.numpy()
cam2 = return_d["label"]["cam_norm_2_up"].squeeze().cpu().data.numpy()
cam3 = return_d["label"]["cam_norm_3_up"].squeeze().cpu().data.numpy()
im1 = Image.fromarray(cam1 * 255).convert('RGB')
im2 = Image.fromarray(cam2 * 255).convert('RGB')
im3 = Image.fromarray(cam3 * 255).convert('RGB')
imo1 = im1.resize((im_data.shape[1], im_data.shape[0]), resample=Image.BILINEAR)
imo2 = im2.resize((im_data.shape[1], im_data.shape[0]), resample=Image.BILINEAR)
imo3 = im3.resize((im_data.shape[1], im_data.shape[0]), resample=Image.BILINEAR)
imo1.save(os.path.join(result_path, '{}_{}_{}.png'.format(
os.path.splitext(os.path.basename(img_name))[0], 1, smc_result)))
imo2.save(os.path.join(result_path, '{}_{}_{}.png'.format(
os.path.splitext(os.path.basename(img_name))[0], 2, smc_result)))
imo3.save(os.path.join(result_path, '{}_{}_{}.png'.format(
os.path.splitext(os.path.basename(img_name))[0], 3, smc_result)))
# 3
camf = return_d["label"]["cam_norm_up"].squeeze().cpu().data.numpy()
imf = Image.fromarray(camf * 255).convert('RGB')
imof = imf.resize((im_data.shape[1], im_data.shape[0]), resample=Image.BILINEAR)
imof.save(os.path.join(result_path, '{}_{}_{}.png'.format(
os.path.splitext(os.path.basename(img_name))[0], "f", smc_result)))
# 4
label = return_d["label"]["label"].squeeze().cpu().data.numpy()
im_label = Image.fromarray((np.asarray(label, dtype=np.uint8) + 1) * 127).convert('RGB')
imo_label = im_label.resize((im_data.shape[1], im_data.shape[0]), resample=Image.BILINEAR)
imo_label.save(os.path.join(result_path, '{}_{}_{}.png'.format(
os.path.splitext(os.path.basename(img_name))[0], "l", smc_result)))
# 5
for key in ["d1", "d2", "d3"]:
d_out_up_sigmoid = return_d[key]["out_up_sigmoid"].squeeze().cpu().data.numpy()
im_d_out_up_sigmoid = Image.fromarray(d_out_up_sigmoid * 255).convert('RGB')
imo_d_out_up_sigmoid = im_d_out_up_sigmoid.resize((im_data.shape[1], im_data.shape[0]),
resample=Image.BILINEAR)
imo_d_out_up_sigmoid.save(os.path.join(result_path, '{}_{}_{}.png'.format(
os.path.splitext(os.path.basename(img_name))[0], key, smc_result)))
pass
pass
pass
if __name__ == '__main__':
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
one_decoder()
pass
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
utils/utils.go
|
package utils
import (
"log"
"os"
"path/filepath"
"strings"
)
// storagePaths provides a basic cache for StoragePath
var storagePaths = map[string]string{}
// StoragePath returns the storage path for appName; for now appName should be something that can safely be used as a filename.
func StoragePath(appName string) string {
if a, ok := storagePaths[appName]; ok {
return a
}
basedir := os.Getenv("NP2P_STORAGE_PATH")
if len(basedir) == 0 {
basedir = os.Getenv("XDG_CONFIG_HOME")
if len(basedir) == 0 {
basedir = os.Getenv("HOME")
if len(basedir) == 0 {
basedir = "./" // FIXME: set to cwd if dunno wth is going on
}
basedir = filepath.Join(basedir, ".config")
}
basedir = filepath.Join(basedir, "unifiedpush", "distributors")
err := os.MkdirAll(basedir, 0o700)
if err != nil {
basedir = "./"
// FIXME idk wth to do when there's an error here
}
}
finalFilename := filepath.Join(basedir, appName)
storagePaths[appName] = finalFilename
return finalFilename
}
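// Log is the shared package-level Logger instance.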
var Log Logger
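// Logger is a minimal logger wrapping the standard library log package.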
type Logger struct {
}
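// Debugln logs only when DEBUG=true or when the binary looks like a 'go run' build (argv[0] under /tmp/go-build).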
func (Logger) Debugln(inps ...interface{}) {
if os.Getenv("DEBUG") == "true" || strings.HasPrefix(os.Args[0], "/tmp/go-build") {
log.Println(inps...)
}
}
func (Logger) Infoln(inps ...interface{}) {
log.Println(inps...)
}
|
[
"\"NP2P_STORAGE_PATH\"",
"\"XDG_CONFIG_HOME\"",
"\"HOME\"",
"\"DEBUG\""
] |
[] |
[
"HOME",
"DEBUG",
"XDG_CONFIG_HOME",
"NP2P_STORAGE_PATH"
] |
[]
|
["HOME", "DEBUG", "XDG_CONFIG_HOME", "NP2P_STORAGE_PATH"]
|
go
| 4 | 0 | |
core/api_v2.go
|
package core
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
"time"
"github.com/jhunt/go-log"
"github.com/shieldproject/shield/core/vault"
"github.com/shieldproject/shield/db"
"github.com/shieldproject/shield/route"
"github.com/shieldproject/shield/timespec"
"github.com/shieldproject/shield/util"
)
type v2SystemArchive struct {
UUID string `json:"uuid"`
Schedule string `json:"schedule"`
TakenAt int64 `json:"taken_at"`
Expiry int `json:"expiry"`
Size int64 `json:"size"`
OK bool `json:"ok"`
Notes string `json:"notes"`
}
type v2SystemTask struct {
UUID string `json:"uuid"`
Type string `json:"type"`
Status string `json:"status"`
Owner string `json:"owner"`
RequestedAt int64 `json:"requested_at"`
StartedAt int64 `json:"started_at"`
StoppedAt int64 `json:"stopped_at"`
OK bool `json:"ok"`
Notes string `json:"notes"`
Archive *v2SystemArchive `json:"archive,omitempty"`
Log string `json:"log"`
JobUUID string `json:"job_uuid"`
TenantUUID string `json:"tenant_uuid"`
ArchiveUUID string `json:"archive_uuid"`
StoreUUID string `json:"store_uuid"`
TargetUUID string `json:"target_uuid"`
}
type v2SystemJob struct {
UUID string `json:"uuid"`
Schedule string `json:"schedule"`
Compression string `json:"compression"`
From string `json:"from"`
To string `json:"to"`
OK bool `json:"ok"`
Store struct {
UUID string `json:"uuid"`
Name string `json:"name"`
Summary string `json:"summary"`
Plugin string `json:"plugin"`
Healthy bool `json:"healthy"`
} `json:"store"`
Keep struct {
N int `json:"n"`
Days int `json:"days"`
} `json:"keep"`
}
type v2System struct {
UUID string `json:"uuid"`
Name string `json:"name"`
Notes string `json:"notes"`
OK bool `json:"ok"`
Compression string `json:"compression"`
Jobs []v2SystemJob `json:"jobs"`
Tasks []v2SystemTask `json:"tasks"`
}
type v2LocalTenant struct {
UUID string `json:"uuid"`
Name string `json:"name"`
Role string `json:"role"`
}
type v2LocalUser struct {
UUID string `json:"uuid"`
Name string `json:"name"`
Account string `json:"account"`
SysRole string `json:"sysrole"`
Tenants []v2LocalTenant `json:"tenants"`
}
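// v2API constructs the route.Router that serves the SHIELD v2 HTTP API.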
func (c *Core) v2API() *route.Router {
r := &route.Router{
Debug: c.Config.Debug,
}
r.Dispatch("GET /v2/info", func(r *route.Request) { // {{{
r.OK(c.info)
})
// }}}
r.Dispatch("GET /v2/bearings", func(r *route.Request) { // {{{
var out struct {
/* Status of the internal SHIELD Vault. */
Vault string `json:"vault"`
/* Information about this SHIELD installation itself,
including its name, the MOTD, the UI theme color,
API and software versions, etc. */
SHIELD interface{} `json:"shield"`
/* The currently logged-in user. */
User *db.User `json:"user"`
/* Global storage systems */
Stores []*db.Store `json:"stores"`
/* Initial "seed" data for the web UI data layer.
This, combined with the stream of event data that
we get from the /v2/events web socket should
suffice, and mitigate polling. */
Tenants map[string]Bearing `json:"tenants"`
}
out.SHIELD = c.info
if user, err := c.db.GetUserForSession(r.SessionID()); err != nil {
r.Fail(route.Oops(err, "Unable to retrieve user information"))
return
} else if user != nil {
out.User = user
/* retrieve vault status */
out.Vault, err = c.vault.StatusString()
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve vault status"))
return
}
/* retrieve global stores */
out.Stores, err = c.db.GetAllStores(&db.StoreFilter{ForTenant: db.GlobalTenantUUID})
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve global stores"))
return
}
/* retrieve the memberships for this user */
memberships, err := c.db.GetMembershipsForUser(user.UUID)
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve user membership information"))
return
}
out.Tenants = make(map[string]Bearing)
for _, m := range memberships {
b, err := c.BearingFor(m)
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve user membership information"))
return
}
out.Tenants[b.Tenant.UUID] = b
}
}
r.OK(out)
})
// }}}
r.Dispatch("GET /v2/health", func(r *route.Request) { // {{{
// you must be logged into SHIELD to access SHIELD health
if c.IsNotAuthenticated(r) {
return
}
health, err := c.checkHealth()
if err != nil {
r.Fail(route.Oops(err, "Unable to check SHIELD health"))
return
}
r.OK(health)
})
// }}}
r.Dispatch("GET /v2/scheduler/status", func(r *route.Request) { // {{{
if c.IsNotSystemEngineer(r) {
return
}
type named struct {
UUID string `json:"uuid"`
Name string `json:"name"`
}
type job struct {
UUID string `json:"uuid"`
Name string `json:"name"`
Schedule string `json:"schedule"`
}
type archive struct {
UUID string `json:"uuid"`
Size int64 `json:"size"`
}
type backlogStatus struct {
Priority int `json:"priority"`
Position int `json:"position"`
TaskUUID string `json:"task_uuid"`
Op string `json:"op"`
Agent string `json:"agent"`
Tenant *named `json:"tenant,omitempty"`
Store *named `json:"store,omitempty"`
System *named `json:"system,omitempty"`
Job *job `json:"job,omitempty"`
Archive *archive `json:"archive,omitempty"`
}
type workerStatus struct {
ID int `json:"id"`
Idle bool `json:"idle"`
TaskUUID string `json:"task_uuid"`
LastSeen int `json:"last_seen"`
Op string `json:"op"`
Status string `json:"status"`
Agent string `json:"agent"`
Tenant *named `json:"tenant,omitempty"`
Store *named `json:"store,omitempty"`
System *named `json:"system,omitempty"`
Job *job `json:"job,omitempty"`
Archive *archive `json:"archive,omitempty"`
}
type status struct {
Backlog []backlogStatus `json:"backlog"`
Workers []workerStatus `json:"workers"`
}
ps := c.scheduler.Status()
out := status{
Backlog: make([]backlogStatus, len(ps.Backlog)),
Workers: make([]workerStatus, len(ps.Workers)),
}
tenants := make(map[string]*db.Tenant)
stores := make(map[string]*db.Store)
systems := make(map[string]*db.Target)
jobs := make(map[string]*db.Job)
archives := make(map[string]*db.Archive)
for i, x := range ps.Backlog {
out.Backlog[i].Priority = x.Priority + 1
out.Backlog[i].Position = x.Position
out.Backlog[i].TaskUUID = x.TaskUUID
if task, err := c.db.GetTask(x.TaskUUID); err == nil && task != nil {
out.Backlog[i].Op = task.Op
out.Backlog[i].Agent = task.Agent
if task.JobUUID != "" {
j, found := jobs[task.JobUUID]
if !found {
j, err = c.db.GetJob(task.JobUUID)
if j != nil && err == nil {
jobs[j.UUID] = j
found = true
}
}
if found {
out.Backlog[i].Job = &job{
UUID: j.UUID,
Name: j.Name,
}
}
}
out.Backlog[i].Tenant = &named{Name: "SYSTEM"}
if task.TenantUUID != "" {
if task.TenantUUID == db.GlobalTenantUUID {
out.Backlog[i].Tenant.Name = "GLOBAL"
} else {
t, found := tenants[task.TenantUUID]
if !found {
t, err = c.db.GetTenant(task.TenantUUID)
if t != nil && err == nil {
tenants[t.UUID] = t
found = true
}
}
if found {
out.Backlog[i].Tenant.UUID = t.UUID
out.Backlog[i].Tenant.Name = t.Name
}
}
}
if task.StoreUUID != "" {
s, found := stores[task.StoreUUID]
if !found {
s, err = c.db.GetStore(task.StoreUUID)
if s != nil && err == nil {
stores[s.UUID] = s
found = true
}
}
if found {
out.Backlog[i].Store = &named{
UUID: s.UUID,
Name: s.Name,
}
}
}
if task.TargetUUID != "" {
t, found := systems[task.TargetUUID]
if !found {
t, err = c.db.GetTarget(task.TargetUUID)
if t != nil && err == nil {
systems[t.UUID] = t
found = true
}
}
if found {
out.Backlog[i].System = &named{
UUID: t.UUID,
Name: t.Name,
}
}
}
if task.ArchiveUUID != "" {
a, found := archives[task.ArchiveUUID]
if !found {
a, err = c.db.GetArchive(task.ArchiveUUID)
if a != nil && err == nil {
archives[a.UUID] = a
found = true
}
}
if found {
out.Backlog[i].Archive = &archive{
UUID: a.UUID,
Size: a.Size,
}
}
}
}
}
for i, x := range ps.Workers {
out.Workers[i].ID = x.ID
out.Workers[i].Idle = x.Idle
out.Workers[i].TaskUUID = x.TaskUUID
out.Workers[i].LastSeen = x.LastSeen
if x.TaskUUID == "" {
continue
}
if task, err := c.db.GetTask(x.TaskUUID); err == nil && task != nil {
out.Workers[i].Op = task.Op
out.Workers[i].Status = task.Status
out.Workers[i].Agent = task.Agent
if task.JobUUID != "" {
j, found := jobs[task.JobUUID]
if !found {
j, err = c.db.GetJob(task.JobUUID)
if j != nil && err == nil {
jobs[j.UUID] = j
found = true
}
}
if found {
out.Workers[i].Job = &job{
UUID: j.UUID,
Name: j.Name,
}
}
}
out.Workers[i].Tenant = &named{Name: "SYSTEM"}
if task.TenantUUID != "" {
if task.TenantUUID == db.GlobalTenantUUID {
out.Workers[i].Tenant.Name = "GLOBAL"
} else {
t, found := tenants[task.TenantUUID]
if !found {
t, err = c.db.GetTenant(task.TenantUUID)
if t != nil && err == nil {
tenants[t.UUID] = t
found = true
}
}
if found {
out.Workers[i].Tenant.UUID = t.UUID
out.Workers[i].Tenant.Name = t.Name
}
}
}
if task.StoreUUID != "" {
s, found := stores[task.StoreUUID]
if !found {
s, err = c.db.GetStore(task.StoreUUID)
if s != nil && err == nil {
stores[s.UUID] = s
found = true
}
}
if found {
out.Workers[i].Store = &named{
UUID: s.UUID,
Name: s.Name,
}
}
}
if task.TargetUUID != "" {
t, found := systems[task.TargetUUID]
if !found {
t, err = c.db.GetTarget(task.TargetUUID)
if t != nil && err == nil {
systems[t.UUID] = t
found = true
}
}
if found {
out.Workers[i].System = &named{
UUID: t.UUID,
Name: t.Name,
}
}
}
if task.ArchiveUUID != "" {
a, found := archives[task.ArchiveUUID]
if !found {
a, err = c.db.GetArchive(task.ArchiveUUID)
if a != nil && err == nil {
archives[a.UUID] = a
found = true
}
}
if found {
out.Workers[i].Archive = &archive{
UUID: a.UUID,
Size: a.Size,
}
}
}
}
}
r.OK(out)
})
// }}}
r.Dispatch("GET /v2/mbus/status", func(r *route.Request) { // {{{
if c.IsNotSystemEngineer(r) {
return
}
r.OK(c.bus.DumpState())
})
// }}}
r.Dispatch("GET /v2/events", func(r *route.Request) { // {{{
// you must be logged into SHIELD to access the event stream
if c.IsNotAuthenticated(r) {
return
}
user, err := c.AuthenticatedUser(r)
if err != nil {
r.Fail(route.Oops(err, "Unable to configure your SHIELD events stream"))
return
}
queues := []string{
"user:" + user.UUID,
"tenant:" + db.GlobalTenantUUID,
}
memberships, err := c.db.GetMembershipsForUser(user.UUID)
if err != nil {
r.Fail(route.Oops(err, "Unable to configure your SHIELD events stream"))
return
}
for _, membership := range memberships {
queues = append(queues, "tenant:"+membership.TenantUUID)
}
if user.SysRole != "" {
queues = append(queues, "admins")
}
socket := r.Upgrade(route.WebSocketSettings{
WriteTimeout: time.Duration(c.Config.API.Websocket.WriteTimeout) * time.Second,
})
if socket == nil {
return
}
log.Infof("registering message bus web client")
ch, slot, err := c.bus.Register(queues)
if err != nil {
r.Fail(route.Oops(err, "Unable to begin streaming SHIELD events"))
return
}
log.Infof("registered with message bus as [id:%d]", slot)
closeMeSoftly := func() { c.bus.Unregister(slot) }
go socket.Discard(closeMeSoftly)
pingInterval := time.Duration(c.Config.API.Websocket.PingInterval) * time.Second
pingTimer := time.NewTimer(pingInterval)
writeLoop:
for {
select {
case event := <-ch:
b, err := json.Marshal(event)
if err != nil {
log.Errorf("message bus web client [id:%d] failed to marshal JSON for websocket relay: %s", slot, err)
} else {
if done, err := socket.Write(b); done {
log.Infof("message bus web client [id:%d] closed their end of the socket", slot)
log.Infof("message bus web client [id:%d] shutting down", slot)
closeMeSoftly()
break writeLoop
} else if err != nil {
log.Errorf("message bus web client [id:%d] failed to write message to remote end: %s", slot, err)
log.Errorf("message bus web client [id:%d] shutting down", slot)
closeMeSoftly()
err := socket.SendClose()
if err != nil {
log.Warnf("message bus web client [id:%d] failed to write close message")
}
break writeLoop
}
}
if !pingTimer.Stop() {
<-pingTimer.C
}
case <-pingTimer.C:
if err := socket.Ping(); err != nil {
log.Infof("message bus web client [id:%d] failed to write ping")
closeMeSoftly()
break writeLoop
}
}
pingTimer.Reset(pingInterval)
}
pingTimer.Stop()
log.Infof("message bus web client [id:%d] disconnected; unregistering...", slot)
closeMeSoftly()
})
// }}}
r.Dispatch("GET /v2/tasks", func(r *route.Request) { // {{{
if c.IsNotSystemEngineer(r) {
return
}
limit, err := strconv.Atoi(r.Param("limit", "30"))
if err != nil || limit < 0 || limit > 30 {
r.Fail(route.Bad(err, "Invalid limit parameter given"))
return
}
// check to see if we're offsetting task requests
paginationDate, err := strconv.ParseInt(r.Param("before", "0"), 10, 64)
if err != nil || paginationDate < 0 {
r.Fail(route.Bad(err, "Invalid before parameter given"))
return
}
tasks, err := c.db.GetAllTasks(
&db.TaskFilter{
UUID: r.Param("uuid", ""),
ExactMatch: r.ParamIs("exact", "t"),
SkipActive: r.ParamIs("active", "f"),
SkipInactive: r.ParamIs("active", "t"),
ForStatus: r.Param("status", ""),
ForTarget: r.Param("target", ""),
ForStore: r.Param("store", ""),
ForOp: r.Param("type", ""),
Limit: limit,
Before: paginationDate,
StartedAfter: r.ParamDuration("started_after"),
StoppedAfter: r.ParamDuration("stopped_after"),
StartedBefore: r.ParamDuration("started_before"),
StoppedBefore: r.ParamDuration("stopped_before"),
},
)
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve task information"))
return
}
r.OK(tasks)
})
// }}}
r.Dispatch("GET /v2/tasks/:uuid", func(r *route.Request) { // {{{
if c.IsNotSystemEngineer(r) {
return
}
task, err := c.db.GetTask(r.Args[1])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve task information"))
return
}
if task == nil || task.TenantUUID != db.GlobalTenantUUID {
r.Fail(route.NotFound(err, "No such task"))
return
}
r.OK(task)
})
// }}}
r.Dispatch("DELETE /v2/tenants/:uuid/tasks/:uuid", func(r *route.Request) { // {{{
if c.IsNotSystemEngineer(r) {
return
}
task, err := c.db.GetTask(r.Args[1])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve task information"))
return
}
if task == nil || task.TenantUUID == db.GlobalTenantUUID {
r.Fail(route.NotFound(err, "No such task"))
return
}
if err := c.db.CancelTask(task.UUID, time.Now()); err != nil {
r.Fail(route.Oops(err, "Unable to cancel task"))
return
}
r.Success("Canceled task successfully")
})
// }}}
r.Dispatch("GET /v2/tenants/:uuid/health", func(r *route.Request) { // {{{
if c.IsNotTenantOperator(r, r.Args[1]) {
return
}
health, err := c.checkTenantHealth(r.Args[1])
if err != nil {
r.Fail(route.Oops(err, "Unable to check SHIELD health"))
return
}
r.OK(health)
})
// }}}
r.Dispatch("POST /v2/init", func(r *route.Request) { // {{{
var in struct {
Master string `json:"master"`
}
if !r.Payload(&in) {
return
}
if r.Missing("master", in.Master) {
return
}
log.Infof("%s: initializing the SHIELD Core...", r)
status, err := c.vault.Status()
if err != nil {
r.Fail(route.Oops(err, "Unable to initialize the SHIELD Core"))
return
}
if status != vault.Blank {
r.Fail(route.Bad(nil, "this SHIELD Core has already been initialized"))
return
}
fixedKey, err := c.vault.Initialize(c.CryptFile(), in.Master)
if err != nil {
r.Fail(route.Oops(err, "Unable to initialize the SHIELD Core"))
return
}
r.OK(
struct {
Response string `json:"response"`
FixedKey string `json:"fixed_key"`
}{
"Successfully initialized the SHIELD Core",
fixedKey,
})
})
// }}}
r.Dispatch("POST /v2/lock", func(r *route.Request) { // {{{
if c.IsNotSystemEngineer(r) {
return
}
status, err := c.vault.Status()
if err != nil {
r.Fail(route.Forbidden(err, "Unable to lock the SHIELD Core"))
return
}
if status == vault.Blank {
r.Fail(route.Bad(nil, "this SHIELD Core has not yet been initialized"))
return
}
if err := c.vault.Seal(); err != nil {
r.Fail(route.Oops(err, "Unable to lock the SHIELD Core"))
return
}
c.bus.Send("lock-core", "", nil, "*")
r.Success("Successfully locked the SHIELD Core")
})
// }}}
r.Dispatch("POST /v2/unlock", func(r *route.Request) { // {{{
var in struct {
Master string `json:"master"`
}
if !r.Payload(&in) {
return
}
if r.Missing("master", in.Master) {
return
}
status, err := c.vault.Status()
if err != nil {
r.Fail(route.Forbidden(err, "Unable to unlock the SHIELD Core: an internal error has occurred"))
return
}
if status == vault.Blank {
r.Fail(route.Bad(nil, "Unable to unlock the SHIELD Core: this SHIELD Core has not yet been initialized"))
return
}
if err := c.vault.Unseal(c.CryptFile(), in.Master); err != nil {
if strings.Contains(err.Error(), "incorrect master password") {
r.Fail(route.Forbidden(err, "Unable to unlock the SHIELD Core: incorrect master password"))
return
}
r.Fail(route.Oops(err, "Unable to unlock the SHIELD Core: an internal error has occurred"))
return
}
c.bus.Send("unlock-core", "", nil, "*")
r.Success("Successfully unlocked the SHIELD Core")
})
// }}}
r.Dispatch("POST /v2/rekey", func(r *route.Request) { // {{{
var in struct {
Current string `json:"current"`
New string `json:"new"`
RotateFixed bool `json:"rotate_fixed_key"`
}
if !r.Payload(&in) {
return
}
if r.Missing("current", in.Current, "new", in.New) {
return
}
fixedKey, err := c.vault.Rekey(c.CryptFile(), in.Current, in.New, in.RotateFixed)
if err != nil {
if strings.Contains(err.Error(), "incorrect master password") {
r.Fail(route.Oops(err, "Unable to rekey the SHIELD Core: incorrect (current) master password"))
return
}
r.Fail(route.Oops(err, "Unable to rekey the SHIELD Core: an internal error has occurred"))
return
}
r.OK(
struct {
Response string `json:"response"`
FixedKey string `json:"fixed_key"`
}{
"Successfully rekeyed the SHIELD Core",
fixedKey,
})
})
// }}}
r.Dispatch("POST /v2/ui/users", func(r *route.Request) { // {{{
var in struct {
Search string `json:"search"`
}
if !r.Payload(&in) {
return
}
if len(in.Search) < 3 {
r.OK([]string{})
return
}
users, err := c.db.GetAllUsers(&db.UserFilter{
Search: in.Search,
Backend: "local",
})
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve users from the database."))
return
}
r.OK(users)
})
// }}}
r.Dispatch("POST /v2/ui/check/timespec", func(r *route.Request) { // {{{
var in struct {
Timespec string `json:"timespec"`
}
if !r.Payload(&in) {
return
}
spec, err := timespec.Parse(in.Timespec)
if err != nil {
r.Fail(route.Bad(err, fmt.Sprintf("%s", err)))
return
}
r.Success("%s", spec)
})
// }}}
r.Dispatch("GET /v2/auth/providers", func(r *route.Request) { // {{{
l := make([]AuthProviderConfig, 0)
for _, auth := range c.providers {
cfg := auth.Configuration(false)
l = append(l, cfg)
}
r.OK(l)
})
// }}}
r.Dispatch("GET /v2/auth/providers/:name", func(r *route.Request) { // {{{
if c.IsNotSystemAdmin(r) {
return
}
a, ok := c.providers[r.Args[1]]
if !ok {
r.Fail(route.NotFound(nil, "No such authentication provider"))
return
}
r.OK(a.Configuration(true))
})
// }}}
r.Dispatch("GET /v2/auth/local/users", func(r *route.Request) { // {{{
if c.IsNotSystemManager(r) {
return
}
limit, err := strconv.Atoi(r.Param("limit", "0"))
if err != nil || limit < 0 {
r.Fail(route.Bad(err, "Invalid limit parameter given"))
return
}
l, err := c.db.GetAllUsers(&db.UserFilter{
UUID: r.Param("uuid", ""),
Account: r.Param("account", ""),
SysRole: r.Param("sysrole", ""),
ExactMatch: r.ParamIs("exact", "t"),
Backend: "local",
Limit: limit,
})
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve local users information"))
return
}
users := make([]v2LocalUser, len(l))
for i, user := range l {
memberships, err := c.db.GetMembershipsForUser(user.UUID)
if err != nil {
log.Errorf("failed to retrieve tenant memberships for user %s@%s (uuid %s): %s",
user.Account, user.Backend, user.UUID, err)
r.Fail(route.Oops(err, "Unable to retrieve local users information"))
return
}
users[i] = v2LocalUser{
UUID: user.UUID,
Name: user.Name,
Account: user.Account,
SysRole: user.SysRole,
Tenants: make([]v2LocalTenant, len(memberships)),
}
for j, membership := range memberships {
users[i].Tenants[j].UUID = membership.TenantUUID
users[i].Tenants[j].Name = membership.TenantName
users[i].Tenants[j].Role = membership.Role
}
}
r.OK(users)
})
// }}}
r.Dispatch("GET /v2/auth/local/users/:uuid", func(r *route.Request) { // {{{
if c.IsNotSystemManager(r) {
return
}
user, err := c.db.GetUserByID(r.Args[1])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve local user information"))
return
}
if user == nil {
r.Fail(route.NotFound(nil, "user '%s' not found (for local auth provider)", r.Args[1]))
return
}
memberships, err := c.db.GetMembershipsForUser(user.UUID)
if err != nil {
log.Errorf("failed to retrieve tenant memberships for user %s@%s (uuid %s): %s",
user.Account, user.Backend, user.UUID, err)
r.Fail(route.Oops(err, "Unable to retrieve local user information"))
return
}
local_user := v2LocalUser{
UUID: user.UUID,
Name: user.Name,
Account: user.Account,
SysRole: user.SysRole,
Tenants: make([]v2LocalTenant, len(memberships)),
}
for j, membership := range memberships {
local_user.Tenants[j].UUID = membership.TenantUUID
local_user.Tenants[j].Name = membership.TenantName
local_user.Tenants[j].Role = membership.Role
}
r.OK(local_user)
})
// }}}
r.Dispatch("POST /v2/auth/local/users", func(r *route.Request) { // {{{
if c.IsNotSystemManager(r) {
return
}
var in struct {
UUID string `json:"uuid"`
Name string `json:"name"`
Account string `json:"account"`
Password string `json:"password"`
SysRole string `json:"sysrole"`
}
if !r.Payload(&in) {
return
}
if r.Missing("name", in.Name, "account", in.Account, "password", in.Password) {
return
}
if in.SysRole != "" {
switch in.SysRole {
case
"admin",
"manager",
"engineer":
default:
r.Fail(route.Bad(nil, "System role '%s' is invalid", in.SysRole))
return
}
}
u := &db.User{
UUID: in.UUID,
Name: in.Name,
Account: in.Account,
Backend: "local",
SysRole: in.SysRole,
}
u.SetPassword(in.Password)
exists, err := c.db.GetUser(u.Account, "local")
if err != nil {
r.Fail(route.Oops(err, "Unable to create local user '%s'", in.Account))
return
}
if exists != nil {
r.Fail(route.Bad(nil, "user '%s' already exists", u.Account))
return
}
u, err = c.db.CreateUser(u)
if u == nil || err != nil {
r.Fail(route.Oops(err, "Unable to create local user '%s'", in.Account))
return
}
r.OK(u)
})
// }}}
r.Dispatch("PATCH /v2/auth/local/users/:uuid", func(r *route.Request) { // {{{
if c.IsNotSystemManager(r) {
return
}
var in struct {
Name string `json:"name"`
Password string `json:"password"`
SysRole string `json:"sysrole"`
}
if !r.Payload(&in) {
return
}
user, err := c.db.GetUserByID(r.Args[1])
if err != nil {
r.Fail(route.Oops(err, "Unable to update local user '%s'", user.Account))
return
}
if user == nil || user.Backend != "local" {
r.Fail(route.NotFound(nil, "No such local user"))
return
}
if in.Name != "" {
user.Name = in.Name
}
if in.SysRole != "" {
switch in.SysRole {
case
"admin",
"manager",
"engineer":
user.SysRole = in.SysRole
default:
r.Fail(route.Bad(nil, "System role '%s' is invalid", in.SysRole))
return
}
}
if in.Password != "" {
user.SetPassword(in.Password)
}
err = c.db.UpdateUser(user)
if err != nil {
r.Fail(route.Oops(err, "Unable to update local user '%s'", user.Account))
return
}
r.Success("Updated")
})
// }}}
r.Dispatch("DELETE /v2/auth/local/users/:uuid", func(r *route.Request) { // {{{
if c.IsNotSystemManager(r) {
return
}
user, err := c.db.GetUserByID(r.Args[1])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve local user information"))
return
}
if user == nil || user.Backend != "local" {
r.Fail(route.NotFound(nil, "Local User '%s' not found", r.Args[1]))
return
}
err = c.db.DeleteUser(user)
if err != nil {
r.Fail(route.Oops(err, "Unable to delete local user '%s' (%s)", r.Args[1], user.Account))
return
}
r.Success("Successfully deleted local user")
})
// }}}
r.Dispatch("GET /v2/auth/tokens", func(r *route.Request) { // {{{
if c.IsNotAuthenticated(r) {
return
}
user, _ := c.AuthenticatedUser(r)
tokens, err := c.db.GetAllAuthTokens(&db.AuthTokenFilter{
User: user,
})
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve tokens information"))
return
}
for i := range tokens {
tokens[i].Session = ""
}
r.OK(tokens)
})
// }}}
r.Dispatch("POST /v2/auth/tokens", func(r *route.Request) { // {{{
if c.IsNotAuthenticated(r) {
return
}
user, _ := c.AuthenticatedUser(r)
var in struct {
Name string `json:"name"`
}
if !r.Payload(&in) {
return
}
if r.Missing("name", in.Name) {
return
}
existing, err := c.db.GetAllAuthTokens(&db.AuthTokenFilter{
Name: in.Name,
User: user,
})
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve tokens information"))
return
}
if len(existing) != 0 {
r.Fail(route.Bad(err, "A token with this name already exists"))
return
}
token, id, err := c.db.GenerateAuthToken(in.Name, user)
if id == "" || err != nil {
r.Fail(route.Oops(err, "Unable to generate new token"))
return
}
r.OK(token)
})
// }}}
r.Dispatch("DELETE /v2/auth/tokens/:token", func(r *route.Request) { // {{{
if c.IsNotAuthenticated(r) {
return
}
user, _ := c.AuthenticatedUser(r)
if err := c.db.DeleteAuthToken(r.Args[1], user); err != nil {
r.Fail(route.Oops(err, "Unable to revoke auth token"))
return
}
r.Success("Token revoked")
})
// }}}
r.Dispatch("GET /v2/auth/sessions", func(r *route.Request) { // {{{
if c.IsNotSystemAdmin(r) {
return
}
limit, err := strconv.Atoi(r.Param("limit", "0"))
if err != nil || limit < 0 {
r.Fail(route.Bad(err, "Invalid limit parameter given"))
return
}
sessions, err := c.db.GetAllSessions(
&db.SessionFilter{
UUID: r.Param("uuid", ""),
UserUUID: r.Param("user_uuid", ""),
Name: r.Param("name", ""),
IP: r.Param("ip_addr", ""),
ExactMatch: r.ParamIs("exact", "t"),
IsToken: r.ParamIs("is_token", "t"),
Limit: limit,
},
)
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve session information"))
return
}
for _, session := range sessions {
if session.UUID == r.SessionID() {
session.CurrentSession = true
break
}
}
r.OK(sessions)
})
// }}}
r.Dispatch("GET /v2/auth/sessions/:uuid", func(r *route.Request) { // {{{
if c.IsNotSystemAdmin(r) {
return
}
limit, err := strconv.Atoi(r.Param("limit", "0"))
if err != nil || limit < 0 {
r.Fail(route.Bad(err, "Invalid limit parameter given"))
return
}
session, err := c.db.GetSession(r.Args[1])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve session information"))
return
}
if session == nil {
r.Fail(route.NotFound(nil, "Session not found"))
return
}
if session.UUID == r.SessionID() {
session.CurrentSession = true
}
r.OK(session)
})
// }}}
r.Dispatch("DELETE /v2/auth/sessions/:uuid", func(r *route.Request) { // {{{
if c.IsNotSystemAdmin(r) {
return
}
session, err := c.db.GetSession(r.Args[1])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve session information"))
return
}
if session == nil {
r.Fail(route.NotFound(nil, "Session not found"))
return
}
if err := c.db.ClearSession(session.UUID); err != nil {
r.Fail(route.Oops(err, "Unable to clear session '%s' (%s)", r.Args[1], session.IP))
return
}
r.Success("Successfully cleared session '%s' (%s)", r.Args[1], session.IP)
})
// }}}
r.Dispatch("GET /v2/tenants/:uuid/systems", func(r *route.Request) { // {{{
if c.IsNotTenantOperator(r, r.Args[1]) {
return
}
targets, err := c.db.GetAllTargets(
&db.TargetFilter{
SkipUsed: r.ParamIs("unused", "t"),
SkipUnused: r.ParamIs("unused", "f"),
UUID: r.Param("uuid", ""),
SearchName: r.Param("name", ""),
ForPlugin: r.Param("plugin", ""),
ExactMatch: r.ParamIs("exact", "t"),
ForTenant: r.Args[1],
},
)
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve systems information"))
return
}
systems := make([]v2System, len(targets))
for i, target := range targets {
err := c.v2copyTarget(&systems[i], target)
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve systems information"))
return
}
}
r.OK(systems)
})
// }}}
r.Dispatch("GET /v2/tenants/:uuid/systems/:uuid", func(r *route.Request) { // {{{
if c.IsNotTenantOperator(r, r.Args[1]) {
return
}
target, err := c.db.GetTarget(r.Args[2])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve system information"))
return
}
if target == nil || target.TenantUUID != r.Args[1] {
r.Fail(route.NotFound(err, "No such system"))
return
}
var system v2System
err = c.v2copyTarget(&system, target)
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve system information"))
return
}
// keep track of our archives, indexed by task UUID
archives := make(map[string]*db.Archive)
aa, err := c.db.GetAllArchives(
&db.ArchiveFilter{
ForTarget: target.UUID,
WithStatus: []string{"valid"},
},
)
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve system information"))
return
}
for _, archive := range aa {
archives[archive.UUID] = archive
}
// check to see if we're offsetting task requests
paginationDate, err := strconv.ParseInt(r.Param("before", "0"), 10, 64)
if err != nil || paginationDate < 0 {
r.Fail(route.Bad(err, "Invalid before parameter given"))
return
}
tasks, err := c.db.GetAllTasks(
&db.TaskFilter{
ForTarget: target.UUID,
OnlyRelevant: true,
Before: paginationDate,
Limit: 30,
},
)
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve system information"))
return
}
// check if there are more tasks on the specific last date and append them if so
if len(tasks) > 0 {
appendingtasks, err := c.db.GetAllTasks(
&db.TaskFilter{
ForTarget: target.UUID,
OnlyRelevant: true,
RequestedAt: tasks[len(tasks)-1].RequestedAt,
},
)
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve system information"))
return
}
if (len(appendingtasks) > 1) && (tasks[len(tasks)-1].UUID != appendingtasks[len(appendingtasks)-1].UUID) {
log.Infof("Got a misjointed request, need to merge these two arrays.")
for i, task := range appendingtasks {
if task.UUID == tasks[len(tasks)-1].UUID {
tasks = append(tasks, appendingtasks[i+1:]...)
break
}
}
}
}
if !c.CanSeeCredentials(r, r.Args[1]) {
c.db.RedactAllTaskLogs(tasks)
}
system.Tasks = make([]v2SystemTask, len(tasks))
for i, task := range tasks {
system.Tasks[i].UUID = task.UUID
system.Tasks[i].Type = task.Op
system.Tasks[i].Status = task.Status
system.Tasks[i].Owner = task.Owner
system.Tasks[i].OK = task.OK
system.Tasks[i].Notes = task.Notes
system.Tasks[i].RequestedAt = task.RequestedAt
system.Tasks[i].StartedAt = task.StartedAt
system.Tasks[i].StoppedAt = task.StoppedAt
system.Tasks[i].Log = task.Log
system.Tasks[i].JobUUID = task.JobUUID
system.Tasks[i].TenantUUID = task.TenantUUID
system.Tasks[i].StoreUUID = task.StoreUUID
system.Tasks[i].ArchiveUUID = task.ArchiveUUID
system.Tasks[i].TargetUUID = task.TargetUUID
if archive, ok := archives[task.ArchiveUUID]; ok {
system.Tasks[i].Archive = &v2SystemArchive{
UUID: archive.UUID,
Schedule: archive.Job,
Expiry: (int)((archive.ExpiresAt - archive.TakenAt) / 86400),
Notes: archive.Notes,
Size: archive.Size,
}
}
}
r.OK(system)
})
// }}}
r.Dispatch("GET /v2/tenants/:uuid/systems/:uuid/config", func(r *route.Request) { // {{{
if c.IsNotTenantOperator(r, r.Args[1]) {
return
}
target, err := c.db.GetTarget(r.Args[2])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve system information"))
return
}
if target == nil || target.TenantUUID != r.Args[1] {
r.Fail(route.NotFound(err, "No such system"))
return
}
config, err := target.Configuration(c.db, c.CanSeeCredentials(r, target.TenantUUID))
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve system information"))
return
}
r.OK(config)
})
// }}}
r.Dispatch("POST /v2/tenants/:uuid/systems", func(r *route.Request) { // {{{
if c.IsNotTenantEngineer(r, r.Args[1]) {
return
}
var in struct {
Target struct {
UUID string `json:"uuid"`
Name string `json:"name"`
Summary string `json:"summary"`
Plugin string `json:"plugin"`
Agent string `json:"agent"`
Compression string `json:"compression"`
Config map[string]interface{} `json:"config"`
} `json:"target"`
Store struct {
UUID string `json:"uuid"`
Name string `json:"name"`
Summary string `json:"summary"`
Plugin string `json:"plugin"`
Agent string `json:"agent"`
Threshold int64 `json:"threshold"`
Config map[string]interface{} `json:"config"`
} `json:"store"`
Job struct {
Name string `json:"name"`
Schedule string `json:"schedule"`
KeepDays int `json:"keep_days"`
FixedKey bool `json:"fixed_key"`
Paused bool `json:"paused"`
KeepN int
} `json:"job"`
}
if !r.Payload(&in) {
return
}
sched, err := timespec.Parse(in.Job.Schedule)
if err != nil {
r.Fail(route.Oops(err, "Invalid or malformed SHIELD Job Schedule '%s'", in.Job.Schedule))
return
}
if in.Job.KeepDays < 0 {
r.Fail(route.Oops(nil, "Invalid or malformed SHIELD Job Archive Retention Period '%dd'", in.Job.KeepDays))
return
}
if in.Job.KeepDays < c.Config.Limit.Retention.Min {
r.Fail(route.Oops(nil, "SHIELD Job Archive Retention Period '%dd' is too short, archives must be kept for a minimum of %d days", in.Job.KeepDays, c.Config.Limit.Retention.Min))
return
}
if in.Job.KeepDays > c.Config.Limit.Retention.Max {
r.Fail(route.Oops(nil, "SHIELD Job Archive Retention Period '%dd' is too long, archives may be kept for a maximum of %d days", in.Job.KeepDays, c.Config.Limit.Retention.Max))
return
}
in.Job.KeepN = sched.KeepN(in.Job.KeepDays)
if in.Target.Compression == "" {
in.Target.Compression = DefaultCompressionType
}
var (
target *db.Target
store *db.Store
)
if in.Target.UUID != "" {
target, err = c.db.GetTarget(in.Target.UUID)
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve system information"))
return
}
if target == nil || target.TenantUUID != r.Args[1] {
r.Fail(route.NotFound(nil, "No such system"))
return
}
} else {
target, err = c.db.CreateTarget(&db.Target{
TenantUUID: r.Args[1],
Name: in.Target.Name,
Summary: in.Target.Summary,
Plugin: in.Target.Plugin,
Config: in.Target.Config,
Agent: in.Target.Agent,
Compression: in.Target.Compression,
Healthy: true,
})
if target == nil || err != nil {
r.Fail(route.Oops(err, "Unable to create new data target"))
return
}
}
if in.Store.UUID != "" {
store, err = c.db.GetStore(in.Store.UUID)
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve cloud storage information"))
return
}
if store == nil || (!store.Global && store.TenantUUID != r.Args[1]) {
r.Fail(route.NotFound(nil, "No such store"))
return
}
} else {
store, err = c.db.CreateStore(&db.Store{
TenantUUID: r.Args[1],
Name: in.Store.Name,
Summary: in.Store.Summary,
Agent: in.Store.Agent,
Plugin: in.Store.Plugin,
Config: in.Store.Config,
Threshold: in.Store.Threshold,
Healthy: true, /* let's be optimistic */
})
if store == nil || err != nil {
r.Fail(route.Oops(err, "Unable to create new storage system"))
return
}
if _, err := c.db.CreateTestStoreTask("system", store); err != nil {
log.Errorf("failed to schedule storage test task (non-critical) for %s (%s): %s",
store.Name, store.UUID, err)
}
}
job, err := c.db.CreateJob(&db.Job{
TenantUUID: r.Args[1],
Name: in.Job.Name,
Schedule: in.Job.Schedule,
KeepN: in.Job.KeepN,
KeepDays: in.Job.KeepDays,
Paused: in.Job.Paused,
StoreUUID: store.UUID,
TargetUUID: target.UUID,
FixedKey: in.Job.FixedKey,
})
if job == nil || err != nil {
r.Fail(route.Oops(err, "Unable to create new job"))
return
}
r.OK(target)
})
// }}}
r.Dispatch("PATCH /v2/tenants/:uuid/systems/:uuid", func(r *route.Request) { // {{{
if c.IsNotTenantEngineer(r, r.Args[1]) {
return
}
var in struct {
Annotations []struct {
Type string `json:"type"`
UUID string `json:"uuid"`
Disposition string `json:"disposition"`
Notes string `json:"notes"`
Clear string `json:"clear"`
} `json:"annotations"`
}
if !r.Payload(&in) {
return
}
target, err := c.db.GetTarget(r.Args[2])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve system information"))
return
}
if target == nil || target.TenantUUID != r.Args[1] {
r.Fail(route.NotFound(nil, "No such system"))
return
}
for _, ann := range in.Annotations {
switch ann.Type {
case "task":
err = c.db.AnnotateTargetTask(
target.UUID,
ann.UUID,
&db.TaskAnnotation{
Disposition: ann.Disposition,
Notes: ann.Notes,
Clear: ann.Clear,
},
)
if err != nil {
r.Fail(route.Oops(err, "Unable to annotate task %s", ann.UUID))
return
}
case "archive":
err = c.db.AnnotateTargetArchive(
target.UUID,
ann.UUID,
ann.Notes,
)
if err != nil {
r.Fail(route.Oops(err, "Unable to annotate archive %s", ann.UUID))
return
}
default:
r.Fail(route.Bad(nil, "unrecognized system annotation type '%s'", ann.Type))
return
}
}
_ = c.db.MarkTasksIrrelevant()
r.Success("annotated successfully")
})
// }}}
r.Dispatch("DELETE /v2/tenants/:uuid/systems/:uuid", func(r *route.Request) { // {{{
if c.IsNotTenantEngineer(r, r.Args[1]) {
return
}
/* FIXME */
r.Fail(route.Errorf(501, nil, "%s: not implemented", r))
})
// }}}
r.Dispatch("GET /v2/agents", func(r *route.Request) { // {{{
if c.IsNotSystemAdmin(r) {
return
}
agents, err := c.db.GetAllAgents(&db.AgentFilter{
UUID: r.Param("uuid", ""),
ExactMatch: r.ParamIs("exact", "t"),
SkipHidden: r.ParamIs("hidden", "f"),
SkipVisible: r.ParamIs("hidden", "t"),
})
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve agent information"))
return
}
resp := struct {
Agents []*db.Agent `json:"agents"`
Problems map[string][]string `json:"problems"`
}{
Agents: agents,
Problems: make(map[string][]string),
}
for _, agent := range agents {
id := agent.UUID
pp := make([]string, 0)
if agent.Version == "" {
pp = append(pp, Problems["legacy-shield-agent-version"])
}
if agent.Version == "dev" {
pp = append(pp, Problems["dev-shield-agent-version"])
}
resp.Problems[id] = pp
}
r.OK(resp)
})
// }}}
r.Dispatch("GET /v2/agents/:uuid", func(r *route.Request) { // {{{
if c.IsNotSystemAdmin(r) {
return
}
agent, err := c.db.GetAgent(r.Args[1])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve agent information"))
return
}
if agent == nil {
r.Fail(route.NotFound(nil, "No such agent"))
return
}
raw, err := agent.Metadata()
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve agent information"))
return
}
resp := struct {
Agent db.Agent `json:"agent"`
Metadata map[string]interface{} `json:"metadata"`
Problems []string `json:"problems"`
}{
Agent: *agent,
Metadata: raw,
Problems: make([]string, 0),
}
if agent.Version == "" {
resp.Problems = append(resp.Problems, Problems["legacy-shield-agent-version"])
}
if agent.Version == "dev" {
resp.Problems = append(resp.Problems, Problems["dev-shield-agent-version"])
}
r.OK(resp)
})
// }}}
r.Dispatch("DELETE /v2/agents/:uuid", func(r *route.Request) { // {{{
if c.IsNotSystemAdmin(r) {
return
}
agent, err := c.db.GetAgent(r.Args[1])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve agent information"))
return
}
if agent == nil {
r.Fail(route.NotFound(nil, "No such agent"))
return
}
err = c.db.DeleteAgent(agent)
if err != nil {
r.Fail(route.Oops(err, "Unable to delete agent"))
return
}
r.Success("deleted agent %s (at %s)", agent.Name, agent.Address)
})
// }}}
r.Dispatch("POST /v2/agents", func(r *route.Request) { // {{{
var in struct {
Name string `json:"name"`
Port int `json:"port"`
}
if !r.Payload(&in) {
return
}
peer := regexp.MustCompile(`:\d+$`).ReplaceAllString(r.Req.Header.Get("X-Forwarded-For"), "")
if peer == "" {
peer = regexp.MustCompile(`:\d+$`).ReplaceAllString(r.Req.RemoteAddr, "")
if peer == "" {
r.Fail(route.Oops(nil, "Unable to determine remote peer address from '%s'", r.Req.RemoteAddr))
return
}
}
if in.Name == "" {
r.Fail(route.Bad(nil, "No `name' provided with pre-registration request"))
return
}
if in.Port == 0 {
r.Fail(route.Bad(nil, "No `port' provided with pre-registration request"))
return
}
err := c.db.PreRegisterAgent(peer, in.Name, in.Port)
if err != nil {
r.Fail(route.Oops(err, "Unable to pre-register agent %s at %s:%d", in.Name, peer, in.Port))
return
}
r.Success("pre-registered agent %s at %s:%d", in.Name, peer, in.Port)
})
// }}}
r.Dispatch("POST /v2/agents/:uuid/(show|hide)", func(r *route.Request) { // {{{
if c.IsNotSystemAdmin(r) {
return
}
agent, err := c.db.GetAgent(r.Args[1])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve agent information"))
return
}
if agent == nil {
r.Fail(route.NotFound(nil, "No such agent"))
return
}
agent.Hidden = (r.Args[2] == "hide")
if err := c.db.UpdateAgent(agent); err != nil {
r.Fail(route.Oops(err, "Unable to set agent visibility"))
return
}
if agent.Hidden {
r.Success("Agent is now visible only to SHIELD site engineers")
} else {
r.Success("Agent is now visible to everyone")
}
})
// }}}
r.Dispatch("POST /v2/agents/:uuid/resync", func(r *route.Request) { // {{{
if c.IsNotSystemAdmin(r) {
return
}
agent, err := c.db.GetAgent(r.Args[1])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve agent information"))
return
}
if agent == nil {
r.Fail(route.NotFound(nil, "No such agent"))
return
}
c.ScheduleAgentStatusCheckTasks(&db.AgentFilter{UUID: agent.UUID})
r.Success("Ad hoc agent resynchronization underway")
})
// }}}
r.Dispatch("GET /v2/tenants", func(r *route.Request) { // {{{
if c.IsNotSystemManager(r) {
return
}
limit, err := strconv.Atoi(r.Param("limit", "0"))
if err != nil || limit < 0 {
r.Fail(route.Bad(err, "Invalid limit parameter given"))
return
}
tenants, err := c.db.GetAllTenants(&db.TenantFilter{
UUID: r.Param("uuid", ""),
Name: r.Param("name", ""),
ExactMatch: r.ParamIs("exact", "t"),
Limit: limit,
})
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve tenants information"))
return
}
r.OK(tenants)
})
// }}}
r.Dispatch("GET /v2/tenants/:uuid", func(r *route.Request) { // {{{
if !c.CanManageTenants(r, r.Args[1]) {
return
}
tenant, err := c.db.GetTenant(r.Args[1])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve tenant information"))
return
}
if tenant == nil {
r.Fail(route.NotFound(nil, "No such tenant"))
return
}
tenant.Members, err = c.db.GetUsersForTenant(tenant.UUID)
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve tenant memberships information"))
return
}
r.OK(tenant)
})
// }}}
r.Dispatch("POST /v2/tenants", func(r *route.Request) { // {{{
if c.IsNotSystemManager(r) {
return
}
var in struct {
UUID string `json:"uuid"`
Name string `json:"name"`
Users []struct {
UUID string `json:"uuid"`
Account string `json:"account"`
Role string `json:"role"`
} `json:"users"`
}
if !r.Payload(&in) {
return
}
if r.Missing("name", in.Name) {
return
}
if strings.ToLower(in.Name) == "system" {
r.Fail(route.Bad(nil, "tenant name 'system' is reserved"))
return
}
t, err := c.db.CreateTenant(&db.Tenant{
UUID: in.UUID,
Name: in.Name,
})
if t == nil || err != nil {
r.Fail(route.Oops(err, "Unable to create new tenant '%s'", in.Name))
return
}
for _, u := range in.Users {
user, err := c.db.GetUserByID(u.UUID)
if err != nil {
r.Fail(route.Oops(err, "Unrecognized user account '%s'", user))
return
}
if user == nil {
r.Fail(route.Oops(err, "Unrecognized user account '%s'", user))
return
}
if user.Backend != "local" {
r.Fail(route.Oops(nil, "Unable to invite '%s@%s' to tenant '%s' - only local users can be invited.", user.Account, user.Backend, t.Name))
return
}
err = c.db.AddUserToTenant(u.UUID, t.UUID, u.Role)
if err != nil {
r.Fail(route.Oops(err, "Unable to invite '%s' to tenant '%s'", user.Account, t.Name))
return
}
}
r.OK(t)
})
// }}}
r.Dispatch("POST /v2/tenants/:uuid/invite", func(r *route.Request) { // {{{
if !c.CanManageTenants(r, r.Args[1]) {
return
}
tenant, err := c.db.GetTenant(r.Args[1])
if err != nil {
r.Fail(route.Oops(err, "Unable to update tenant memberships information"))
return
}
if tenant == nil {
r.Fail(route.NotFound(nil, "No such tenant"))
return
}
var in struct {
Users []struct {
UUID string `json:"uuid"`
Account string `json:"account"`
Role string `json:"role"`
} `json:"users"`
}
if !r.Payload(&in) {
return
}
for _, u := range in.Users {
user, err := c.db.GetUserByID(u.UUID)
if err != nil {
r.Fail(route.Oops(err, "Unrecognized user account '%s'", user))
return
}
if user == nil {
r.Fail(route.Oops(err, "Unrecognized user account '%s'", user))
return
}
if user.Backend != "local" {
r.Fail(route.Oops(nil, "Unable to invite '%s@%s' to tenant '%s' - only local users can be invited.", user.Account, user.Backend, tenant.Name))
return
}
err = c.db.AddUserToTenant(u.UUID, tenant.UUID, u.Role)
if err != nil {
r.Fail(route.Oops(err, "Unable to invite '%s' to tenant '%s'", user.Account, tenant.Name))
return
}
}
r.Success("Invitations sent")
})
// }}}
r.Dispatch("POST /v2/tenants/:uuid/banish", func(r *route.Request) { // {{{
if !c.CanManageTenants(r, r.Args[1]) {
return
}
tenant, err := c.db.GetTenant(r.Args[1])
if err != nil {
r.Fail(route.Oops(err, "Unable to update tenant memberships information"))
return
}
if tenant == nil {
r.Fail(route.NotFound(nil, "No such tenant"))
return
}
var in struct {
Users []struct {
UUID string `json:"uuid"`
Account string `json:"account"`
} `json:"users"`
}
if !r.Payload(&in) {
return
}
for _, u := range in.Users {
user, err := c.db.GetUserByID(u.UUID)
if err != nil {
r.Fail(route.Oops(err, "Unrecognized user account '%s'", user))
return
}
if user == nil {
r.Fail(route.Oops(err, "Unrecognized user account '%s'", user))
return
}
if user.Backend != "local" {
r.Fail(route.Oops(nil, "Unable to banish '%s@%s' from tenant '%s' - only local users can be banished.", user.Account, user.Backend, tenant.Name))
return
}
err = c.db.RemoveUserFromTenant(u.UUID, tenant.UUID)
if err != nil {
r.Fail(route.Oops(err, "Unable to banish '%s' from tenant '%s'", user.Account, tenant.Name))
return
}
}
r.Success("Banishments served.")
})
// }}}
r.Dispatch("GET /v2/tenants/:uuid", func(r *route.Request) { // {{{
if c.IsNotSystemManager(r) {
return
}
tenant, err := c.db.GetTenant(r.Args[1])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve tenant information"))
return
}
if tenant == nil {
r.Fail(route.NotFound(nil, "No such tenant"))
return
}
tenant.Members, err = c.db.GetUsersForTenant(tenant.UUID)
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve tenant memberships information"))
return
}
r.OK(tenant)
})
// }}}
r.Dispatch("PATCH /v2/tenants/:uuid", func(r *route.Request) { // {{{
if c.IsNotSystemManager(r) {
return
}
var in struct {
Name string `json:"name"`
}
if !r.Payload(&in) {
return
}
if r.Missing("name", in.Name) {
return
}
tenant, err := c.db.GetTenant(r.Args[1])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve tenant information"))
return
}
if tenant == nil {
r.Fail(route.NotFound(err, "No such tenant"))
return
}
if in.Name != "" {
tenant.Name = in.Name
}
t, err := c.db.UpdateTenant(tenant)
if err != nil {
r.Fail(route.Oops(err, "Unable to update tenant '%s'", in.Name))
return
}
r.OK(t)
})
// }}}
r.Dispatch("DELETE /v2/tenants/:uuid", func(r *route.Request) { // {{{
if c.IsNotSystemManager(r) {
return
}
tenant, err := c.db.GetTenant(r.Args[1])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve tenant information"))
return
}
if tenant == nil {
r.Fail(route.NotFound(nil, "Tenant not found"))
return
}
if err := c.db.DeleteTenant(tenant, r.ParamIs("recurse", "t")); err != nil {
r.Fail(route.Oops(err, "Unable to delete tenant '%s' (%s)", r.Args[1], tenant.Name))
return
}
r.Success("Successfully deleted tenant '%s' (%s)", r.Args[1], tenant.Name)
})
// }}}
r.Dispatch("GET /v2/tenants/:uuid/agents", func(r *route.Request) { // {{{
if c.IsNotTenantOperator(r, r.Args[1]) {
return
}
agents, err := c.db.GetAllAgents(&db.AgentFilter{
SkipHidden: true,
})
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve agent information"))
return
}
r.OK(agents)
})
// }}}
r.Dispatch("GET /v2/tenants/:uuid/agents/:uuid", func(r *route.Request) { // {{{
if c.IsNotTenantOperator(r, r.Args[1]) {
return
}
agent, err := c.db.GetAgent(r.Args[2])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve agent information"))
return
}
if agent == nil || agent.Hidden {
r.Fail(route.NotFound(nil, "No such agent"))
return
}
raw, err := agent.Metadata()
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve agent information"))
return
}
resp := struct {
Agent db.Agent `json:"agent"`
Metadata map[string]interface{} `json:"metadata"`
}{
Agent: *agent,
Metadata: raw,
}
r.OK(resp)
})
// }}}
r.Dispatch("GET /v2/tenants/:uuid/targets", func(r *route.Request) { // {{{
if c.IsNotTenantOperator(r, r.Args[1]) {
return
}
targets, err := c.db.GetAllTargets(
&db.TargetFilter{
ForTenant: r.Args[1],
SkipUsed: r.ParamIs("unused", "t"),
SkipUnused: r.ParamIs("unused", "f"),
UUID: r.Param("uuid", ""),
SearchName: r.Param("name", ""),
ForPlugin: r.Param("plugin", ""),
ExactMatch: r.ParamIs("exact", "t"),
},
)
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve targets information"))
return
}
r.OK(targets)
})
// }}}
r.Dispatch("POST /v2/tenants/:uuid/targets", func(r *route.Request) { // {{{
if c.IsNotTenantEngineer(r, r.Args[1]) {
return
}
tenant, err := c.db.GetTenant(r.Args[1])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve tenant information"))
return
}
if tenant == nil {
r.Fail(route.NotFound(nil, "No such tenant"))
return
}
var in struct {
Name string `json:"name"`
Summary string `json:"summary"`
Compression string `json:"compression"`
Plugin string `json:"plugin"`
Agent string `json:"agent"`
Config map[string]interface{} `json:"config"`
endpoint string
}
if !r.Payload(&in) {
return
}
if in.Config != nil {
b, err := json.Marshal(in.Config)
if err != nil {
r.Fail(route.Oops(err, "Unable to create target"))
return
}
in.endpoint = string(b)
} else {
in.endpoint = "{}"
}
if r.Missing("name", in.Name, "plugin", in.Plugin, "agent", in.Agent) {
return
}
if in.Compression == "" {
in.Compression = DefaultCompressionType
}
if !ValidCompressionType(in.Compression) {
r.Fail(route.Bad(err, "Invalid compression type '%s'", in.Compression))
return
}
if r.ParamIs("test", "t") {
r.Success("validation suceeded (request made in ?test=t mode)")
return
}
target, err := c.db.CreateTarget(&db.Target{
TenantUUID: r.Args[1],
Name: in.Name,
Summary: in.Summary,
Plugin: in.Plugin,
Config: in.Config,
Agent: in.Agent,
Compression: in.Compression,
Healthy: true,
})
if target == nil || err != nil {
r.Fail(route.Oops(err, "Unable to create new data target"))
return
}
r.OK(target)
})
// }}}
r.Dispatch("GET /v2/tenants/:uuid/targets/:uuid", func(r *route.Request) { // {{{
if c.IsNotTenantOperator(r, r.Args[1]) {
return
}
target, err := c.db.GetTarget(r.Args[2])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve target information"))
return
}
if target == nil || target.TenantUUID != r.Args[1] {
r.Fail(route.NotFound(nil, "No such target"))
return
}
r.OK(target)
})
// }}}
r.Dispatch("PUT /v2/tenants/:uuid/targets/:uuid", func(r *route.Request) { // {{{
if c.IsNotTenantEngineer(r, r.Args[1]) {
return
}
target, err := c.db.GetTarget(r.Args[2])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve target information"))
return
}
if target == nil || target.TenantUUID != r.Args[1] {
r.Fail(route.NotFound(nil, "No such target"))
return
}
var in struct {
Name string `json:"name"`
Summary string `json:"summary"`
Compression string `json:"compression"`
Plugin string `json:"plugin"`
Endpoint string `json:"endpoint"`
Agent string `json:"agent"`
Config map[string]interface{} `json:"config"`
}
if !r.Payload(&in) {
return
}
if in.Endpoint == "" && in.Config != nil {
b, err := json.Marshal(in.Config)
			if err != nil {
				r.Fail(route.Oops(err, "Unable to update target"))
				return
			}
in.Endpoint = string(b)
}
if in.Name != "" {
target.Name = in.Name
}
if in.Summary != "" {
target.Summary = in.Summary
}
if in.Plugin != "" {
target.Plugin = in.Plugin
}
if in.Config != nil {
target.Config = in.Config
}
if in.Agent != "" {
target.Agent = in.Agent
}
if in.Compression != "" {
if !ValidCompressionType(in.Compression) {
r.Fail(route.Bad(err, "Invalid compression type '%s'", in.Compression))
return
}
target.Compression = in.Compression
}
if err := c.db.UpdateTarget(target); err != nil {
r.Fail(route.Oops(err, "Unable to update target"))
return
}
r.Success("Updated target successfully")
})
// }}}
r.Dispatch("DELETE /v2/tenants/:uuid/targets/:uuid", func(r *route.Request) { // {{{
if c.IsNotTenantEngineer(r, r.Args[1]) {
return
}
target, err := c.db.GetTarget(r.Args[2])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve target information"))
return
}
if target == nil || target.TenantUUID != r.Args[1] {
r.Fail(route.NotFound(nil, "No such target"))
return
}
deleted, err := c.db.DeleteTarget(target.UUID)
if err != nil {
r.Fail(route.Oops(err, "Unable to delete target"))
return
}
if !deleted {
r.Fail(route.Forbidden(nil, "The target cannot be deleted at this time"))
return
}
r.Success("Target deleted successfully")
})
// }}}
r.Dispatch("GET /v2/tenants/:uuid/stores", func(r *route.Request) { // {{{
if c.IsNotTenantOperator(r, r.Args[1]) {
return
}
stores, err := c.db.GetAllStores(
&db.StoreFilter{
SkipUsed: r.ParamIs("unused", "t"),
SkipUnused: r.ParamIs("unused", "f"),
UUID: r.Param("uuid", ""),
SearchName: r.Param("name", ""),
ForPlugin: r.Param("plugin", ""),
ExactMatch: r.ParamIs("exact", "t"),
ForTenant: r.Args[1],
},
)
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve storage systems information"))
return
}
r.OK(stores)
})
// }}}
r.Dispatch("GET /v2/tenants/:uuid/stores/:uuid", func(r *route.Request) { // {{{
if c.IsNotTenantOperator(r, r.Args[1]) {
return
}
store, err := c.db.GetStore(r.Args[2])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve storage system information"))
return
}
if store == nil || store.TenantUUID != r.Args[1] {
r.Fail(route.NotFound(nil, "No such storage system"))
return
}
r.OK(store)
})
// }}}""
r.Dispatch("GET /v2/tenants/:uuid/stores/:uuid/config", func(r *route.Request) { // {{{
if c.IsNotTenantOperator(r, r.Args[1]) {
return
}
store, err := c.db.GetStore(r.Args[2])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve storage system information"))
return
}
if store == nil || store.TenantUUID != r.Args[1] {
r.Fail(route.NotFound(nil, "No such storage system"))
return
}
config, err := store.Configuration(c.db, c.CanSeeCredentials(r, store.TenantUUID))
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve storage system information"))
return
}
r.OK(config)
})
// }}}""
r.Dispatch("POST /v2/tenants/:uuid/stores", func(r *route.Request) { // {{{
if c.IsNotTenantEngineer(r, r.Args[1]) {
return
}
var in struct {
Name string `json:"name"`
Summary string `json:"summary"`
Agent string `json:"agent"`
Plugin string `json:"plugin"`
Threshold int64 `json:"threshold"`
Config map[string]interface{} `json:"config"`
}
if !r.Payload(&in) {
return
}
if r.Missing("name", in.Name, "agent", in.Agent, "plugin", in.Plugin, "threshold", fmt.Sprint(in.Threshold)) {
return
}
tenant, err := c.db.GetTenant(r.Args[1])
if tenant == nil || err != nil {
r.Fail(route.Oops(err, "Unable to retrieve storage system information"))
return
}
if r.ParamIs("test", "t") {
r.Success("validation suceeded (request made in ?test=t mode)")
return
}
store, err := c.db.CreateStore(&db.Store{
TenantUUID: tenant.UUID,
Name: in.Name,
Summary: in.Summary,
Agent: in.Agent,
Plugin: in.Plugin,
Config: in.Config,
Threshold: in.Threshold,
Healthy: true, /* let's be optimistic */
})
if store == nil || err != nil {
r.Fail(route.Oops(err, "Unable to create new storage system"))
return
}
if _, err := c.db.CreateTestStoreTask("system", store); err != nil {
log.Errorf("failed to schedule storage test task (non-critical) for %s (%s): %s",
store.Name, store.UUID, err)
}
r.OK(store)
})
// }}}
r.Dispatch("PUT /v2/tenants/:uuid/stores/:uuid", func(r *route.Request) { // {{{
if c.IsNotTenantEngineer(r, r.Args[1]) {
return
}
var in struct {
Name string `json:"name"`
Summary string `json:"summary"`
Agent string `json:"agent"`
Plugin string `json:"plugin"`
Threshold int64 `json:"threshold"`
Config map[string]interface{} `json:"config"`
}
if !r.Payload(&in) {
r.Fail(route.Bad(nil, "Unable to update storage system"))
return
}
store, err := c.db.GetStore(r.Args[2])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve storage system information"))
return
}
if store == nil || store.TenantUUID != r.Args[1] {
r.Fail(route.NotFound(err, "No such storage system"))
return
}
if in.Name != "" {
store.Name = in.Name
}
if in.Summary != "" {
store.Summary = in.Summary
}
if in.Agent != "" {
store.Agent = in.Agent
}
if in.Plugin != "" {
store.Plugin = in.Plugin
}
if in.Threshold != 0 {
store.Threshold = in.Threshold
}
if in.Config != nil {
store.Config = in.Config
}
if err := c.db.UpdateStore(store); err != nil {
r.Fail(route.Oops(err, "Unable to update storage system"))
return
}
store, err = c.db.GetStore(store.UUID)
if store == nil || err != nil {
r.Fail(route.Oops(err, "Unable to retrieve storage system information"))
return
}
if _, err := c.db.CreateTestStoreTask("system", store); err != nil {
log.Errorf("failed to schedule storage test task (non-critical) for %s (%s): %s",
store.Name, store.UUID, err)
}
r.OK(store)
})
// }}}
r.Dispatch("DELETE /v2/tenants/:uuid/stores/:uuid", func(r *route.Request) { // {{{
if c.IsNotTenantEngineer(r, r.Args[1]) {
return
}
store, err := c.db.GetStore(r.Args[2])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve storage system information"))
return
}
if store == nil || store.TenantUUID != r.Args[1] {
r.Fail(route.NotFound(err, "No such storage system"))
return
}
deleted, err := c.db.DeleteStore(store.UUID)
if err != nil {
r.Fail(route.Oops(err, "Unable to delete storage system"))
return
}
if !deleted {
r.Fail(route.Bad(nil, "The storage system cannot be deleted at this time"))
return
}
r.Success("Storage system deleted successfully")
})
// }}}
r.Dispatch("GET /v2/tenants/:uuid/jobs", func(r *route.Request) { // {{{
if c.IsNotTenantOperator(r, r.Args[1]) {
return
}
jobs, err := c.db.GetAllJobs(
&db.JobFilter{
ForTenant: r.Args[1],
SkipPaused: r.ParamIs("paused", "f"),
SkipUnpaused: r.ParamIs("paused", "t"),
UUID: r.Param("uuid", ""),
SearchName: r.Param("name", ""),
ForTarget: r.Param("target", ""),
ForStore: r.Param("store", ""),
ExactMatch: r.ParamIs("exact", "t"),
},
)
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve tenant job information."))
return
}
r.OK(jobs)
})
// }}}
r.Dispatch("POST /v2/tenants/:uuid/jobs", func(r *route.Request) { // {{{
if c.IsNotTenantEngineer(r, r.Args[1]) {
return
}
var in struct {
Name string `json:"name"`
Summary string `json:"summary"`
Schedule string `json:"schedule"`
Paused bool `json:"paused"`
Store string `json:"store"`
Target string `json:"target"`
Retain string `json:"retain"`
FixedKey bool `json:"fixed_key"`
}
if !r.Payload(&in) {
return
}
if r.Missing("name", in.Name, "store", in.Store, "target", in.Target, "schedule", in.Schedule, "retain", in.Retain) {
return
}
sched, err := timespec.Parse(in.Schedule)
if err != nil {
r.Fail(route.Oops(err, "Invalid or malformed SHIELD Job Schedule '%s'", in.Schedule))
return
}
keepdays := util.ParseRetain(in.Retain)
if keepdays < 0 {
r.Fail(route.Oops(nil, "Invalid or malformed SHIELD Job Archive Retention Period '%s'", in.Retain))
return
}
if keepdays < c.Config.Limit.Retention.Min {
r.Fail(route.Oops(nil, "SHIELD Job Archive Retention Period '%s' is too short, archives must be kept for a minimum of %d days", in.Retain, c.Config.Limit.Retention.Min))
return
}
if keepdays > c.Config.Limit.Retention.Max {
r.Fail(route.Oops(nil, "SHIELD Job Archive Retention Period '%s' is too long, archives may be kept for a maximum of %d days", in.Retain, c.Config.Limit.Retention.Max))
return
}
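		// Convert the validated retention window (in days) into an approximate
		// number of archives to keep, based on how often the parsed schedule fires.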
keepn := sched.KeepN(keepdays)
job, err := c.db.CreateJob(&db.Job{
TenantUUID: r.Args[1],
Name: in.Name,
Summary: in.Summary,
Schedule: in.Schedule,
KeepDays: keepdays,
KeepN: keepn,
Paused: in.Paused,
StoreUUID: in.Store,
TargetUUID: in.Target,
FixedKey: in.FixedKey,
})
if job == nil || err != nil {
r.Fail(route.Oops(err, "Unable to create new job"))
return
}
r.OK(job)
})
// }}}
r.Dispatch("GET /v2/tenants/:uuid/jobs/:uuid", func(r *route.Request) { // {{{
if c.IsNotTenantOperator(r, r.Args[1]) {
return
}
job, err := c.db.GetJob(r.Args[2])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve job information"))
return
}
if job == nil || job.TenantUUID != r.Args[1] {
r.Fail(route.NotFound(nil, "No such job"))
return
}
r.OK(job)
})
// }}}
r.Dispatch("PUT /v2/tenants/:uuid/jobs/:uuid", func(r *route.Request) { // {{{
if c.IsNotTenantEngineer(r, r.Args[1]) {
return
}
var in struct {
Name string `json:"name"`
Summary string `json:"summary"`
Schedule string `json:"schedule"`
Retain string `json:"retain"`
StoreUUID string `json:"store"`
TargetUUID string `json:"target"`
FixedKey *bool `json:"fixed_key"`
}
if !r.Payload(&in) {
return
}
job, err := c.db.GetJob(r.Args[2])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve job information"))
return
}
if job == nil || job.TenantUUID != r.Args[1] {
r.Fail(route.NotFound(nil, "No such job"))
return
}
if in.Name != "" {
job.Name = in.Name
}
if in.Summary != "" {
job.Summary = in.Summary
}
if in.Schedule != "" {
if _, err := timespec.Parse(in.Schedule); err != nil {
r.Fail(route.Oops(err, "Invalid or malformed SHIELD Job Schedule '%s'", in.Schedule))
return
}
job.Schedule = in.Schedule
}
if in.Retain != "" {
keepdays := util.ParseRetain(in.Retain)
if keepdays < 0 {
r.Fail(route.Oops(nil, "Invalid or malformed SHIELD Job Archive Retention Period '%s'", in.Retain))
return
}
if keepdays < c.Config.Limit.Retention.Min {
r.Fail(route.Oops(nil, "SHIELD Job Archive Retention Period '%s' is too short, archives must be kept for a minimum of %d days", in.Retain, c.Config.Limit.Retention.Min))
return
}
if keepdays > c.Config.Limit.Retention.Max {
r.Fail(route.Oops(nil, "SHIELD Job Archive Retention Period '%s' is too long, archives may be kept for a maximum of %d days", in.Retain, c.Config.Limit.Retention.Max))
return
}
job.KeepDays = keepdays
job.KeepN = -1
if sched, err := timespec.Parse(job.Schedule); err == nil {
job.KeepN = sched.KeepN(job.KeepDays)
}
}
job.TargetUUID = job.Target.UUID
if in.TargetUUID != "" {
job.TargetUUID = in.TargetUUID
}
job.StoreUUID = job.Store.UUID
if in.StoreUUID != "" {
job.StoreUUID = in.StoreUUID
}
if in.FixedKey != nil {
job.FixedKey = *in.FixedKey
}
if err := c.db.UpdateJob(job); err != nil {
r.Fail(route.Oops(err, "Unable to update job"))
return
}
if in.Schedule != "" {
if spec, err := timespec.Parse(in.Schedule); err == nil {
if next, err := spec.Next(time.Now()); err == nil {
c.db.RescheduleJob(job, next)
}
}
}
r.Success("Updated job successfully")
})
// }}}
r.Dispatch("DELETE /v2/tenants/:uuid/jobs/:uuid", func(r *route.Request) { // {{{
if c.IsNotTenantEngineer(r, r.Args[1]) {
return
}
job, err := c.db.GetJob(r.Args[2])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve job information"))
return
}
if job == nil || job.TenantUUID != r.Args[1] {
r.Fail(route.NotFound(nil, "No such job"))
return
}
deleted, err := c.db.DeleteJob(job.UUID)
if err != nil {
r.Fail(route.Oops(err, "Unable to delete job"))
return
}
if !deleted {
r.Fail(route.Forbidden(nil, "The job cannot be deleted at this time"))
return
}
r.Success("Job deleted successfully")
})
// }}}
r.Dispatch("POST /v2/tenants/:uuid/jobs/:uuid/run", func(r *route.Request) { // {{{
if c.IsNotTenantOperator(r, r.Args[1]) {
return
}
job, err := c.db.GetJob(r.Args[2])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve job information"))
return
}
if job == nil || job.TenantUUID != r.Args[1] {
r.Fail(route.NotFound(nil, "No such job"))
return
}
user, _ := c.AuthenticatedUser(r)
task, err := c.db.CreateBackupTask(fmt.Sprintf("%s@%s", user.Account, user.Backend), job)
if task == nil || err != nil {
r.Fail(route.Oops(err, "Unable to schedule ad hoc backup job run"))
return
}
var out struct {
OK string `json:"ok"`
TaskUUID string `json:"task_uuid"`
}
out.OK = "Scheduled ad hoc backup job run"
out.TaskUUID = task.UUID
r.OK(out)
})
// }}}
r.Dispatch("POST /v2/tenants/:uuid/jobs/:uuid/pause", func(r *route.Request) { // {{{
if c.IsNotTenantOperator(r, r.Args[1]) {
return
}
job, err := c.db.GetJob(r.Args[2])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve job information"))
return
}
if job == nil || job.TenantUUID != r.Args[1] {
r.Fail(route.NotFound(nil, "No such job"))
return
}
if _, err = c.db.PauseJob(job.UUID); err != nil {
r.Fail(route.Oops(err, "Unable to pause job"))
return
}
r.Success("Paused job successfully")
})
// }}}
r.Dispatch("POST /v2/tenants/:uuid/jobs/:uuid/unpause", func(r *route.Request) { // {{{
if c.IsNotTenantOperator(r, r.Args[1]) {
return
}
job, err := c.db.GetJob(r.Args[2])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve job information"))
return
}
if job == nil || job.TenantUUID != r.Args[1] {
r.Fail(route.NotFound(nil, "No such job"))
return
}
if _, err = c.db.UnpauseJob(job.UUID); err != nil {
r.Fail(route.Oops(err, "Unable to unpause job"))
return
}
r.Success("Unpaused job successfully")
})
// }}}
r.Dispatch("GET /v2/tenants/:uuid/tasks", func(r *route.Request) { // {{{
if c.IsNotTenantOperator(r, r.Args[1]) {
return
}
limit, err := strconv.Atoi(r.Param("limit", "30"))
if err != nil || limit < 0 || limit > 30 {
r.Fail(route.Bad(err, "Invalid limit parameter given"))
return
}
		// check to see if we're offsetting task requests
paginationDate, err := strconv.ParseInt(r.Param("before", "0"), 10, 64)
if err != nil || paginationDate < 0 {
r.Fail(route.Bad(err, "Invalid before parameter given"))
return
}
tasks, err := c.db.GetAllTasks(
&db.TaskFilter{
UUID: r.Param("uuid", ""),
ExactMatch: r.ParamIs("exact", "t"),
SkipActive: r.ParamIs("active", "f"),
SkipInactive: r.ParamIs("active", "t"),
ForStatus: r.Param("status", ""),
ForTarget: r.Param("target", ""),
ForStore: r.Param("store", ""),
ForOp: r.Param("type", ""),
ForTenant: r.Args[1],
Limit: limit,
Before: paginationDate,
StartedAfter: r.ParamDuration("started_after"),
StoppedAfter: r.ParamDuration("stopped_after"),
StartedBefore: r.ParamDuration("started_before"),
StoppedBefore: r.ParamDuration("stopped_before"),
},
)
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve task information"))
return
}
if !c.CanSeeCredentials(r, r.Args[1]) {
c.db.RedactAllTaskLogs(tasks)
}
r.OK(tasks)
})
// }}}
r.Dispatch("GET /v2/tenants/:uuid/tasks/:uuid", func(r *route.Request) { // {{{
if c.IsNotTenantOperator(r, r.Args[1]) {
return
}
task, err := c.db.GetTask(r.Args[2])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve task information"))
return
}
if task == nil || task.TenantUUID != r.Args[1] {
r.Fail(route.NotFound(err, "No such task"))
return
}
if !c.CanSeeCredentials(r, r.Args[1]) {
c.db.RedactTaskLog(task)
}
r.OK(task)
})
// }}}
r.Dispatch("DELETE /v2/tenants/:uuid/tasks/:uuid", func(r *route.Request) { // {{{
if c.IsNotTenantOperator(r, r.Args[1]) {
return
}
task, err := c.db.GetTask(r.Args[2])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve task information"))
return
}
if task == nil || task.TenantUUID != r.Args[1] {
r.Fail(route.NotFound(err, "No such task"))
return
}
if err := c.db.CancelTask(task.UUID, time.Now()); err != nil {
r.Fail(route.Oops(err, "Unable to cancel task"))
return
}
r.Success("Canceled task successfully")
})
// }}}
r.Dispatch("GET /v2/tenants/:uuid/archives", func(r *route.Request) { // {{{
if c.IsNotTenantOperator(r, r.Args[1]) {
return
}
limit, err := strconv.Atoi(r.Param("limit", "0"))
if err != nil || limit < 0 {
r.Fail(route.Bad(err, "Invalid limit parameter given"))
return
}
status := []string{}
if s := r.Param("status", ""); s != "" {
status = append(status, s)
}
archives, err := c.db.GetAllArchives(
&db.ArchiveFilter{
UUID: r.Param("uuid", ""),
ExactMatch: r.ParamIs("exact", "t"),
ForTenant: r.Args[1],
ForTarget: r.Param("target", ""),
ForStore: r.Param("store", ""),
Before: r.ParamDate("before"),
After: r.ParamDate("after"),
WithStatus: status,
Limit: limit,
},
)
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve backup archives information"))
return
}
r.OK(archives)
})
// }}}
r.Dispatch("GET /v2/tenants/:uuid/archives/:uuid", func(r *route.Request) { // {{{
if c.IsNotTenantOperator(r, r.Args[1]) {
return
}
archive, err := c.db.GetArchive(r.Args[2])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve backup archive information"))
return
}
if archive == nil || archive.TenantUUID != r.Args[1] {
r.Fail(route.NotFound(nil, "Archive Not Found"))
return
}
r.OK(archive)
})
// }}}
r.Dispatch("PUT /v2/tenants/:uuid/archives/:uuid", func(r *route.Request) { // {{{
if c.IsNotTenantOperator(r, r.Args[1]) {
return
}
var in struct {
Notes string `json:"notes"`
}
if !r.Payload(&in) {
return
}
archive, err := c.db.GetArchive(r.Args[2])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve backup archive information"))
return
}
if archive == nil || archive.TenantUUID != r.Args[1] {
r.Fail(route.NotFound(nil, "No such backup archive"))
return
}
if r.Missing("notes", in.Notes) {
return
}
archive.Notes = in.Notes
if err := c.db.UpdateArchive(archive); err != nil {
r.Fail(route.Oops(err, "Unable to update backup archive"))
return
}
r.OK(archive)
})
// }}}
r.Dispatch("DELETE /v2/tenants/:uuid/archives/:uuid", func(r *route.Request) { // {{{
if c.IsNotTenantOperator(r, r.Args[1]) {
return
}
archive, err := c.db.GetArchive(r.Args[2])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve backup archive information"))
return
}
if archive == nil || archive.TenantUUID != r.Args[1] {
r.Fail(route.NotFound(nil, "No such backup archive"))
return
}
		if archive.Status != "valid" {
			r.Fail(route.Bad(nil, "The backup archive could not be deleted at this time. Archive is already %s", archive.Status))
			return
		}
err = c.db.ManuallyPurgeArchive(archive.UUID)
if err != nil {
r.Fail(route.Oops(err, "Unable to delete backup archive"))
return
}
err = c.vault.Delete(fmt.Sprintf("secret/archives/%s", archive.UUID))
if err != nil {
log.Errorf("failed to delete encryption parameters for archive %s: %s", archive.UUID, err)
}
r.Success("Archive deleted successfully")
})
// }}}
r.Dispatch("POST /v2/tenants/:uuid/archives/:uuid/restore", func(r *route.Request) { // {{{
if c.IsNotTenantOperator(r, r.Args[1]) {
return
}
var in struct {
Target string `json:"target"`
}
if !r.Payload(&in) {
return
}
archive, err := c.db.GetArchive(r.Args[2])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve backup archive information"))
return
}
if archive == nil || archive.TenantUUID != r.Args[1] {
r.Fail(route.NotFound(nil, "No such backup archive"))
return
}
if in.Target == "" {
in.Target = archive.TargetUUID
}
target, err := c.db.GetTarget(in.Target)
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve backup archive information"))
return
}
		if target == nil || target.TenantUUID != r.Args[1] {
			r.Fail(route.NotFound(nil, "No such target"))
return
}
user, _ := c.AuthenticatedUser(r)
task, err := c.db.CreateRestoreTask(fmt.Sprintf("%s@%s", user.Account, user.Backend), archive, target)
if task == nil || err != nil {
r.Fail(route.Oops(err, "Unable to schedule a restore task"))
return
}
if !c.CanSeeCredentials(r, r.Args[1]) {
c.db.RedactTaskLog(task)
}
r.OK(task)
})
// }}}
r.Dispatch("POST /v2/auth/login", func(r *route.Request) { // {{{
var in struct {
Username string
Password string
}
if !r.Payload(&in) {
return
}
if r.Missing("username", in.Username, "password", in.Password) {
return
}
user, err := c.db.GetUser(in.Username, "local")
if err != nil {
r.Fail(route.Oops(err, "Unable to log you in"))
return
}
if user == nil || !user.Authenticate(in.Password) {
r.Fail(route.Errorf(401, nil, "Incorrect username or password"))
return
}
session, err := c.db.CreateSession(&db.Session{
UserUUID: user.UUID,
IP: r.RemoteIP(),
UserAgent: r.UserAgent(),
})
if err != nil {
r.Fail(route.Oops(err, "Unable to log you in"))
return
}
if session == nil {
r.Fail(route.Oops(fmt.Errorf("no session created"), "Unable to log you in"))
return
}
id, err := c.checkAuth(user)
		if err != nil || id == nil {
			r.Fail(route.Oops(err, "Unable to log you in"))
			return
		}
r.SetSession(session.UUID)
r.OK(id)
})
// }}}
r.Dispatch("GET /v2/auth/logout", func(r *route.Request) { // {{{
if err := c.db.ClearSession(r.SessionID()); err != nil {
r.Fail(route.Oops(err, "Unable to log you out"))
return
}
r.ClearSession()
r.Success("Successfully logged out")
})
// }}}
r.Dispatch("GET /v2/auth/id", func(r *route.Request) { // {{{
user, _ := c.AuthenticatedUser(r)
if id, _ := c.checkAuth(user); id != nil {
r.OK(id)
return
}
r.OK(struct {
Unauthenticated bool `json:"unauthenticated"`
}{true})
})
// }}}
r.Dispatch("POST /v2/auth/passwd", func(r *route.Request) { // {{{
if c.IsNotAuthenticated(r) {
return
}
var in struct {
OldPassword string `json:"old_password"`
NewPassword string `json:"new_password"`
}
if !r.Payload(&in) {
return
}
user, _ := c.AuthenticatedUser(r)
if !user.Authenticate(in.OldPassword) {
r.Fail(route.Forbidden(nil, "Incorrect password"))
return
}
user.SetPassword(in.NewPassword)
if err := c.db.UpdateUser(user); err != nil {
r.Fail(route.Oops(err, "Unable to change your password"))
return
}
r.Success("Password changed successfully")
})
// }}}
r.Dispatch("PATCH /v2/auth/user/settings", func(r *route.Request) { // {{{
var in struct {
DefaultTenant string `json:"default_tenant"`
}
if !r.Payload(&in) {
return
}
user, err := c.AuthenticatedUser(r)
if err != nil {
r.Fail(route.Oops(err, "Unable to save settings"))
return
}
if in.DefaultTenant != "" {
user.DefaultTenant = in.DefaultTenant
}
if err := c.db.UpdateUserSettings(user); err != nil {
r.Fail(route.Oops(err, "Unable to save settings"))
return
}
r.Success("Settings saved")
})
// }}}
r.Dispatch("GET /v2/global/stores", func(r *route.Request) { // {{{
if c.IsNotAuthenticated(r) {
return
}
stores, err := c.db.GetAllStores(
&db.StoreFilter{
SkipUsed: r.ParamIs("unused", "t"),
SkipUnused: r.ParamIs("unused", "f"),
UUID: r.Param("uuid", ""),
SearchName: r.Param("name", ""),
ForPlugin: r.Param("plugin", ""),
ExactMatch: r.ParamIs("exact", "t"),
ForTenant: db.GlobalTenantUUID,
},
)
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve storage systems information"))
return
}
r.OK(stores)
})
// }}}
r.Dispatch("GET /v2/global/stores/:uuid", func(r *route.Request) { // {{{
if c.IsNotAuthenticated(r) {
return
}
store, err := c.db.GetStore(r.Args[1])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve storage system information"))
return
}
if store == nil || store.TenantUUID != db.GlobalTenantUUID {
r.Fail(route.NotFound(nil, "No such storage system"))
return
}
r.OK(store)
})
// }}}""
r.Dispatch("GET /v2/global/stores/:uuid/config", func(r *route.Request) { // {{{
if c.IsNotSystemEngineer(r) {
return
}
store, err := c.db.GetStore(r.Args[1])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve storage system information"))
return
}
if store == nil || store.TenantUUID != db.GlobalTenantUUID {
r.Fail(route.NotFound(nil, "No such storage system"))
return
}
config, err := store.Configuration(c.db, true)
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve storage system information"))
return
}
r.OK(config)
})
// }}}""
r.Dispatch("POST /v2/global/stores", func(r *route.Request) { // {{{
if c.IsNotSystemEngineer(r) {
return
}
var in struct {
Name string `json:"name"`
Summary string `json:"summary"`
Agent string `json:"agent"`
Plugin string `json:"plugin"`
Threshold int64 `json:"threshold"`
Config map[string]interface{} `json:"config"`
}
if !r.Payload(&in) {
return
}
if r.Missing("name", in.Name, "agent", in.Agent, "plugin", in.Plugin, "threshold", fmt.Sprint(in.Threshold)) {
return
}
store, err := c.db.CreateStore(&db.Store{
TenantUUID: db.GlobalTenantUUID,
Name: in.Name,
Summary: in.Summary,
Agent: in.Agent,
Plugin: in.Plugin,
Config: in.Config,
Threshold: in.Threshold,
Healthy: true, /* let's be optimistic */
})
if store == nil || err != nil {
r.Fail(route.Oops(err, "Unable to create new storage system"))
return
}
r.OK(store)
})
// }}}
r.Dispatch("PUT /v2/global/stores/:uuid", func(r *route.Request) { // {{{
if c.IsNotSystemEngineer(r) {
return
}
var in struct {
Name string `json:"name"`
Summary string `json:"summary"`
Agent string `json:"agent"`
Plugin string `json:"plugin"`
Threshold int64 `json:"threshold"`
Config map[string]interface{} `json:"config"`
}
if !r.Payload(&in) {
r.Fail(route.Bad(nil, "Unable to update storage system"))
return
}
store, err := c.db.GetStore(r.Args[1])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve storage system information"))
return
}
if store == nil || store.TenantUUID != db.GlobalTenantUUID {
r.Fail(route.NotFound(err, "No such storage system"))
return
}
if in.Name != "" {
store.Name = in.Name
}
if in.Summary != "" {
store.Summary = in.Summary
}
if in.Agent != "" {
store.Agent = in.Agent
}
if in.Plugin != "" {
store.Plugin = in.Plugin
}
if in.Threshold != 0 {
store.Threshold = in.Threshold
}
if in.Config != nil {
store.Config = in.Config
}
if err := c.db.UpdateStore(store); err != nil {
r.Fail(route.Oops(err, "Unable to update storage system"))
return
}
store, err = c.db.GetStore(store.UUID)
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve storage system information"))
return
}
r.OK(store)
})
// }}}
r.Dispatch("DELETE /v2/global/stores/:uuid", func(r *route.Request) { // {{{
if c.IsNotSystemEngineer(r) {
return
}
store, err := c.db.GetStore(r.Args[1])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve storage system information"))
return
}
if store == nil || store.TenantUUID != db.GlobalTenantUUID {
r.Fail(route.NotFound(err, "No such storage system"))
return
}
deleted, err := c.db.DeleteStore(store.UUID)
if err != nil {
r.Fail(route.Oops(err, "Unable to delete storage system"))
return
}
if !deleted {
r.Fail(route.Bad(nil, "The storage system cannot be deleted at this time"))
return
}
r.Success("Storage system deleted successfully")
})
// }}}
r.Dispatch("GET /v2/fixups", func(r *route.Request) { // {{{
if c.IsNotSystemEngineer(r) {
return
}
fixups, err := c.db.GetAllFixups(nil)
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve data fixups information"))
return
}
r.OK(fixups)
})
// }}}
r.Dispatch("GET /v2/fixups/:id", func(r *route.Request) { // {{{
if c.IsNotSystemEngineer(r) {
return
}
fixup, err := c.db.GetFixup(r.Args[1])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve data fixup"))
return
}
r.OK(fixup)
})
// }}}
r.Dispatch("POST /v2/fixups/:id/apply", func(r *route.Request) { // {{{
if c.IsNotSystemEngineer(r) {
return
}
fixup, err := c.db.GetFixup(r.Args[1])
if err != nil {
r.Fail(route.Oops(err, "Unable to retrieve data fixups information"))
return
}
err = fixup.ReApply(c.db)
if err != nil {
r.Fail(route.Oops(err, "Unable to apply data fixup successfully"))
return
}
r.Success("applied fixup")
})
// }}}
r.Dispatch("POST /v2/bootstrap/restore", func(r *route.Request) { // {{{
if c.IsNotSystemAdmin(r) {
return
}
log.Infof("BOOTSTRAP: streaming uploaded archive file...")
in, _, err := r.Req.FormFile("archive")
if err != nil {
r.Fail(route.Oops(err, "Unable to stream uploaded backup archive"))
return
}
log.Infof("BOOTSTRAP: deriving encryption parameters from provided fixed key...")
/* derive encryption parameters from fixed key */
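		/* the key is expected as 1024 hex characters (512 bytes of key material);
		   any embedded whitespace is stripped before validation */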
key := regexp.MustCompile(`\s`).ReplaceAll([]byte(r.Req.FormValue("key")), nil)
if !regexp.MustCompile(`^[A-Fa-f0-9]*$`).Match(key) || len(key) != 1024 {
r.Fail(route.Oops(nil, "Invalid SHIELD Fixed Key (must be 1024 hex digits)"))
return
}
enc, err := vault.DeriveFixedParameters(key)
if err != nil {
r.Fail(route.Oops(err, "Invalid SHIELD Fixed Key (unable to use it to derive encryption parameters)"))
return
}
/* execute the shield-recover command */
log.Infof("BOOTSTRAP: executing shield-recover process...")
cmd := exec.Command("shield-recover")
cmd.Stdin = in
cmd.Env = append(cmd.Env, fmt.Sprintf("PATH=%s:%s", strings.Join(c.Config.PluginPaths, ":"), os.Getenv("PATH")))
cmd.Env = append(cmd.Env, fmt.Sprintf("SHIELD_DATA_DIR=%s", c.Config.DataDir))
cmd.Env = append(cmd.Env, fmt.Sprintf("SHIELD_RESTARTER=%s", c.Config.Bootstrapper))
cmd.Env = append(cmd.Env, fmt.Sprintf("SHIELD_ENCRYPT_TYPE=%s", enc.Type))
cmd.Env = append(cmd.Env, fmt.Sprintf("SHIELD_ENCRYPT_KEY=%s", enc.Key))
cmd.Env = append(cmd.Env, fmt.Sprintf("SHIELD_ENCRYPT_IV=%s", enc.IV))
c.bailout = true
if err := cmd.Run(); err != nil {
log.Errorf("BOOTSTRAP: command exited abnormally (%s)", err)
r.Fail(route.Oops(err, "SHIELD Restore Failed: You may be in a broken state."))
return
}
log.Errorf("BOOTSTRAP: RESTORED SUCCESSFULLY; removing bootstrap.log")
os.Remove(c.DataFile("bootstrap.old"))
os.Rename(c.DataFile("bootstrap.log"), c.DataFile("bootstrap.old"))
r.Success("SHIELD successfully restored")
return
}) // }}}
r.Dispatch("GET /v2/bootstrap/log", func(r *route.Request) { // {{{
if c.IsNotSystemAdmin(r) {
return
}
b, err := ioutil.ReadFile(c.DataFile("bootstrap.log"))
if err != nil {
log.Errorf("unable to read bootstrap.log: %s", err)
}
r.OK(struct {
Log string `json:"log"`
}{Log: string(b)})
}) // }}}
r.Dispatch("GET /v2/export", func(r *route.Request) { // {{{
if c.IsNotSystemAdmin(r) {
return
}
st, err := c.vault.StatusString()
if err != nil {
r.Fail(route.Oops(err, "Failed to export SHIELD data"))
return
}
if st != "unlocked" {
r.Fail(route.Oops(fmt.Errorf("vault is locked"), "Failed to export SHIELD data"))
return
}
if out := r.JSONEncoder(); out != nil {
c.db.Export(out, c.vault, r.Param("task", ""))
} else {
r.Fail(route.Oops(nil, "Failed to export SHIELD data"))
}
}) // }}}
r.Dispatch("POST /v2/import", func(r *route.Request) { // {{{
if c.IsNotSystemAdmin(r) {
return
}
st, err := c.vault.StatusString()
if err != nil {
r.Fail(route.Oops(err, "Failed to import SHIELD data"))
return
}
if st != "unlocked" {
r.Fail(route.Oops(fmt.Errorf("vault is locked"), "Failed to import SHIELD data"))
return
}
if in := r.JSONDecoder(); in != nil {
err = c.db.Import(in, c.vault, r.Param("key", ""), r.Param("task", ""))
if err != nil {
r.Fail(route.Oops(err, "Failed to import SHIELD data"))
return
}
r.Success("imported successfully: %s %s", r.Param("key", ""), r.Param("task", ""))
} else {
r.Fail(route.Oops(nil, "Failed to import SHIELD data"))
}
}) // }}}
return r
}
func (c *Core) v2copyTarget(dst *v2System, target *db.Target) error {
dst.UUID = target.UUID
dst.Name = target.Name
dst.Notes = target.Summary
dst.OK = true
dst.Compression = target.Compression
jobs, err := c.db.GetAllJobs(&db.JobFilter{ForTarget: target.UUID})
if err != nil {
return err
}
dst.Jobs = make([]v2SystemJob, len(jobs))
for j, job := range jobs {
dst.Jobs[j].UUID = job.UUID
dst.Jobs[j].Schedule = job.Schedule
dst.Jobs[j].From = job.Target.Plugin
dst.Jobs[j].To = job.Store.Plugin
dst.Jobs[j].OK = job.Healthy
dst.Jobs[j].Store.UUID = job.Store.UUID
dst.Jobs[j].Store.Name = job.Store.Name
dst.Jobs[j].Store.Summary = job.Store.Summary
dst.Jobs[j].Store.Healthy = job.Store.Healthy
if !job.Healthy {
dst.OK = false
}
tspec, err := timespec.Parse(job.Schedule)
if err != nil {
return err
}
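		// Translate the job's retention window in days (Keep.Days) into an
		// approximate archive count (Keep.N), based on the schedule's interval
		// and cardinality: a daily job keeping 7 days of archives keeps roughly
		// 7, an hourly one roughly 7 * 24.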
switch tspec.Interval {
case timespec.Minutely:
dst.Jobs[j].Keep.N = dst.Jobs[j].Keep.Days * 1440 / int(tspec.Cardinality)
case timespec.Hourly:
if tspec.Cardinality == 0 {
dst.Jobs[j].Keep.N = dst.Jobs[j].Keep.Days * 24
} else {
dst.Jobs[j].Keep.N = dst.Jobs[j].Keep.Days * 24 / int(tspec.Cardinality)
}
case timespec.Daily:
dst.Jobs[j].Keep.N = dst.Jobs[j].Keep.Days
case timespec.Weekly:
dst.Jobs[j].Keep.N = dst.Jobs[j].Keep.Days / 7
case timespec.Monthly:
dst.Jobs[j].Keep.N = dst.Jobs[j].Keep.Days / 30
}
}
return nil
}
|
[
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
go
| 1 | 0 | |
networking/net/veth/veth.go
|
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"log"
"net"
"os"
"runtime"
"github.com/coreos/rocket/Godeps/_workspace/src/github.com/vishvananda/netlink"
"github.com/coreos/rocket/networking/ipam"
rktnet "github.com/coreos/rocket/networking/net"
"github.com/coreos/rocket/networking/util"
)
func init() {
// this ensures that main runs only on main thread (thread group leader).
// since namespace ops (unshare, setns) are done for a single thread, we
// must ensure that the goroutine does not jump from OS thread to thread
runtime.LockOSThread()
}
type Net struct {
rktnet.Net
IPMasq bool `json:"ipMasq"`
MTU int `json:"mtu"`
}
func setupContVeth(contID, netns, ifName string, mtu int, ipConf *ipam.IPConfig) (string, error) {
var hostVethName string
err := util.WithNetNSPath(netns, func(hostNS *os.File) error {
entropy := contID + ifName
hostVeth, _, err := util.SetupVeth(entropy, ifName, mtu, hostNS)
if err != nil {
return err
}
err = ipam.ApplyIPConfig(ifName, ipConf)
if err != nil {
return err
}
hostVethName = hostVeth.Attrs().Name
return nil
})
return hostVethName, err
}
func setupHostVeth(vethName string, ipConf *ipam.IPConfig) error {
// hostVeth moved namespaces and may have a new ifindex
veth, err := netlink.LinkByName(vethName)
if err != nil {
return fmt.Errorf("failed to lookup %q: %v", vethName, err)
}
// TODO(eyakubovich): IPv6
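	// (the /31 mask below yields a two-address point-to-point subnet, RFC 3021
	// style, shared by the container address and its gateway on the host veth)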
ipn := &net.IPNet{
IP: ipConf.Gateway,
Mask: net.CIDRMask(31, 32),
}
addr := &netlink.Addr{IPNet: ipn, Label: ""}
if err = netlink.AddrAdd(veth, addr); err != nil {
return fmt.Errorf("failed to add IP addr to veth: %v", err)
}
// dst happens to be the same as IP/net of host veth
if err = util.AddHostRoute(ipn, nil, veth); err != nil && !os.IsExist(err) {
return fmt.Errorf("failed to add route on host: %v", err)
}
return nil
}
func cmdAdd(contID, netns, netConf, ifName, args string) error {
conf := Net{}
if err := rktnet.LoadNet(netConf, &conf); err != nil {
return fmt.Errorf("failed to load %q: %v", netConf, err)
}
// run the IPAM plugin and get back the config to apply
ipConf, err := ipam.ExecPluginAdd(conf.IPAM.Type)
if err != nil {
return err
}
hostVethName, err := setupContVeth(contID, netns, ifName, conf.MTU, ipConf)
if err != nil {
return err
}
if err = setupHostVeth(hostVethName, ipConf); err != nil {
return err
}
if conf.IPMasq {
chain := fmt.Sprintf("RKT-%s-%s", conf.Name, contID[:8])
if err = util.SetupIPMasq(ipConf.IP, chain); err != nil {
return err
}
}
return rktnet.PrintIfConfig(&rktnet.IfConfig{
IP: ipConf.IP.IP,
})
}
func cmdDel(contID, netns, netConf, ifName, args string) error {
conf := Net{}
if err := rktnet.LoadNet(netConf, &conf); err != nil {
return fmt.Errorf("failed to load %q: %v", netConf, err)
}
var ipn *net.IPNet
err := util.WithNetNSPath(netns, func(hostNS *os.File) error {
var err error
ipn, err = util.DelLinkByNameAddr(ifName, netlink.FAMILY_V4)
return err
})
if err != nil {
return err
}
if conf.IPMasq {
chain := fmt.Sprintf("RKT-%s-%s", conf.Name, contID[:8])
if err = util.TeardownIPMasq(ipn, chain); err != nil {
return err
}
}
return ipam.ExecPluginDel(conf.IPAM.Type)
}
func main() {
var err error
cmd := os.Getenv("RKT_NETPLUGIN_COMMAND")
contID := os.Getenv("RKT_NETPLUGIN_CONTID")
netns := os.Getenv("RKT_NETPLUGIN_NETNS")
args := os.Getenv("RKT_NETPLUGIN_ARGS")
ifName := os.Getenv("RKT_NETPLUGIN_IFNAME")
netConf := os.Getenv("RKT_NETPLUGIN_NETCONF")
if cmd == "" || contID == "" || netns == "" || ifName == "" || netConf == "" {
log.Printf("Required env variable missing")
log.Print("Env: ", os.Environ())
os.Exit(1)
}
switch cmd {
case "ADD":
err = cmdAdd(contID, netns, netConf, ifName, args)
case "DEL":
err = cmdDel(contID, netns, netConf, ifName, args)
default:
log.Printf("Unknown RKT_NETPLUGIN_COMMAND: %v", cmd)
os.Exit(1)
}
if err != nil {
log.Printf("%v: %v", cmd, err)
os.Exit(1)
}
}
|
[
"\"RKT_NETPLUGIN_COMMAND\"",
"\"RKT_NETPLUGIN_CONTID\"",
"\"RKT_NETPLUGIN_NETNS\"",
"\"RKT_NETPLUGIN_ARGS\"",
"\"RKT_NETPLUGIN_IFNAME\"",
"\"RKT_NETPLUGIN_NETCONF\""
] |
[] |
[
"RKT_NETPLUGIN_ARGS",
"RKT_NETPLUGIN_COMMAND",
"RKT_NETPLUGIN_CONTID",
"RKT_NETPLUGIN_NETCONF",
"RKT_NETPLUGIN_NETNS",
"RKT_NETPLUGIN_IFNAME"
] |
[]
|
["RKT_NETPLUGIN_ARGS", "RKT_NETPLUGIN_COMMAND", "RKT_NETPLUGIN_CONTID", "RKT_NETPLUGIN_NETCONF", "RKT_NETPLUGIN_NETNS", "RKT_NETPLUGIN_IFNAME"]
|
go
| 6 | 0 | |
main.go
|
// 2014 - Mathieu Lonjaret
// The acmetags program prints the tags of the acme windows.
package main
import (
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"strings"
"time"
"9fans.net/go/acme"
)
var (
output = flag.String("o", "", "output file. will only truncate if no error and output is non empty.")
timestamp = flag.Bool("ts", false, "add a timestamp suffix to the output file name")
allTags = flag.Bool("all", false, "print tags of all windows, instead of only \"win\" windows.")
)
func usage() {
fmt.Fprintf(os.Stderr, "usage: acmetags [-all]\n")
flag.PrintDefaults()
os.Exit(2)
}
func hostName() (string, error) {
hostname, err := os.Hostname()
if err == nil && hostname != "" {
return hostname, nil
}
hostname = os.Getenv("HOSTNAME")
if hostname != "" {
return hostname, nil
}
out, err := exec.Command("hostname").Output()
if err == nil && string(out) != "" {
return strings.TrimSpace(string(out)), nil
}
return "", errors.New("all methods to find our hostname failed")
}
func main() {
flag.Usage = usage
flag.Parse()
var hostname string
var err error
if !*allTags {
hostname, err = hostName()
if err != nil {
log.Fatal(err)
}
parts := strings.Split(hostname, ".")
hostname = parts[0]
}
windows, err := acme.Windows()
if err != nil {
log.Fatalf("could not get acme windows: %v", err)
}
isWinHint := "/-" + hostname
var accumTags string
for _, win := range windows {
if !(*allTags || strings.HasSuffix(win.Name, isWinHint)) {
continue
}
w, err := acme.Open(win.ID, nil)
if err != nil {
log.Fatalf("could not open window (%v, %d): %v", win.Name, win.ID, err)
}
tag, err := w.ReadAll("tag")
if err != nil {
log.Fatalf("could not read tags of window (%v, %d): %v", win.Name, win.ID, err)
}
if *output != "" {
accumTags += string(tag) + "\n\n"
continue
}
fmt.Printf("%s\n\n", tag)
}
if *output == "" {
return
}
if *timestamp {
*output += "-" + time.Now().Format(time.RFC3339)
}
if err := ioutil.WriteFile(*output, []byte(accumTags), 0600); err != nil {
log.Fatalf("could not write to output file: %v", err)
}
}
|
[
"\"HOSTNAME\""
] |
[] |
[
"HOSTNAME"
] |
[]
|
["HOSTNAME"]
|
go
| 1 | 0 | |
qa/rpc-tests/maxblocksinflight.py
|
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import logging
'''
In this test we connect to one node over p2p, send it numerous inv's, and
compare the resulting number of getdata requests to a max allowed value. We
test for exceeding 128 blocks in flight, which was the limit a 0.9 client would
reach. [0.10 clients shouldn't request more than 16 from a single peer.]
'''
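# Note: inv type 2 is MSG_BLOCK in the Bitcoin P2P protocol, so each
# CInv(2, <random hash>) below advertises a (nonexistent) block; invs are sent
# in batches because a single "inv" message carries at most 50,000 entries.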
MAX_REQUESTS = 128
class TestManager(NodeConnCB):
# set up NodeConnCB callbacks, overriding base class
def on_getdata(self, conn, message):
self.log.debug("got getdata %s" % repr(message))
# Log the requests
for inv in message.inv:
if inv.hash not in self.blockReqCounts:
self.blockReqCounts[inv.hash] = 0
self.blockReqCounts[inv.hash] += 1
def on_close(self, conn):
if not self.disconnectOkay:
raise EarlyDisconnectError(0)
def __init__(self):
NodeConnCB.__init__(self)
self.log = logging.getLogger("BlockRelayTest")
self.create_callback_map()
def add_new_connection(self, connection):
self.connection = connection
self.blockReqCounts = {}
self.disconnectOkay = False
def run(self):
try:
fail = False
self.connection.rpc.generate(1) # Leave IBD
numBlocksToGenerate = [ 8, 16, 128, 1024 ]
for count in range(len(numBlocksToGenerate)):
current_invs = []
for i in range(numBlocksToGenerate[count]):
current_invs.append(CInv(2, random.randrange(0, 1<<256)))
if len(current_invs) >= 50000:
self.connection.send_message(msg_inv(current_invs))
current_invs = []
if len(current_invs) > 0:
self.connection.send_message(msg_inv(current_invs))
# Wait and see how many blocks were requested
time.sleep(2)
total_requests = 0
with mininode_lock:
for key in self.blockReqCounts:
total_requests += self.blockReqCounts[key]
if self.blockReqCounts[key] > 1:
raise AssertionError("Error, test failed: block %064x requested more than once" % key)
if total_requests > MAX_REQUESTS:
raise AssertionError("Error, too many blocks (%d) requested" % total_requests)
print "Round %d: success (total requests: %d)" % (count, total_requests)
except AssertionError as e:
print "TEST FAILED: ", e.args
self.disconnectOkay = True
self.connection.disconnect_node()
class MaxBlocksInFlightTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("GCOIND", "gcoind"),
help="Binary to test max block requests behavior")
def setup_chain(self):
print "Initializing test directory "+self.options.tmpdir
initialize_chain_clean(self.options.tmpdir, 1)
def setup_network(self):
self.nodes = start_nodes(1, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']],
binary=[self.options.testbinary])
def run_test(self):
test = TestManager()
test.add_new_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test))
NetworkThread().start() # Start up network handling in another thread
test.run()
if __name__ == '__main__':
MaxBlocksInFlightTest().main()
|
[] |
[] |
[
"GCOIND"
] |
[]
|
["GCOIND"]
|
python
| 1 | 0 | |
alipay/aop/api/request/AlipayFundTransBatchCreatesinglebatchRequest.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayFundTransBatchCreatesinglebatchModel import AlipayFundTransBatchCreatesinglebatchModel
class AlipayFundTransBatchCreatesinglebatchRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayFundTransBatchCreatesinglebatchModel):
self._biz_content = value
else:
self._biz_content = AlipayFundTransBatchCreatesinglebatchModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.fund.trans.batch.createsinglebatch'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
source/outreach_tracking/methods_upload_outreach_events.py
|
from pymongo import MongoClient
from pymongo import ReadPreference
import json as _json
import os
import mysql.connector as mysql
import re
import requests
import time
import datetime
requests.packages.urllib3.disable_warnings()
metrics_mysql_password = os.environ["METRICS_MYSQL_PWD"]
kb_outreach_events_url = os.environ["KB_OUTREACH_EVENTS_URL"]
sql_host = os.environ["SQL_HOST"]
query_on = os.environ["QUERY_ON"]
db_connection = mysql.connect(
host=sql_host, user="metrics", passwd=metrics_mysql_password, database="metrics"
)
def quote_strip(string):
"""
    helper function to strip a single leading and trailing quote
"""
return re.sub(r'^"|"$', '', string)
def get_outreach_events():
"""
Gets the details for outreach events
"""
params = (("tqx", "out:csv"), ("sheet", "OUTREACH_EVENTS"))
response = requests.get(kb_outreach_events_url, params=params)
if response.status_code != 200:
print(
"ERROR - KB OUTREACH EVENTS GOOGLE SHEET RESPONSE STATUS CODE : "
+ str(response.status_code)
)
lines = response.text.split("\n")
headers = lines.pop(0)
events = dict()
for temp_line in lines:
#print("Temp_line: " + str(temp_line))
temp_line = quote_strip(temp_line)
line = temp_line.split('","')
(event_name, event_date, announcement_date, pre_attendee_list_url, event_type, topic,
presenters, narrative_urls, duration_hours, app_categories, estimated_attendance,
location, point_of_contact, feedback_form_url, comments ) = line[:15]
attendee_list_url = ""
if pre_attendee_list_url is not None and pre_attendee_list_url.startswith("https://docs.google.com/spreadsheets/"):
attendee_list_url = pre_attendee_list_url.rsplit("/",1)[0] + "/gviz/tq"
announcement_used = None
if announcement_date.strip() == "":
announcement_used = None
else:
announcement_used = announcement_date.strip()
events[event_name] = {
"event_date": event_date.strip(),
"announcement_date": announcement_used,
"attendee_list_url": attendee_list_url.strip(),
"event_type": event_type.strip(),
"topic": topic.strip(),
"presenters": presenters.strip(),
"narrative_urls" : narrative_urls.strip(),
"duration_hours": duration_hours.strip(),
"app_categories": app_categories.strip(),
"estimated_attendance": estimated_attendance.strip(),
"location": location.strip(),
"point_of_contact": point_of_contact.strip(),
"feedback_form_url": feedback_form_url.strip(),
"comments": comments.strip()}
return events
def upload_events(events):
"""
Takes the events dict and populates the outreach_events table
in the metrics MySQL DB.
"""
total_events = len(events.keys())
rows_info_inserted = 0
rows_info_updated = 0
rows_stats_inserted = 0
# connect to mysql
# db_connection = mysql.connect(
# host=sql_host, user="metrics", passwd=metrics_mysql_password, database="metrics"
# )
cursor = db_connection.cursor()
query = "use " + query_on
cursor.execute(query)
# get all existing users
existing_events_info = dict()
query = (
"select outreach_event_name, event_date, announcement_date, attendee_list_url, event_type, "
"topic, presenters, narrative_urls, duration_hours, app_categories, "
"estimated_attendance, location, point_of_contact, feedback_form_url, comments "
"from metrics.outreach_events"
)
cursor.execute(query)
for (
        event_name,
        event_date,
        announcement_date,
attendee_list_url,
event_type,
topic,
presenters,
narrative_urls,
duration_hours,
app_categories,
estimated_attendance,
location,
point_of_contact,
feedback_form_url,
comments,
) in cursor:
existing_events_info[event_name] = {
"event_date": event_date,
"announcement_date": announcement_date,
"attendee_list_url": attendee_list_url,
"event_type": event_type,
"topic": topic,
"presenters": presenters,
"narrative_urls" : narrative_urls,
"duration_hours": duration_hours,
"app_categories": app_categories,
"estimated_attendance": estimated_attendance,
"location": location,
"point_of_contact": point_of_contact,
"feedback_form_url": feedback_form_url,
"comments": comments,
}
print("Number of existing events:" + str(len(existing_events_info)))
prep_cursor = db_connection.cursor(prepared=True)
events_insert_statement = (
"insert into outreach_events "
"(outreach_event_name, event_date, announcement_date, attendee_list_url, event_type, "
"topic, presenters, narrative_urls, duration_hours, app_categories, "
"estimated_attendance, location, "
"point_of_contact, feedback_form_url, comments) "
"values(%s, %s, %s, %s, %s, %s, %s, "
"%s, %s, %s, %s, %s, %s, %s, %s);"
)
update_prep_cursor = db_connection.cursor(prepared=True)
events_update_statement = (
"update outreach_events "
"set event_date = %s, announcement_date = %s, "
"attendee_list_url = %s, event_type = %s, topic = %s, presenters = %s, "
"narrative_urls = %s, duration_hours = %s, "
"app_categories = %s, estimated_attendance = %s, location = %s, "
"point_of_contact = %s, feedback_form_url = %s, comments = %s "
"where outreach_event_name = %s;"
)
new_events_count = 0
events_updated_count = 0
for event_name in events:
# check if new user_info exists in the existing user info, if not insert the record.
if event_name not in existing_events_info:
input = (
event_name,
events[event_name]["event_date"],
events[event_name]["announcement_date"],
events[event_name]["attendee_list_url"],
events[event_name]["event_type"],
events[event_name]["topic"],
events[event_name]["presenters"],
events[event_name]["narrative_urls"],
events[event_name]["duration_hours"],
events[event_name]["app_categories"],
events[event_name]["estimated_attendance"],
events[event_name]["location"],
events[event_name]["point_of_contact"],
events[event_name]["feedback_form_url"],
events[event_name]["comments"],
)
prep_cursor.execute(events_insert_statement, input)
new_events_count += 1
else:
# Check if anything has changed in the events table, if so update the record
if not (
(
events[event_name]["event_date"] is None
or (events[event_name]["event_date"]
== str(existing_events_info[event_name]['event_date']))
)
and
(
events[event_name]["announcement_date"] is None
or (events[event_name]["announcement_date"]
== str(existing_events_info[event_name]['announcement_date']))
)
and events[event_name]["attendee_list_url"]
== existing_events_info[event_name]["attendee_list_url"]
and events[event_name]["event_type"]
== existing_events_info[event_name]["event_type"]
and events[event_name]["presenters"]
== existing_events_info[event_name]["presenters"]
and events[event_name]["topic"]
== existing_events_info[event_name]["topic"]
and events[event_name]["narrative_urls"]
== existing_events_info[event_name]["narrative_urls"]
and int(events[event_name]["duration_hours"])
== int(existing_events_info[event_name]["duration_hours"])
and events[event_name]["app_categories"]
== existing_events_info[event_name]["app_categories"]
and int(events[event_name]["estimated_attendance"])
== int(existing_events_info[event_name]["estimated_attendance"])
and events[event_name]["location"]
== existing_events_info[event_name]["location"]
and events[event_name]["point_of_contact"]
== existing_events_info[event_name]["point_of_contact"]
and events[event_name]["feedback_form_url"]
== existing_events_info[event_name]["feedback_form_url"]
and events[event_name]["comments"]
== existing_events_info[event_name]["comments"]
):
input = (
events[event_name]["event_date"],
events[event_name]["announcement_date"],
events[event_name]["attendee_list_url"],
events[event_name]["event_type"],
events[event_name]["topic"],
events[event_name]["presenters"],
events[event_name]["narrative_urls"],
events[event_name]["duration_hours"],
events[event_name]["app_categories"],
events[event_name]["estimated_attendance"],
events[event_name]["location"],
events[event_name]["point_of_contact"],
events[event_name]["feedback_form_url"],
events[event_name]["comments"],
event_name,
)
update_prep_cursor.execute(events_update_statement, input)
events_updated_count += 1
existing_event_names_set = existing_events_info.keys()
current_event_names_set = events.keys()
db_only_event_names = existing_event_names_set - current_event_names_set
if len(db_only_event_names) > 0:
print("*****************************")
print("It appears events were removed or renamed. The following events are in the DB, ")
print("but are currently not in the outreach events sheet. ")
print("If they are truly meant to be deleted please contact the DBA.")
for db_event in db_only_event_names:
print(str(db_event))
db_connection.commit()
print("Number of new events inserted:" + str(new_events_count))
print("Number of events updated:" + str(events_updated_count))
return 1
def upload_event_users(events):
"""
uploads the outreach_event_users
"""
cursor = db_connection.cursor()
query = "use " + query_on
cursor.execute(query)
# Query to check for existing records
# Existing meeting users dict Top level key is event_name
# Value is a set of all the user_names
existing_event_users_dict = dict()
query = ("select outreach_event_name, username from metrics.outreach_event_users")
cursor.execute(query)
for (event_name, username) in cursor:
if event_name not in existing_event_users_dict:
existing_event_users_dict[event_name] = set()
existing_event_users_dict[event_name].add(username)
# Make set of a valid usernames
valid_usernames = set()
query = ("select username from metrics.user_info")
cursor.execute(query)
for (username,) in cursor:
valid_usernames.add(username)
prep_cursor = db_connection.cursor(prepared=True)
event_users_insert_statement = (
"insert into metrics.outreach_event_users "
"(outreach_event_name, username) "
"values( %s, %s )"
)
total_new_users_all_events = 0
for event_name in events:
if events[event_name]["attendee_list_url"] != "":
if event_name not in existing_event_users_dict:
existing_event_users_dict[event_name] = set()
previous_existing_count = len(existing_event_users_dict[event_name])
new_users_in_event = 0
old_users_in_event_accounted_for = 0
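# The attendee list URL is assumed to point at a published Google Sheet; requesting it with
# tqx=out:csv and a sheet name (see params below) returns that tab as plain CSV text.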
params = (("tqx", "out:csv"), ("sheet", "Sheet1"))
response = requests.get(events[event_name]["attendee_list_url"], params=params)
if response.status_code != 200:
print("*****************************")
print("*****************************")
print(
"ERROR - UNABLE TO OPEN ATTENDEE LIST FOR EVENT: "
+ str(event_name) + " URL: "
+ str(events[event_name]["attendee_list_url"])
+ " sheet named 'Sheet1' - ERROR: "
+ str(response.status_code)
)
continue
lines = response.text.split("\n")
valid_lines_count = 0
event_users_set = set()
for line in lines:
elements = line.split(',')
username = quote_strip(elements[0])
username = username.strip()
if username != "":
valid_lines_count += 1
if username in event_users_set:
print("Event : " + str(event_name) + " has duplicate username : " + str(username))
else:
event_users_set.add(username)
new_users = event_users_set.difference(existing_event_users_dict[event_name])
users_removed_from_list = existing_event_users_dict[event_name].difference(event_users_set)
if len(users_removed_from_list) > 0:
# PRINT WARNINGS FOR USERNAMES THAT HAVE BEEN REMOVED FROM THE SHEET.
# Possibly want to remove them instead, need to talk with Ben
print("*****************************")
print("The following usernames were removed from the google sheet for event: ")
print(str(event_name) + " but were present in the past. ")
print("If they need to be removed from the event for real please contact the DBA.")
for username in users_removed_from_list:
print(str(username))
if len(new_users) > 0:
invalid_usernames = set()
for new_user in new_users:
# check if usernames are valid
if new_user in valid_usernames:
# These are new users that were not yet an attendee for the event
# Insert the new user names
total_new_users_all_events += 1
new_users_in_event += 1
input = (event_name, new_user)
prep_cursor.execute(event_users_insert_statement, input)
else:
invalid_usernames.add(new_user)
if len(invalid_usernames) > 0:
print("*****************************")
print("Event attendee list for " + str(event_name) + " has the following invalid usernames:")
for invalid_username in invalid_usernames:
print(str(invalid_username))
print("Event : " + str(event_name) + " had " + str(new_users_in_event) + " new users added.")
print("Across all events " + str(total_new_users_all_events) + " new users added.")
db_connection.commit()
return 1
print("############################################")
print("############################################")
print("############################################")
print("OUTREACH EVENTS Upload (UTC): " + str(datetime.datetime.utcnow()))
start_time = time.time()
events = get_outreach_events()
#print("Events:" + str(events))
upload_events(events)
upload_event_users(events)
print(
"--- Uploading Outreach event took %s seconds ---"
% (time.time() - start_time)
)
|
[] |
[] |
[
"KB_OUTREACH_EVENTS_URL",
"QUERY_ON",
"SQL_HOST",
"METRICS_MYSQL_PWD"
] |
[]
|
["KB_OUTREACH_EVENTS_URL", "QUERY_ON", "SQL_HOST", "METRICS_MYSQL_PWD"]
|
python
| 4 | 0 | |
tensorflow/horovod_mnist.py
|
#!/usr/bin/env python
import argparse,logging,time,sys,os,json
sys.path.append('..')
os.environ['TF_CPP_MIN_VLOG_LEVEL'] = '3'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
logger = logging.getLogger(__name__)
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, Conv2D
from tensorflow.keras import Model
from tensorflow.keras.mixed_precision import experimental as mixed_precision
from tools.CalcMean import CalcMean
DEFAULT_EPOCHS = 5
DEFAULT_BATCH_SIZE = 25
DEFAULT_OUTPUT = __file__.replace('.py','.json')
DEFAULT_INTEROP = int(os.cpu_count() / 4)
DEFAULT_INTRAOP = int(os.cpu_count() / 4)
def main():
''' Horovod-enabled implementation of DL training using MNIST data and the Tensorflow framework. '''
logging_format = '%(asctime)s %(levelname)s:%(name)s:%(message)s'
logging_datefmt = '%Y-%m-%d %H:%M:%S'
logging_level = logging.INFO
parser = argparse.ArgumentParser(description='')
parser.add_argument('-i','--input',help='path to mnist dataset on disk. Use "wget https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz" if you need to download it.',required=True)
parser.add_argument('-e','--epochs',help='number of epochs to train [DEFAULT=%d]' % DEFAULT_EPOCHS,default=DEFAULT_EPOCHS,type=int)
parser.add_argument('-b','--batch-size',help='batch size for training [DEFAULT=%d]' % DEFAULT_BATCH_SIZE,default=DEFAULT_BATCH_SIZE,type=int)
parser.add_argument('-o','--output',help='output json filename where metrics will be stored [DEFAULT=%s]' % DEFAULT_OUTPUT,default=DEFAULT_OUTPUT)
parser.add_argument('--interop',type=int,help='set Tensorflow "inter_op_parallelism_threads" session config variable [default: %s]' % DEFAULT_INTEROP,default=DEFAULT_INTEROP)
parser.add_argument('--intraop',type=int,help='set Tensorflow "intra_op_parallelism_threads" session config variable [default: %s]' % DEFAULT_INTRAOP,default=DEFAULT_INTRAOP)
parser.add_argument('--horovod', default=False, action='store_true', help="Use MPI with horovod")
parser.add_argument('--debug', dest='debug', default=False, action='store_true', help="Set Logger to DEBUG")
parser.add_argument('--error', dest='error', default=False, action='store_true', help="Set Logger to ERROR")
parser.add_argument('--warning', dest='warning', default=False, action='store_true', help="Set Logger to WARNING")
parser.add_argument('--logfilename',dest='logfilename',default=None,help='if set, logging information will go to file')
args = parser.parse_args()
hvd = None
rank = 0
nranks = 1
logging_format = '%(asctime)s %(levelname)s:%(process)s:%(thread)s:%(name)s:%(message)s'
logging_datefmt = '%Y-%m-%d %H:%M:%S'
logging_level = logging.INFO
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
if args.horovod:
import horovod
import horovod.tensorflow as hvd
hvd.init()
logging_format = '%(asctime)s %(levelname)s:%(process)s:%(thread)s:' + (
'%05d' % hvd.rank()) + ':%(name)s:%(message)s'
rank = hvd.rank()
nranks = hvd.size()
if rank > 0:
logging_level = logging.WARNING
# Horovod: pin GPU to be used to process local rank (one GPU per process)
if gpus:
tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
# Setup Logging
if args.debug and not args.error and not args.warning:
logging_level = logging.DEBUG
os.environ['TF_CPP_MIN_VLOG_LEVEL'] = '0'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'
elif not args.debug and args.error and not args.warning:
logging_level = logging.ERROR
elif not args.debug and not args.error and args.warning:
logging_level = logging.WARNING
logging.basicConfig(level=logging_level,
format=logging_format,
datefmt=logging_datefmt,
filename=args.logfilename)
# report rank makeup
if hvd:
logging.warning('rank: %5d size: %5d local rank: %5d local size: %5d', hvd.rank(), hvd.size(),
hvd.local_rank(), hvd.local_size())
tf.config.threading.set_inter_op_parallelism_threads(args.interop)
tf.config.threading.set_intra_op_parallelism_threads(args.intraop)
logger.info('number of gpus: %s',len(gpus))
logger.info('input: %s',args.input)
logger.info('epochs: %s',args.epochs)
logger.info('batch size: %s',args.batch_size)
logger.info('output: %s',args.output)
logger.info('interop: %s',args.interop)
logger.info('intraop: %s',args.intraop)
logger.info('using tensorflow version: %s (%s)',tf.__version__,tf.__git_version__)
logger.info('using tensorflow from: %s',tf.__file__)
if hvd:
logger.info('using horovod version: %s',horovod.__version__)
logger.info('using horovod from: %s',horovod.__file__)
output_data = {
'input': args.input,
'epochs': args.epochs,
'batch_size': args.batch_size,
'interop': args.interop,
'intraop': args.intraop,
'horovod': args.horovod,
'tf_version': tf.__version__,
'tf_path': tf.__file__,
'nranks': nranks,
}
if hvd:
output_data['hvd_version'] = horovod.__version__
output_data['hvd_path'] = horovod.__file__
# can use wget https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data(args.input)
x_train, x_test = x_train / 255.0, x_test / 255.0
# Add a channels dimension
x_train = x_train[..., tf.newaxis].astype("float32")
x_test = x_test[..., tf.newaxis].astype("float32")
train_ds = tf.data.Dataset.from_tensor_slices(
(x_train, y_train)).shuffle(10000).batch(args.batch_size)
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(args.batch_size)
# Create an instance of the model
model = MyModel()
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.Adam()
# Add Horovod Distributed Optimizer
if hvd:
optimizer = hvd.DistributedOptimizer(optimizer)
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')
output_data['epoch_data'] = []
first_batch = True
for epoch in range(args.epochs):
# Reset the metrics at the start of the next epoch
train_loss.reset_states()
train_accuracy.reset_states()
test_loss.reset_states()
test_accuracy.reset_states()
train_img_per_sec = CalcMean()
test_img_per_sec = CalcMean()
batch_counter = 0
for images, labels in train_ds:
start = time.time()
train_step(model,loss_object,optimizer,train_loss,train_accuracy,
images, labels, hvd, first_batch)
duration = time.time() - start
current_img_rate = args.batch_size / duration
if hvd:
current_img_rate *= hvd.size()
# first few batches are slow due to compile time, so exclude them from the average
if batch_counter > 10:
train_img_per_sec.add_value(current_img_rate)
first_batch = False
batch_counter += 1
batch_counter = 0
for test_images, test_labels in test_ds:
start = time.time()
test_step(model,loss_object,test_loss,test_accuracy,test_images,test_labels)
duration = time.time() - start
current_img_rate = args.batch_size / duration
if hvd:
current_img_rate *= hvd.size()
# first few batches are slow due to compile time, so exclude them from the average
if batch_counter > 1:
test_img_per_sec.add_value(current_img_rate)
batch_counter += 1
#print(model.summary())
logger.info('[Epoch %02d] Train Loss: %10f Acc: %10f ImgRate: %10f = Test Loss: %10f Acc: %10f ImgRate: %10f',
epoch, train_loss.result(), train_accuracy.result(), train_img_per_sec.mean(),
test_loss.result(), test_accuracy.result(), test_img_per_sec.mean())
output_data['epoch_data'].append({
'epoch': epoch,
'train_loss': float(train_loss.result()),
'train_acc': float(train_accuracy.result()),
'train_img_per_sec_mean': train_img_per_sec.mean(),
'train_img_per_sec_sigma': train_img_per_sec.sigma(),
'test_loss': float(test_loss.result()),
'test_acc': float(test_accuracy.result()),
'test_img_per_sec_mean': test_img_per_sec.mean(),
'test_img_per_sec_sigma': test_img_per_sec.sigma(),
})
if rank == 0:
with open(args.output, 'w') as output_file:
json.dump(output_data, output_file, sort_keys=True, indent=2)
class MyModel(Model):
def __init__(self):
super(MyModel, self).__init__()
self.conv1 = Conv2D(32, 3, activation='relu')
self.flatten = Flatten()
self.d1 = Dense(128, activation='relu')
self.d2 = Dense(10)
def call(self, x):
x = self.conv1(x)
x = self.flatten(x)
x = self.d1(x)
return self.d2(x)
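# Shape walkthrough (assuming 28x28x1 MNIST inputs plus a batch dimension):
# conv1 (32 3x3 filters, 'valid' padding) -> (batch, 26, 26, 32)
# flatten -> (batch, 21632)
# d1 -> (batch, 128); d2 -> (batch, 10) raw logits (softmax is applied by the from_logits=True loss)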
@tf.function
def train_step(model,loss_object,optimizer,train_loss,train_accuracy,
images, labels, hvd, first_batch):
with tf.GradientTape() as tape:
# training=True is only needed if there are layers with different
# behavior during training versus inference (e.g. Dropout).
predictions = model(images, training=True)
loss = loss_object(labels, predictions)
# Horovod: add Horovod Distributed GradientTape.
if hvd:
tape = hvd.DistributedGradientTape(tape)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
train_loss(loss)
train_accuracy(labels, predictions)
# Horovod: broadcast initial variable states from rank 0 to all other processes.
# This is necessary to ensure consistent initialization of all workers when
# training is started with random weights or restored from a checkpoint.
#
# Note: broadcast should be done after the first gradient step to ensure optimizer
# initialization.
if first_batch and hvd:
hvd.broadcast_variables(model.variables, root_rank=0)
hvd.broadcast_variables(optimizer.variables(), root_rank=0)
@tf.function
def test_step(model,loss_object,test_loss,test_accuracy,images, labels):
# training=False is only needed if there are layers with different
# behavior during training versus inference (e.g. Dropout).
predictions = model(images, training=False)
t_loss = loss_object(labels, predictions)
test_loss(t_loss)
test_accuracy(labels, predictions)
if __name__ == "__main__":
main()
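# Example invocations (hypothetical paths):
#   python horovod_mnist.py -i mnist.npz
# or distributed across 4 ranks with Horovod:
#   horovodrun -np 4 python horovod_mnist.py --horovod -i mnist.npz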
|
[] |
[] |
[
"TF_CPP_MIN_VLOG_LEVEL",
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_VLOG_LEVEL", "TF_CPP_MIN_LOG_LEVEL"]
|
python
| 2 | 0 | |
tools/runners/run-test-suite-test262.py
|
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import os
import re
import shutil
import subprocess
import sys
import util
def get_platform_cmd_prefix():
if sys.platform == 'win32':
return ['cmd', '/S', '/C']
return ['python2'] # The official test262.py isn't Python 3 compatible, but it has a plain python shebang.
def get_arguments():
execution_runtime = os.environ.get('RUNTIME', '')
parser = argparse.ArgumentParser()
parser.add_argument('--runtime', metavar='FILE', default=execution_runtime,
help='Execution runtime (e.g. qemu)')
parser.add_argument('--engine', metavar='FILE', required=True,
help='JerryScript binary to run tests with')
parser.add_argument('--test-dir', metavar='DIR', required=True,
help='Directory containing the test262 test suite')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--es51', action='store_true',
help='Run test262 ES5.1 version')
group.add_argument('--es2015', default=False, const='default',
nargs='?', choices=['default', 'all', 'update'],
help='Run test262 - ES2015. default: all tests except excludelist, ' +
'all: all tests, update: all tests and update excludelist')
group.add_argument('--esnext', default=False, const='default',
nargs='?', choices=['default', 'all', 'update'],
help='Run test262 - ES.next. default: all tests except excludelist, ' +
'all: all tests, update: all tests and update excludelist')
parser.add_argument('--test262-test-list', metavar='LIST',
help='Comma-separated list of tests or directories to run from the test262 test suite')
args = parser.parse_args()
if args.es2015:
args.test_dir = os.path.join(args.test_dir, 'es2015')
args.test262_harness_dir = os.path.abspath(os.path.dirname(__file__))
args.test262_git_hash = 'fd44cd73dfbce0b515a2474b7cd505d6176a9eb5'
args.excludelist_path = os.path.join('tests', 'test262-es6-excludelist.xml')
elif args.esnext:
args.test_dir = os.path.join(args.test_dir, 'esnext')
args.test262_harness_dir = os.path.abspath(os.path.dirname(__file__))
args.test262_git_hash = '281eb10b2844929a7c0ac04527f5b42ce56509fd'
args.excludelist_path = os.path.join('tests', 'test262-esnext-excludelist.xml')
else:
args.test_dir = os.path.join(args.test_dir, 'es51')
args.test262_harness_dir = args.test_dir
args.test262_git_hash = 'es5-tests'
args.mode = args.es2015 or args.esnext
return args
def prepare_test262_test_suite(args):
if os.path.isdir(os.path.join(args.test_dir, '.git')):
return 0
return_code = subprocess.call(['git', 'clone', '--no-checkout',
'https://github.com/tc39/test262.git', args.test_dir])
if return_code:
print('Cloning test262 repository failed.')
return return_code
return_code = subprocess.call(['git', 'checkout', args.test262_git_hash], cwd=args.test_dir)
assert not return_code, 'Checking out the test262 revision failed - invalid git revision.'
if args.es51:
path_to_remove = os.path.join(args.test_dir, 'test', 'suite', 'bestPractice')
if os.path.isdir(path_to_remove):
shutil.rmtree(path_to_remove)
path_to_remove = os.path.join(args.test_dir, 'test', 'suite', 'intl402')
if os.path.isdir(path_to_remove):
shutil.rmtree(path_to_remove)
return 0
def update_exclude_list(args):
print("=== Summary - updating excludelist ===\n")
passing_tests = set()
failing_tests = set()
new_passing_tests = set()
with open(os.path.join(os.path.dirname(args.engine), 'test262.report'), 'r') as report_file:
for line in report_file:
match = re.match('(=== )?(.*) (?:failed|passed) in (?:non-strict|strict)', line)
if match:
(unexpected, test) = match.groups()
test = test.replace('\\', '/')
if unexpected:
failing_tests.add(test + '.js')
else:
passing_tests.add(test + '.js')
# Tests that pass in strict mode but fail in non-strict mode (or vice versa) should be considered failures
passing_tests = passing_tests - failing_tests
with open(args.excludelist_path, 'r+') as exclude_file:
lines = exclude_file.readlines()
exclude_file.seek(0)
exclude_file.truncate()
# Skip the last line "</excludeList>" to be able to insert new failing tests.
for line in lines[:-1]:
match = re.match(r" <test id=\"(\S*)\">", line)
if match:
test = match.group(1)
if test in failing_tests:
failing_tests.remove(test)
exclude_file.write(line)
elif test in passing_tests:
new_passing_tests.add(test)
else:
exclude_file.write(line)
else:
exclude_file.write(line)
if failing_tests:
print("New failing tests added to the excludelist")
for test in sorted(failing_tests):
exclude_file.write(' <test id="' + test + '"><reason></reason></test>\n')
print(" " + test)
print("")
exclude_file.write('</excludeList>\n')
if new_passing_tests:
print("New passing tests removed from the excludelist")
for test in sorted(new_passing_tests):
print(" " + test)
print("")
if failing_tests or new_passing_tests:
print("Excludelist was updated succesfully.")
return 1
print("Excludelist was already up-to-date.")
return 0
def main(args):
return_code = prepare_test262_test_suite(args)
if return_code:
return return_code
if sys.platform == 'win32':
original_timezone = util.get_timezone()
util.set_sighdl_to_reset_timezone(original_timezone)
util.set_timezone('Pacific Standard Time')
command = (args.runtime + ' ' + args.engine).strip()
if args.es2015 or args.esnext:
try:
subprocess.check_output(["timeout", "--version"])
command = "timeout 3 " + command
except subprocess.CalledProcessError:
pass
kwargs = {}
if sys.version_info.major >= 3:
kwargs['errors'] = 'ignore'
if args.es51:
test262_harness_path = os.path.join(args.test262_harness_dir, 'tools/packaging/test262.py')
else:
test262_harness_path = os.path.join(args.test262_harness_dir, 'test262-harness.py')
test262_command = get_platform_cmd_prefix() + \
[test262_harness_path,
'--command', command,
'--tests', args.test_dir,
'--summary']
if 'excludelist_path' in args and args.mode == 'default':
test262_command.extend(['--exclude-list', args.excludelist_path])
if args.test262_test_list:
test262_command.extend(args.test262_test_list.split(','))
proc = subprocess.Popen(test262_command,
universal_newlines=True,
stdout=subprocess.PIPE,
**kwargs)
return_code = 1
with open(os.path.join(os.path.dirname(args.engine), 'test262.report'), 'w') as output_file:
counter = 0
summary_found = False
summary_end_found = False
while True:
output = proc.stdout.readline()
if not output:
break
output_file.write(output)
if output.startswith('=== Summary ==='):
summary_found = True
print('')
if summary_found:
if not summary_end_found:
print(output, end='')
if not output.strip():
summary_end_found = True
if 'All tests succeeded' in output:
return_code = 0
elif re.search('in (non-)?strict mode', output):
counter += 1
if (counter % 100) == 0:
print(".", end='')
if (counter % 5000) == 0:
print(" Executed %d tests." % counter)
proc.wait()
if sys.platform == 'win32':
util.set_timezone(original_timezone)
if args.mode == 'update':
return_code = update_exclude_list(args)
return return_code
if __name__ == "__main__":
sys.exit(main(get_arguments()))
|
[] |
[] |
[
"RUNTIME"
] |
[]
|
["RUNTIME"]
|
python
| 1 | 0 | |
csrv/model/parameters.py
|
"""Parameters passed to an action/ability.
These represent targets for an action, such as cards to trash during install,
or cards to select for an operation.
They should be as general as possible so the client doesn't have to know too
many different request types. Some minimal enforcement can be done here, but
the action should do the heavy lifting for validation.
"""
import re
from csrv.model import errors
from csrv.model import game_object
VALID_FIELD_RE = re.compile(r'^[a-z][a-z_]+$')
NO_RESOLVE_FIELDS = set(['number', 'credits'])
class Response(object):
def _resolve(self, game, field, val):
if field in NO_RESOLVE_FIELDS:
return val
else:
return game.get_game_object(val)
def merge_from_dict(self, game, d):
for key in d.keys():
if VALID_FIELD_RE.match(key):
var = getattr(self, key)
if isinstance(var, list):
for val in d[key]:
var.append(self._resolve(game, key, val))
else:
setattr(self, key, self._resolve(game, key, d[key]))
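# Hypothetical example: a client dict like {'cards': [12, 14], 'number': 3} would append the
# game objects with ids 12 and 14 to self.cards and set self.number to 3 unchanged, since
# fields listed in NO_RESOLVE_FIELDS are copied as-is rather than resolved to game objects.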
class Request(game_object.GameObject):
RESPONSE_CLASS = Response
def __init__(self, game, card=None):
game_object.GameObject.__init__(self, game)
self.card = card
def new_response(self):
return self.RESPONSE_CLASS()
def valid_response_options(self):
return {}
class NullResponse(Response):
def __bool__(self):
return False
class NullRequest(Request):
RESPONSE_CLASS = NullResponse
class InstallIceResponse(Response):
def __init__(self):
self.server = None
self.ice_to_trash = []
class InstallIceRequest(Request):
RESPONSE_CLASS = InstallIceResponse
class InstallProgramResponse(Response):
def __init__(self):
self.programs_to_trash = []
self.host = None
class InstallProgramRequest(Request):
RESPONSE_CLASS = InstallProgramResponse
def valid_response_options(self):
hosts = self.card.install_host_targets()
programs_to_trash = self.card.install_programs_to_trash_targets()
return {
'host': [h.game_id for h in hosts],
'programs_to_trash': [p.game_id for p in programs_to_trash],
}
class InstallHardwareResponse(Response):
def __init__(self):
self.host = None
class InstallHardwareRequest(Request):
RESPONSE_CLASS = InstallHardwareResponse
def valid_response_options(self):
hosts = self.card.install_host_targets()
return {
'host': [h.game_id for h in hosts],
}
class InstallResourceResponse(Response):
pass
class InstallResourceRequest(Request):
RESPONSE_CLASS = InstallResourceResponse
class InstallAgendaAssetUpgradeResponse(Response):
def __init__(self):
self.server = None
self.cards_to_trash = []
class InstallAgendaAssetUpgradeRequest(Request):
RESPONSE_CLASS = InstallAgendaAssetUpgradeResponse
class InstallAgendaAssetResponse(InstallAgendaAssetUpgradeResponse):
pass
class InstallAgendaAssetRequest(InstallAgendaAssetUpgradeRequest):
RESPONSE_CLASS = InstallAgendaAssetResponse
class InstallUpgradeResponse(InstallAgendaAssetUpgradeResponse):
pass
class InstallUpgradeRequest(InstallAgendaAssetUpgradeRequest):
RESPONSE_CLASS = InstallUpgradeResponse
class TargetInstalledCorpCardResponse(Response):
def __init__(self):
Response.__init__(self)
self.card = None
class TargetInstalledCorpCardRequest(Request):
RESPONSE_CLASS = TargetInstalledCorpCardResponse
class TargetServerResponse(Response):
def __init__(self):
Response.__init__(self)
self.server = None
class TargetServerRequest(Request):
RESPONSE_CLASS = TargetServerResponse
class StackCardResponse(Response):
def __init__(self):
Response.__init__(self)
self.card = None
self.cards = []
class StackCardRequest(Request):
"""A request for cards from the runners"""
RESPONSE_CLASS = StackCardResponse
def __init__(self, game, card=None, max_cards=1, min_cards=1):
Request.__init__(self, game, card)
self.max_cards = max_cards
self.min_cards = min_cards
class ArchivesCardsResponse(Response):
def __init__(self):
Response.__init__(self)
self.cards = []
class ArchivesCardsRequest(Request):
RESPONSE_CLASS = ArchivesCardsResponse
class RndCardsResponse(Response):
def __init__(self):
Response.__init__(self)
self.cards = []
class RndCardsRequest(Request):
RESPONSE_CLASS = RndCardsResponse
class HqCardsResponse(Response):
def __init__(self):
Response.__init__(self)
self.cards = []
class HqCardsRequest(Request):
RESPONSE_CLASS = HqCardsResponse
class HeapCardsResponse(Response):
def __init__(self):
Response.__init__(self)
self.cards = []
class HeapCardsRequest(Request):
RESPONSE_CLASS = HeapCardsResponse
class ForfeitAgendaResponse(Response):
def __init__(self):
Response.__init__(self)
self.agenda = None
class ForfeitAgendaRequest(Request):
RESPONSE_CLASS = ForfeitAgendaResponse
def valid_response_options(self):
agendas = self.card.forfeit_agenda_targets()
return {
'agendas': [a.game_id for a in agendas],
}
class NumericChoiceResponse(Response):
def __init__(self):
Response.__init__(self)
self.number = 0
class NumericChoiceRequest(Request):
RESPONSE_CLASS = NumericChoiceResponse
class VariableCreditCostResponse(Response):
def __init__(self):
Response.__init__(self)
self.credits = 0
class VariableCreditCostRequest(Request):
RESPONSE_CLASS = VariableCreditCostResponse
class ArrangeCardsResponse(Response):
def __init__(self):
Response.__init__(self)
self.cards = []
class ArrangeCardsRequest(Request):
RESPONSE_CLASS = ArrangeCardsResponse
def __init__(self, game, cards=None):
Request.__init__(self, game)
self.cards = cards
def valid_response_options(self):
return {'cards': [c.game_id for c in self.cards]}
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
src/sentry/conf/server.py
|
"""
sentry.conf.server
~~~~~~~~~~~~~~~~~~
These settings act as the default (base) settings for the Sentry-provided web-server
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.conf.global_settings import * # NOQA
import os
import os.path
import socket
import sys
import tempfile
import sentry
from datetime import timedelta
from six.moves.urllib.parse import urlparse
gettext_noop = lambda s: s
socket.setdefaulttimeout(5)
DEBUG = False
TEMPLATE_DEBUG = True
MAINTENANCE = False
ADMINS = ()
INTERNAL_IPS = ()
MANAGERS = ADMINS
APPEND_SLASH = True
PROJECT_ROOT = os.path.normpath(os.path.join(os.path.dirname(__file__), os.pardir))
# XXX(dcramer): handle case when we've installed from source vs just running
# this straight out of the repository
if 'site-packages' in __file__:
NODE_MODULES_ROOT = os.path.join(PROJECT_ROOT, 'node_modules')
else:
NODE_MODULES_ROOT = os.path.join(PROJECT_ROOT, os.pardir, os.pardir, 'node_modules')
NODE_MODULES_ROOT = os.path.normpath(NODE_MODULES_ROOT)
sys.path.insert(0, os.path.normpath(os.path.join(PROJECT_ROOT, os.pardir)))
DATABASES = {
'default': {
'ENGINE': 'sentry.db.postgres',
'NAME': 'sentry',
'USER': 'postgres',
'PASSWORD': '',
'HOST': '',
'PORT': '',
'AUTOCOMMIT': True,
'ATOMIC_REQUESTS': False,
}
}
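# DATABASE_URL is expected in standard URL form, e.g. (hypothetical values)
# postgres://sentry_user:secret@db.example.com:5432/sentry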
if 'DATABASE_URL' in os.environ:
url = urlparse(os.environ['DATABASE_URL'])
# Ensure default database exists.
DATABASES['default'] = DATABASES.get('default', {})
# Update with environment configuration.
DATABASES['default'].update({
'NAME': url.path[1:],
'USER': url.username,
'PASSWORD': url.password,
'HOST': url.hostname,
'PORT': url.port,
})
if url.scheme == 'postgres':
DATABASES['default']['ENGINE'] = 'sentry.db.postgres'
if url.scheme == 'mysql':
DATABASES['default']['ENGINE'] = 'django.db.backends.mysql'
# This should always be UTC.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
LANGUAGES = (
('af', gettext_noop('Afrikaans')),
('ar', gettext_noop('Arabic')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('be', gettext_noop('Belarusian')),
('bn', gettext_noop('Bengali')),
('br', gettext_noop('Breton')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('ga', gettext_noop('Irish')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hu', gettext_noop('Hungarian')),
('ia', gettext_noop('Interlingua')),
('id', gettext_noop('Indonesian')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lb', gettext_noop('Luxembourgish')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('my', gettext_noop('Burmese')),
('nb', gettext_noop('Norwegian Bokmal')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('os', gettext_noop('Ossetic')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sv-se', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('udm', gettext_noop('Udmurt')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-cn', gettext_noop('Simplified Chinese')),
('zh-tw', gettext_noop('Traditional Chinese')),
)
from .locale import CATALOGS
LANGUAGES = tuple((code, name) for code, name in LANGUAGES
if code in CATALOGS)
SUPPORTED_LANGUAGES = frozenset(CATALOGS)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
USE_TZ = True
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'sentry.middleware.proxy.ContentLengthHeaderMiddleware',
'sentry.middleware.security.SecurityHeadersMiddleware',
'sentry.middleware.maintenance.ServicesUnavailableMiddleware',
'sentry.middleware.env.SentryEnvMiddleware',
'sentry.middleware.proxy.SetRemoteAddrFromForwardedFor',
'sentry.middleware.debug.NoIfModifiedSinceMiddleware',
'sentry.middleware.stats.RequestTimingMiddleware',
'sentry.middleware.stats.ResponseCodeMiddleware',
'sentry.middleware.health.HealthCheck', # Must exist before CommonMiddleware
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'sentry.middleware.auth.AuthenticationMiddleware',
'sentry.middleware.sudo.SudoMiddleware',
'sentry.middleware.superuser.SuperuserMiddleware',
'sentry.middleware.locale.SentryLocaleMiddleware',
# TODO(dcramer): kill this once we verify its safe
# 'sentry.middleware.social_auth.SentrySocialAuthExceptionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'sentry.debug.middleware.DebugMiddleware',
)
ROOT_URLCONF = 'sentry.conf.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.csrf',
'django.core.context_processors.request',
'social_auth.context_processors.social_auth_by_name_backends',
'social_auth.context_processors.social_auth_backends',
'social_auth.context_processors.social_auth_by_type_backends',
'social_auth.context_processors.social_auth_login_redirect'
)
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'crispy_forms',
'debug_toolbar',
'raven.contrib.django.raven_compat',
'rest_framework',
'sentry',
'sentry.nodestore',
'sentry.search',
'sentry.lang.javascript',
'sentry.lang.native',
'sentry.plugins.sentry_interface_types',
'sentry.plugins.sentry_mail',
'sentry.plugins.sentry_urls',
'sentry.plugins.sentry_useragents',
'sentry.plugins.sentry_webhooks',
'social_auth',
'south',
'sudo',
)
STATIC_ROOT = os.path.realpath(os.path.join(PROJECT_ROOT, 'static'))
STATIC_URL = '/_static/{version}/'
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
)
ASSET_VERSION = 0
# setup a default media root to somewhere useless
MEDIA_ROOT = '/tmp/sentry-media'
LOCALE_PATHS = (
os.path.join(PROJECT_ROOT, 'locale'),
)
CSRF_FAILURE_VIEW = 'sentry.web.frontend.csrf_failure.view'
CSRF_COOKIE_NAME = 'sc'
# Auth configuration
try:
from django.core.urlresolvers import reverse_lazy
except ImportError:
LOGIN_REDIRECT_URL = '/login-redirect/'
LOGIN_URL = '/auth/login/'
else:
LOGIN_REDIRECT_URL = reverse_lazy('sentry-login-redirect')
LOGIN_URL = reverse_lazy('sentry-login')
AUTHENTICATION_BACKENDS = (
'sentry.utils.auth.EmailAuthBackend',
# TODO(dcramer): remove social auth backends in 8.11
'social_auth.backends.github.GithubBackend',
'social_auth.backends.bitbucket.BitbucketBackend',
'social_auth.backends.trello.TrelloBackend',
)
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'sentry.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {
'min_length': 6,
},
},
]
SOCIAL_AUTH_USER_MODEL = AUTH_USER_MODEL = 'sentry.User'
SOCIAL_AUTH_AUTHENTICATION_BACKENDS = (
'social_auth.backends.github.GithubBackend',
'social_auth.backends.bitbucket.BitbucketBackend',
'social_auth.backends.trello.TrelloBackend',
)
SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
SESSION_COOKIE_NAME = "sentrysid"
SESSION_SERIALIZER = "django.contrib.sessions.serializers.PickleSerializer"
GOOGLE_OAUTH2_CLIENT_ID = ''
GOOGLE_OAUTH2_CLIENT_SECRET = ''
GITHUB_APP_ID = ''
GITHUB_API_SECRET = ''
TRELLO_API_KEY = ''
TRELLO_API_SECRET = ''
BITBUCKET_CONSUMER_KEY = ''
BITBUCKET_CONSUMER_SECRET = ''
SOCIAL_AUTH_PIPELINE = (
'social_auth.backends.pipeline.user.get_username',
'social_auth.backends.pipeline.social.social_auth_user',
'social_auth.backends.pipeline.associate.associate_by_email',
'social_auth.backends.pipeline.misc.save_status_to_session',
'social_auth.backends.pipeline.social.associate_user',
'social_auth.backends.pipeline.social.load_extra_data',
'social_auth.backends.pipeline.user.update_user_details',
'social_auth.backends.pipeline.misc.save_status_to_session',
)
SOCIAL_AUTH_REVOKE_TOKENS_ON_DISCONNECT = True
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/account/settings/identities/'
SOCIAL_AUTH_ASSOCIATE_ERROR_URL = SOCIAL_AUTH_LOGIN_REDIRECT_URL
INITIAL_CUSTOM_USER_MIGRATION = '0108_fix_user'
# Auth engines and the settings required for them to be listed
AUTH_PROVIDERS = {
'github': ('GITHUB_APP_ID', 'GITHUB_API_SECRET'),
'trello': ('TRELLO_API_KEY', 'TRELLO_API_SECRET'),
'bitbucket': ('BITBUCKET_CONSUMER_KEY', 'BITBUCKET_CONSUMER_SECRET'),
}
AUTH_PROVIDER_LABELS = {
'github': 'GitHub',
'trello': 'Trello',
'bitbucket': 'Bitbucket'
}
import random
SOCIAL_AUTH_DEFAULT_USERNAME = lambda: random.choice(['Darth Vader', 'Obi-Wan Kenobi', 'R2-D2', 'C-3PO', 'Yoda'])
SOCIAL_AUTH_PROTECTED_USER_FIELDS = ['email']
SOCIAL_AUTH_FORCE_POST_DISCONNECT = True
# Queue configuration
from kombu import Exchange, Queue
BROKER_URL = "redis://localhost:6379"
BROKER_TRANSPORT_OPTIONS = {}
# Ensure workers run async by default
# in Development you might want them to run in-process
# though it would cause timeouts/recursions in some cases
CELERY_ALWAYS_EAGER = False
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
CELERY_IGNORE_RESULT = True
CELERY_SEND_EVENTS = False
CELERY_RESULT_BACKEND = None
CELERY_TASK_RESULT_EXPIRES = 1
CELERY_DISABLE_RATE_LIMITS = True
CELERY_DEFAULT_QUEUE = "default"
CELERY_DEFAULT_EXCHANGE = "default"
CELERY_DEFAULT_EXCHANGE_TYPE = "direct"
CELERY_DEFAULT_ROUTING_KEY = "default"
CELERY_CREATE_MISSING_QUEUES = True
CELERY_REDIRECT_STDOUTS = False
CELERYD_HIJACK_ROOT_LOGGER = False
CELERY_IMPORTS = (
'sentry.tasks.auth',
'sentry.tasks.auto_resolve_issues',
'sentry.tasks.beacon',
'sentry.tasks.check_auth',
'sentry.tasks.clear_expired_snoozes',
'sentry.tasks.collect_project_platforms',
'sentry.tasks.deletion',
'sentry.tasks.digests',
'sentry.tasks.dsymcache',
'sentry.tasks.email',
'sentry.tasks.merge',
'sentry.tasks.options',
'sentry.tasks.ping',
'sentry.tasks.post_process',
'sentry.tasks.process_buffer',
'sentry.tasks.reports',
'sentry.tasks.store',
)
CELERY_QUEUES = [
Queue('alerts', routing_key='alerts'),
Queue('auth', routing_key='auth'),
Queue('cleanup', routing_key='cleanup'),
Queue('default', routing_key='default'),
Queue('digests.delivery', routing_key='digests.delivery'),
Queue('digests.scheduling', routing_key='digests.scheduling'),
Queue('email', routing_key='email'),
Queue('events.preprocess_event', routing_key='events.preprocess_event'),
Queue('events.process_event', routing_key='events.process_event'),
Queue('events.save_event', routing_key='events.save_event'),
Queue('merge', routing_key='merge'),
Queue('options', routing_key='options'),
Queue('reports.deliver', routing_key='reports.deliver'),
Queue('reports.prepare', routing_key='reports.prepare'),
Queue('search', routing_key='search'),
Queue('stats', routing_key='stats'),
Queue('update', routing_key='update'),
]
for queue in CELERY_QUEUES:
queue.durable = False
CELERY_ROUTES = ('sentry.queue.routers.SplitQueueRouter',)
def create_partitioned_queues(name):
exchange = Exchange(name, type='direct')
for num in range(1):
CELERY_QUEUES.append(Queue(
'{0}-{1}'.format(name, num),
exchange=exchange,
))
create_partitioned_queues('counters')
create_partitioned_queues('triggers')
from celery.schedules import crontab
CELERYBEAT_SCHEDULE_FILENAME = os.path.join(tempfile.gettempdir(), 'sentry-celerybeat')
CELERYBEAT_SCHEDULE = {
'check-auth': {
'task': 'sentry.tasks.check_auth',
'schedule': timedelta(minutes=1),
'options': {
'expires': 60,
'queue': 'auth',
}
},
'send-beacon': {
'task': 'sentry.tasks.send_beacon',
'schedule': timedelta(hours=1),
'options': {
'expires': 3600,
},
},
'send-ping': {
'task': 'sentry.tasks.send_ping',
'schedule': timedelta(minutes=1),
'options': {
'expires': 60,
},
},
'flush-buffers': {
'task': 'sentry.tasks.process_buffer.process_pending',
'schedule': timedelta(seconds=10),
'options': {
'expires': 10,
'queue': 'counters-0',
}
},
'sync-options': {
'task': 'sentry.tasks.options.sync_options',
'schedule': timedelta(seconds=10),
'options': {
'expires': 10,
'queue': 'options',
}
},
'schedule-digests': {
'task': 'sentry.tasks.digests.schedule_digests',
'schedule': timedelta(seconds=30),
'options': {
'expires': 30,
},
},
'clear-expired-snoozes': {
'task': 'sentry.tasks.clear_expired_snoozes',
'schedule': timedelta(minutes=5),
'options': {
'expires': 300,
},
},
# Disabled for the time being:
# 'clear-old-cached-dsyms': {
# 'task': 'sentry.tasks.clear_old_cached_dsyms',
# 'schedule': timedelta(minutes=60),
# 'options': {
# 'expires': 3600,
# },
# },
'collect-project-platforms': {
'task': 'sentry.tasks.collect_project_platforms',
'schedule': timedelta(days=1),
'options': {
'expires': 3600 * 24,
},
},
'schedule-auto-resolution': {
'task': 'sentry.tasks.schedule_auto_resolution',
'schedule': timedelta(minutes=15),
'options': {
'expires': 60 * 25,
},
},
'schedule-weekly-organization-reports': {
'task': 'sentry.tasks.reports.prepare_reports',
'schedule': crontab(
minute=0,
hour=12, # 05:00 PDT, 09:00 EDT, 12:00 UTC
day_of_week='monday',
),
'options': {
'expires': 60 * 60 * 3,
},
},
}
# Sentry logs to two major places: stdout, and its internal project.
# To disable logging to the internal project, add a logger whose only
# handler is 'console' and disable propagating upwards.
# Additionally, Sentry has the ability to override logger levels by
# providing the cli with -l/--loglevel or the SENTRY_LOG_LEVEL env var.
# The loggers that it overrides are root and any in LOGGING.overridable.
# Be very careful with this in a production system, because the celery
# logger can be extremely verbose when given INFO or DEBUG.
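# For example (hypothetical), to keep celery output on stdout only and out of the internal
# project, a deployment could add:
#     'celery': {'handlers': ['console'], 'propagate': False},
# under LOGGING['loggers'] below.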
LOGGING = {
'default_level': 'INFO',
'version': 1,
'disable_existing_loggers': True,
'handlers': {
'null': {
'class': 'django.utils.log.NullHandler',
},
'console': {
'class': 'sentry.logging.handlers.StructLogHandler',
},
'internal': {
'level': 'ERROR',
'filters': ['sentry:internal'],
'class': 'raven.contrib.django.handlers.SentryHandler',
},
},
'filters': {
'sentry:internal': {
'()': 'sentry.utils.raven.SentryInternalFilter',
},
},
'root': {
'level': 'NOTSET',
'handlers': ['console', 'internal'],
},
# LOGGING.overridable is a list of loggers including root that will change
# based on the overridden level defined above.
'overridable': ['celery', 'sentry'],
'loggers': {
'celery': {
'level': 'WARN',
},
'sentry': {
'level': 'INFO',
},
'sentry.errors': {
'handlers': ['console'],
'propagate': False,
},
'sentry.rules': {
'handlers': ['console'],
'propagate': False,
},
'multiprocessing': {
'handlers': ['console'],
# https://github.com/celery/celery/commit/597a6b1f3359065ff6dbabce7237f86b866313df
# This commit has not been rolled into any release and leads to a
# large amount of errors when working with postgres.
'level': 'CRITICAL',
'propagate': False,
},
'celery.worker.job': {
'handlers': ['console'],
'propagate': False,
},
'static_compiler': {
'level': 'INFO',
},
'django.request': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
'toronado': {
'level': 'ERROR',
'handlers': ['null'],
'propagate': False,
},
'urllib3.connectionpool': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
'boto3': {
'level': 'WARNING',
'handlers': ['console'],
'propagate': False,
},
'botocore': {
'level': 'WARNING',
'handlers': ['console'],
'propagate': False,
},
}
}
# django-rest-framework
REST_FRAMEWORK = {
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
'DEFAULT_PERMISSION_CLASSES': (
'sentry.api.permissions.NoPermission',
),
}
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# Percy config for visual regression testing.
PERCY_DEFAULT_TESTING_WIDTHS = (1280, 375)
# Debugger
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.timer.TimerPanel',
'sentry.debug.panels.route.RoutePanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.sql.SQLPanel',
# TODO(dcramer): https://github.com/getsentry/sentry/issues/1722
# 'sentry.debug.panels.redis.RedisPanel',
)
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# Sentry and Raven configuration
SENTRY_CLIENT = 'sentry.utils.raven.SentryInternalClient'
SENTRY_FEATURES = {
'auth:register': True,
'organizations:api-keys': True,
'organizations:create': True,
'organizations:sso': True,
'organizations:callsigns': False,
'projects:global-events': False,
'projects:quotas': True,
'projects:plugins': True,
'projects:dsym': False,
'workflow:release-emails': False,
}
# Default time zone for localization in the UI.
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
SENTRY_DEFAULT_TIME_ZONE = 'UTC'
# Enable the Sentry Debugger (Beta)
SENTRY_DEBUGGER = False
SENTRY_IGNORE_EXCEPTIONS = (
'OperationalError',
)
# Should we send the beacon to the upstream server?
SENTRY_BEACON = True
# Allow access to Sentry without authentication.
SENTRY_PUBLIC = False
# Instruct Sentry that this install intends to be run by a single organization
# and thus various UI optimizations should be enabled.
SENTRY_SINGLE_ORGANIZATION = False
# Login url (defaults to LOGIN_URL)
SENTRY_LOGIN_URL = None
# Default project ID (for internal errors)
SENTRY_PROJECT = 1
# Project ID for recording frontend (javascript) exceptions
SENTRY_FRONTEND_PROJECT = None
# Only store a portion of all messages per unique group.
SENTRY_SAMPLE_DATA = True
# The following values control the sampling rates
SENTRY_SAMPLE_RATES = (
# up until N events, store 1 in M
(50, 1),
(1000, 2),
(10000, 10),
(100000, 50),
(1000000, 300),
(10000000, 2000),
)
SENTRY_MAX_SAMPLE_RATE = 10000
SENTRY_SAMPLE_TIMES = (
(3600, 1),
(360, 10),
(60, 60),
)
SENTRY_MAX_SAMPLE_TIME = 10000
# Web Service
SENTRY_WEB_HOST = 'localhost'
SENTRY_WEB_PORT = 9000
SENTRY_WEB_OPTIONS = {}
# SMTP Service
SENTRY_SMTP_HOST = 'localhost'
SENTRY_SMTP_PORT = 1025
SENTRY_INTERFACES = {
'csp': 'sentry.interfaces.csp.Csp',
'device': 'sentry.interfaces.device.Device',
'exception': 'sentry.interfaces.exception.Exception',
'logentry': 'sentry.interfaces.message.Message',
'query': 'sentry.interfaces.query.Query',
'request': 'sentry.interfaces.http.Http',
'sdk': 'sentry.interfaces.sdk.Sdk',
'stacktrace': 'sentry.interfaces.stacktrace.Stacktrace',
'template': 'sentry.interfaces.template.Template',
'user': 'sentry.interfaces.user.User',
'applecrashreport': 'sentry.interfaces.applecrash.AppleCrashReport',
'breadcrumbs': 'sentry.interfaces.breadcrumbs.Breadcrumbs',
'contexts': 'sentry.interfaces.contexts.Contexts',
'threads': 'sentry.interfaces.threads.Threads',
'debug_meta': 'sentry.interfaces.debug_meta.DebugMeta',
'sentry.interfaces.Exception': 'sentry.interfaces.exception.Exception',
'sentry.interfaces.Message': 'sentry.interfaces.message.Message',
'sentry.interfaces.Stacktrace': 'sentry.interfaces.stacktrace.Stacktrace',
'sentry.interfaces.Template': 'sentry.interfaces.template.Template',
'sentry.interfaces.Query': 'sentry.interfaces.query.Query',
'sentry.interfaces.Http': 'sentry.interfaces.http.Http',
'sentry.interfaces.User': 'sentry.interfaces.user.User',
'sentry.interfaces.Csp': 'sentry.interfaces.csp.Csp',
'sentry.interfaces.AppleCrashReport': 'sentry.interfaces.applecrash.AppleCrashReport',
'sentry.interfaces.Breadcrumbs': 'sentry.interfaces.breadcrumbs.Breadcrumbs',
'sentry.interfaces.Contexts': 'sentry.interfaces.contexts.Contexts',
'sentry.interfaces.Threads': 'sentry.interfaces.threads.Threads',
'sentry.interfaces.DebugMeta': 'sentry.interfaces.debug_meta.DebugMeta',
}
SENTRY_EMAIL_BACKEND_ALIASES = {
'smtp': 'django.core.mail.backends.smtp.EmailBackend',
'dummy': 'django.core.mail.backends.dummy.EmailBackend',
'console': 'django.core.mail.backends.console.EmailBackend',
}
SENTRY_FILESTORE_ALIASES = {
'filesystem': 'django.core.files.storage.FileSystemStorage',
's3': 'sentry.filestore.s3.S3Boto3Storage',
}
# set of backends that do not support needing SMTP mail.* settings
# This list is a bit fragile and hardcoded, but it's unlikely that
# a user will be using a different backend that also mandates SMTP
# credentials.
SENTRY_SMTP_DISABLED_BACKENDS = frozenset((
'django.core.mail.backends.dummy.EmailBackend',
'django.core.mail.backends.console.EmailBackend',
'django.core.mail.backends.locmem.EmailBackend',
'django.core.mail.backends.filebased.EmailBackend',
'sentry.utils.email.PreviewBackend',
))
# Should users without superuser permissions be allowed to
# make projects public
SENTRY_ALLOW_PUBLIC_PROJECTS = True
# Can users be invited to organizations?
SENTRY_ENABLE_INVITES = True
# Default to not sending the Access-Control-Allow-Origin header on api/store
SENTRY_ALLOW_ORIGIN = None
# Enable scraping of javascript context for source code
SENTRY_SCRAPE_JAVASCRIPT_CONTEXT = True
# Buffer backend
SENTRY_BUFFER = 'sentry.buffer.Buffer'
SENTRY_BUFFER_OPTIONS = {}
# Cache backend
# XXX: We explicitly require the cache to be configured as its not optional
# and causes serious confusion with the default django cache
SENTRY_CACHE = None
SENTRY_CACHE_OPTIONS = {}
# The internal Django cache is still used in many places
# TODO(dcramer): convert uses over to Sentry's backend
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
# The cache version affects both Django's internal cache (at runtime) as well
# as Sentry's cache. This automatically overrides VERSION on the default
# CACHES backend.
CACHE_VERSION = 1
# Digests backend
SENTRY_DIGESTS = 'sentry.digests.backends.dummy.DummyBackend'
SENTRY_DIGESTS_OPTIONS = {}
# Quota backend
SENTRY_QUOTAS = 'sentry.quotas.Quota'
SENTRY_QUOTA_OPTIONS = {}
# Rate limiting backend
SENTRY_RATELIMITER = 'sentry.ratelimits.base.RateLimiter'
SENTRY_RATELIMITER_OPTIONS = {}
# The default value for project-level quotas
SENTRY_DEFAULT_MAX_EVENTS_PER_MINUTE = '90%'
# Node storage backend
SENTRY_NODESTORE = 'sentry.nodestore.django.DjangoNodeStorage'
SENTRY_NODESTORE_OPTIONS = {}
# Search backend
SENTRY_SEARCH = 'sentry.search.django.DjangoSearchBackend'
SENTRY_SEARCH_OPTIONS = {}
# SENTRY_SEARCH_OPTIONS = {
# 'urls': ['http://localhost:9200/'],
# 'timeout': 5,
# }
# Time-series storage backend
SENTRY_TSDB = 'sentry.tsdb.dummy.DummyTSDB'
SENTRY_TSDB_OPTIONS = {}
# rollups must be ordered from highest granularity to lowest
SENTRY_TSDB_ROLLUPS = (
# (time in seconds, samples to keep)
(10, 360), # 60 minutes at 10 seconds
(3600, 24 * 7), # 7 days at 1 hour
(3600 * 24, 90), # 90 days at 1 day
)
# Internal metrics
SENTRY_METRICS_BACKEND = 'sentry.metrics.dummy.DummyMetricsBackend'
SENTRY_METRICS_OPTIONS = {}
SENTRY_METRICS_SAMPLE_RATE = 1.0
SENTRY_METRICS_PREFIX = 'sentry.'
# URI Prefixes for generating DSN URLs
# (Defaults to URL_PREFIX by default)
SENTRY_ENDPOINT = None
SENTRY_PUBLIC_ENDPOINT = None
# Prevent variables (e.g. context locals, http data, etc) from exceeding this
# size in characters
SENTRY_MAX_VARIABLE_SIZE = 512
# Prevent variables within extra context from exceeding this size in
# characters
SENTRY_MAX_EXTRA_VARIABLE_SIZE = 4096 * 4 # 16kb
# For changing the amount of data seen in Http Response Body part.
SENTRY_MAX_HTTP_BODY_SIZE = 4096 * 4 # 16kb
# For various attributes we don't limit the entire attribute on size, but the
# individual item. In those cases we also want to limit the maximum number of
# keys
SENTRY_MAX_DICTIONARY_ITEMS = 50
SENTRY_MAX_MESSAGE_LENGTH = 1024 * 8
SENTRY_MAX_STACKTRACE_FRAMES = 50
SENTRY_MAX_EXCEPTIONS = 25
# Gravatar service base url
SENTRY_GRAVATAR_BASE_URL = 'https://secure.gravatar.com'
# Timeout (in seconds) for fetching remote source files (e.g. JS)
SENTRY_SOURCE_FETCH_TIMEOUT = 5
# Timeout (in seconds) for socket operations when fetching remote source files
SENTRY_SOURCE_FETCH_SOCKET_TIMEOUT = 2
# Maximum content length for source files before we abort fetching
SENTRY_SOURCE_FETCH_MAX_SIZE = 40 * 1024 * 1024
# List of IP subnets which should not be accessible
SENTRY_DISALLOWED_IPS = ()
# Fields which managed users cannot change via Sentry UI. Username and password
# cannot be changed by managed users. Optionally include 'email' and
# 'name' in SENTRY_MANAGED_USER_FIELDS.
SENTRY_MANAGED_USER_FIELDS = ()
SENTRY_SCOPES = set([
'org:read',
'org:write',
'org:delete',
'member:read',
'member:write',
'member:delete',
'team:read',
'team:write',
'team:delete',
'project:read',
'project:write',
'project:delete',
'project:releases',
'event:read',
'event:write',
'event:delete',
])
SENTRY_DEFAULT_ROLE = 'member'
# Roles are ordered, which represents a sort-of hierarchy, as well as how
# they're presented in the UI. This is primarily important in that a member
# that is earlier in the chain cannot manage the settings of a member later
# in the chain (they still require the appropriate scope).
SENTRY_ROLES = (
{
'id': 'member',
'name': 'Member',
'desc': 'Members can view and act on events, as well as view most other data within the organization.',
'scopes': set([
'event:read', 'event:write', 'event:delete', 'project:releases',
'project:read', 'org:read', 'member:read', 'team:read',
]),
},
{
'id': 'admin',
'name': 'Admin',
'desc': 'Admin privileges on any teams of which they\'re a member. They can create new teams and projects, as well as remove teams and projects which they already hold membership on.',
'scopes': set([
'event:read', 'event:write', 'event:delete',
'org:read', 'member:read',
'project:read', 'project:write', 'project:delete', 'project:releases',
'team:read', 'team:write', 'team:delete',
]),
},
{
'id': 'manager',
'name': 'Manager',
'desc': 'Gains admin access on all teams as well as the ability to add and remove members.',
'is_global': True,
'scopes': set([
'event:read', 'event:write', 'event:delete',
'member:read', 'member:write', 'member:delete',
'project:read', 'project:write', 'project:delete', 'project:releases',
'team:read', 'team:write', 'team:delete',
'org:read', 'org:write',
]),
},
{
'id': 'owner',
'name': 'Owner',
'desc': 'Gains full permission across the organization. Can manage members as well as perform catastrophic operations such as removing the organization.',
'is_global': True,
'scopes': set([
'org:read', 'org:write', 'org:delete',
'member:read', 'member:write', 'member:delete',
'team:read', 'team:write', 'team:delete',
'project:read', 'project:write', 'project:delete', 'project:releases',
'event:read', 'event:write', 'event:delete',
]),
},
)
# See sentry/options/__init__.py for more information
SENTRY_OPTIONS = {}
SENTRY_DEFAULT_OPTIONS = {}
# You should not change this setting after your database has been created
# unless you have altered all schemas first
SENTRY_USE_BIG_INTS = False
# Delay (in ms) to induce on API responses
SENTRY_API_RESPONSE_DELAY = 0
# Watchers for various application purposes (such as compiling static media)
# XXX(dcramer): this doesn't work outside of a source distribution as the
# webpack.config.js is not part of Sentry's datafiles
SENTRY_WATCHERS = (
('webpack', [os.path.join(NODE_MODULES_ROOT, '.bin', 'webpack'), '--output-pathinfo', '--watch',
"--config={}".format(os.path.normpath(os.path.join(PROJECT_ROOT, os.pardir, os.pardir, "webpack.config.js")))]),
)
# Max file size for avatar photo uploads
SENTRY_MAX_AVATAR_SIZE = 5000000
# statuspage.io support
STATUS_PAGE_ID = None
STATUS_PAGE_API_HOST = 'statuspage.io'
SENTRY_ONPREMISE = True
def get_raven_config():
return {
'release': sentry.__build__,
'register_signals': True,
'include_paths': [
'sentry',
],
}
RAVEN_CONFIG = get_raven_config()
# Config options that are explicitly disabled from Django
DEAD = object()
# This will eventually get set from values in SENTRY_OPTIONS during
# sentry.runner.initializer:bootstrap_options
SECRET_KEY = DEAD
EMAIL_BACKEND = DEAD
EMAIL_HOST = DEAD
EMAIL_PORT = DEAD
EMAIL_HOST_USER = DEAD
EMAIL_HOST_PASSWORD = DEAD
EMAIL_USE_TLS = DEAD
SERVER_EMAIL = DEAD
EMAIL_SUBJECT_PREFIX = DEAD
SUDO_URL = 'sentry-sudo'
# TODO(dcramer): move this to sentry.io so it can be automated
SDK_VERSIONS = {
'raven-js': '3.3.0',
'raven-python': '5.23.0',
'sentry-laravel': '0.4.0',
'sentry-php': '1.5.0',
}
SDK_URLS = {
'raven-js': 'https://docs.sentry.io/clients/javascript/',
'raven-python': 'https://docs.sentry.io/clients/python/',
'raven-swift': 'https://docs.sentry.io/clients/cocoa/',
'sentry-php': 'https://docs.sentry.io/clients/php/',
'sentry-laravel': 'https://docs.sentry.io/clients/php/integrations/laravel/',
}
DEPRECATED_SDKS = {
# sdk name => new sdk name
'raven-objc': 'sentry-swift',
}
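# Illustrative sketch, not part of the original settings: combining the three
# mappings above to suggest an upgrade path for a (possibly deprecated) SDK.
# The helper name is hypothetical.
def _sdk_upgrade_hint(sdk_name):
    # Map a deprecated SDK name to its replacement, then look up version and docs URL.
    new_name = DEPRECATED_SDKS.get(sdk_name, sdk_name)
    return {
        'name': new_name,
        'version': SDK_VERSIONS.get(new_name),
        'url': SDK_URLS.get(new_name),
    }
# For example, _sdk_upgrade_hint('raven-python') returns the pinned version
# '5.23.0' and its documentation URL.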
|
[] |
[] |
[
"DATABASE_URL"
] |
[]
|
["DATABASE_URL"]
|
python
| 1 | 0 | |
server/urbanity/wsgi.py
|
"""
WSGI config for the urbanity project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "urbanity.settings")
application = get_wsgi_application()
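# Usage note (assumption, not part of this file): a WSGI server is pointed at the
# module-level "application" object defined above, for example:
#   gunicorn urbanity.wsgi:application --bind 0.0.0.0:8000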
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
plotly_study/graph_objs/histogram/__init__.py
|
from plotly_study.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class YBins(_BaseTraceHierarchyType):
# end
# ---
@property
def end(self):
"""
Sets the end value for the y axis bins. The last bin may not
end exactly at this value, we increment the bin edge by `size`
from `start` until we reach or exceed `end`. Defaults to the
maximum data value. Like `start`, for dates use a date string,
and for category data `end` is based on the category serial
numbers.
The 'end' property accepts values of any type
Returns
-------
Any
"""
return self["end"]
@end.setter
def end(self, val):
self["end"] = val
# size
# ----
@property
def size(self):
"""
Sets the size of each y axis bin. Default behavior: If `nbinsy`
is 0 or omitted, we choose a nice round bin size such that the
number of bins is about the same as the typical number of
samples in each bin. If `nbinsy` is provided, we choose a nice
round bin size giving no more than that many bins. For date
data, use milliseconds or "M<n>" for months, as in
`axis.dtick`. For category data, the number of categories to
bin together (always defaults to 1). If multiple non-overlaying
histograms share a subplot, the first explicit `size` is used
and all others discarded. If no `size` is provided, the sample
data from all traces is combined to determine `size` as
described above.
The 'size' property accepts values of any type
Returns
-------
Any
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# start
# -----
@property
def start(self):
"""
Sets the starting value for the y axis bins. Defaults to the
minimum data value, shifted down if necessary to make nice
round values and to remove ambiguous bin edges. For example, if
most of the data is integers we shift the bin edges 0.5 down,
so a `size` of 5 would have a default `start` of -0.5, so it is
clear that 0-4 are in the first bin, 5-9 in the second, but
continuous data gets a start of 0 and bins [0,5), [5,10) etc.
Dates behave similarly, and `start` should be a date string.
For category data, `start` is based on the category serial
numbers, and defaults to -0.5. If multiple non-overlaying
histograms share a subplot, the first explicit `start` is used
exactly and all others are shifted down (if necessary) to
differ from that one by an integer number of bins.
The 'start' property accepts values of any type
Returns
-------
Any
"""
return self["start"]
@start.setter
def start(self, val):
self["start"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "histogram"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
end
Sets the end value for the y axis bins. The last bin
may not end exactly at this value, we increment the bin
edge by `size` from `start` until we reach or exceed
`end`. Defaults to the maximum data value. Like
`start`, for dates use a date string, and for category
data `end` is based on the category serial numbers.
size
Sets the size of each y axis bin. Default behavior: If
`nbinsy` is 0 or omitted, we choose a nice round bin
size such that the number of bins is about the same as
the typical number of samples in each bin. If `nbinsy`
is provided, we choose a nice round bin size giving no
more than that many bins. For date data, use
milliseconds or "M<n>" for months, as in `axis.dtick`.
For category data, the number of categories to bin
together (always defaults to 1). If multiple non-
overlaying histograms share a subplot, the first
explicit `size` is used and all others discarded. If no
`size` is provided, the sample data from all traces is
combined to determine `size` as described above.
start
Sets the starting value for the y axis bins. Defaults
to the minimum data value, shifted down if necessary to
make nice round values and to remove ambiguous bin
edges. For example, if most of the data is integers we
shift the bin edges 0.5 down, so a `size` of 5 would
have a default `start` of -0.5, so it is clear that 0-4
are in the first bin, 5-9 in the second, but continuous
data gets a start of 0 and bins [0,5), [5,10) etc.
Dates behave similarly, and `start` should be a date
string. For category data, `start` is based on the
category serial numbers, and defaults to -0.5. If
multiple non-overlaying histograms share a subplot, the
first explicit `start` is used exactly and all others
are shifted down (if necessary) to differ from that one
by an integer number of bins.
"""
def __init__(self, arg=None, end=None, size=None, start=None, **kwargs):
"""
Construct a new YBins object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly_study.graph_objs.histogram.YBins
end
Sets the end value for the y axis bins. The last bin
may not end exactly at this value, we increment the bin
edge by `size` from `start` until we reach or exceed
`end`. Defaults to the maximum data value. Like
`start`, for dates use a date string, and for category
data `end` is based on the category serial numbers.
size
Sets the size of each y axis bin. Default behavior: If
`nbinsy` is 0 or omitted, we choose a nice round bin
size such that the number of bins is about the same as
the typical number of samples in each bin. If `nbinsy`
is provided, we choose a nice round bin size giving no
more than that many bins. For date data, use
milliseconds or "M<n>" for months, as in `axis.dtick`.
For category data, the number of categories to bin
together (always defaults to 1). If multiple non-
overlaying histograms share a subplot, the first
explicit `size` is used and all others discarded. If no
`size` is provided, the sample data from all traces is
combined to determine `size` as described above.
start
Sets the starting value for the y axis bins. Defaults
to the minimum data value, shifted down if necessary to
make nice round values and to remove ambiguous bin
edges. For example, if most of the data is integers we
shift the bin edges 0.5 down, so a `size` of 5 would
have a default `start` of -0.5, so it is clear that 0-4
are in the first bin, 5-9 in the second, but continuous
data gets a start of 0 and bins [0,5), [5,10) etc.
Dates behave similarly, and `start` should be a date
string. For category data, `start` is based on the
category serial numbers, and defaults to -0.5. If
multiple non-overlaying histograms share a subplot, the
first explicit `start` is used exactly and all others
are shifted down (if necessary) to differ from that one
by an integer number of bins.
Returns
-------
YBins
"""
super(YBins, self).__init__("ybins")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.histogram.YBins
constructor must be a dict or
an instance of plotly_study.graph_objs.histogram.YBins"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly_study.validators.histogram import ybins as v_ybins
# Initialize validators
# ---------------------
self._validators["end"] = v_ybins.EndValidator()
self._validators["size"] = v_ybins.SizeValidator()
self._validators["start"] = v_ybins.StartValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("end", None)
self["end"] = end if end is not None else _v
_v = arg.pop("size", None)
self["size"] = size if size is not None else _v
_v = arg.pop("start", None)
self["start"] = start if start is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
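# Usage sketch (illustrative only, not part of the generated module): YBins can be
# built from keyword arguments or from an equivalent dict; XBins below behaves the
# same way for the x axis.
#   ybins = YBins(start=-0.5, end=99.5, size=5)
#   ybins = YBins({'start': -0.5, 'end': 99.5, 'size': 5})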
from plotly_study.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class XBins(_BaseTraceHierarchyType):
# end
# ---
@property
def end(self):
"""
Sets the end value for the x axis bins. The last bin may not
end exactly at this value, we increment the bin edge by `size`
from `start` until we reach or exceed `end`. Defaults to the
maximum data value. Like `start`, for dates use a date string,
and for category data `end` is based on the category serial
numbers.
The 'end' property accepts values of any type
Returns
-------
Any
"""
return self["end"]
@end.setter
def end(self, val):
self["end"] = val
# size
# ----
@property
def size(self):
"""
Sets the size of each x axis bin. Default behavior: If `nbinsx`
is 0 or omitted, we choose a nice round bin size such that the
number of bins is about the same as the typical number of
samples in each bin. If `nbinsx` is provided, we choose a nice
round bin size giving no more than that many bins. For date
data, use milliseconds or "M<n>" for months, as in
`axis.dtick`. For category data, the number of categories to
bin together (always defaults to 1). If multiple non-overlaying
histograms share a subplot, the first explicit `size` is used
and all others discarded. If no `size` is provided, the sample
data from all traces is combined to determine `size` as
described above.
The 'size' property accepts values of any type
Returns
-------
Any
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# start
# -----
@property
def start(self):
"""
Sets the starting value for the x axis bins. Defaults to the
minimum data value, shifted down if necessary to make nice
round values and to remove ambiguous bin edges. For example, if
most of the data is integers we shift the bin edges 0.5 down,
so a `size` of 5 would have a default `start` of -0.5, so it is
clear that 0-4 are in the first bin, 5-9 in the second, but
continuous data gets a start of 0 and bins [0,5), [5,10) etc.
Dates behave similarly, and `start` should be a date string.
For category data, `start` is based on the category serial
numbers, and defaults to -0.5. If multiple non-overlaying
histograms share a subplot, the first explicit `start` is used
exactly and all others are shifted down (if necessary) to
differ from that one by an integer number of bins.
The 'start' property accepts values of any type
Returns
-------
Any
"""
return self["start"]
@start.setter
def start(self, val):
self["start"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "histogram"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
end
Sets the end value for the x axis bins. The last bin
may not end exactly at this value, we increment the bin
edge by `size` from `start` until we reach or exceed
`end`. Defaults to the maximum data value. Like
`start`, for dates use a date string, and for category
data `end` is based on the category serial numbers.
size
Sets the size of each x axis bin. Default behavior: If
`nbinsx` is 0 or omitted, we choose a nice round bin
size such that the number of bins is about the same as
the typical number of samples in each bin. If `nbinsx`
is provided, we choose a nice round bin size giving no
more than that many bins. For date data, use
milliseconds or "M<n>" for months, as in `axis.dtick`.
For category data, the number of categories to bin
together (always defaults to 1). If multiple non-
overlaying histograms share a subplot, the first
explicit `size` is used and all others discarded. If no
`size` is provided, the sample data from all traces is
combined to determine `size` as described above.
start
Sets the starting value for the x axis bins. Defaults
to the minimum data value, shifted down if necessary to
make nice round values and to remove ambiguous bin
edges. For example, if most of the data is integers we
shift the bin edges 0.5 down, so a `size` of 5 would
have a default `start` of -0.5, so it is clear that 0-4
are in the first bin, 5-9 in the second, but continuous
data gets a start of 0 and bins [0,5), [5,10) etc.
Dates behave similarly, and `start` should be a date
string. For category data, `start` is based on the
category serial numbers, and defaults to -0.5. If
multiple non-overlaying histograms share a subplot, the
first explicit `start` is used exactly and all others
are shifted down (if necessary) to differ from that one
by an integer number of bins.
"""
def __init__(self, arg=None, end=None, size=None, start=None, **kwargs):
"""
Construct a new XBins object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly_study.graph_objs.histogram.XBins
end
Sets the end value for the x axis bins. The last bin
may not end exactly at this value, we increment the bin
edge by `size` from `start` until we reach or exceed
`end`. Defaults to the maximum data value. Like
`start`, for dates use a date string, and for category
data `end` is based on the category serial numbers.
size
Sets the size of each x axis bin. Default behavior: If
`nbinsx` is 0 or omitted, we choose a nice round bin
size such that the number of bins is about the same as
the typical number of samples in each bin. If `nbinsx`
is provided, we choose a nice round bin size giving no
more than that many bins. For date data, use
milliseconds or "M<n>" for months, as in `axis.dtick`.
For category data, the number of categories to bin
together (always defaults to 1). If multiple non-
overlaying histograms share a subplot, the first
explicit `size` is used and all others discarded. If no
`size` is provided, the sample data from all traces is
combined to determine `size` as described above.
start
Sets the starting value for the x axis bins. Defaults
to the minimum data value, shifted down if necessary to
make nice round values and to remove ambiguous bin
edges. For example, if most of the data is integers we
shift the bin edges 0.5 down, so a `size` of 5 would
have a default `start` of -0.5, so it is clear that 0-4
are in the first bin, 5-9 in the second, but continuous
data gets a start of 0 and bins [0,5), [5,10) etc.
Dates behave similarly, and `start` should be a date
string. For category data, `start` is based on the
category serial numbers, and defaults to -0.5. If
multiple non-overlaying histograms share a subplot, the
first explicit `start` is used exactly and all others
are shifted down (if necessary) to differ from that one
by an integer number of bins.
Returns
-------
XBins
"""
super(XBins, self).__init__("xbins")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.histogram.XBins
constructor must be a dict or
an instance of plotly_study.graph_objs.histogram.XBins"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly_study.validators.histogram import xbins as v_xbins
# Initialize validators
# ---------------------
self._validators["end"] = v_xbins.EndValidator()
self._validators["size"] = v_xbins.SizeValidator()
self._validators["start"] = v_xbins.StartValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("end", None)
self["end"] = end if end is not None else _v
_v = arg.pop("size", None)
self["size"] = size if size is not None else _v
_v = arg.pop("start", None)
self["start"] = start if start is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly_study.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Unselected(_BaseTraceHierarchyType):
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of plotly_study.graph_objs.histogram.unselected.Marker
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
color
Sets the marker color of unselected points,
applied only when a selection exists.
opacity
Sets the marker opacity of unselected points,
applied only when a selection exists.
Returns
-------
plotly_study.graph_objs.histogram.unselected.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
# textfont
# --------
@property
def textfont(self):
"""
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of plotly_study.graph_objs.histogram.unselected.Textfont
- A dict of string/value properties that will be passed
to the Textfont constructor
Supported dict properties:
color
Sets the text font color of unselected points,
applied only when a selection exists.
Returns
-------
plotly_study.graph_objs.histogram.unselected.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "histogram"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
marker
plotly_study.graph_objects.histogram.unselected.Marker
instance or dict with compatible properties
textfont
plotly_study.graph_objects.histogram.unselected.Textfont
instance or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, textfont=None, **kwargs):
"""
Construct a new Unselected object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly_study.graph_objs.histogram.Unselected
marker
plotly_study.graph_objects.histogram.unselected.Marker
instance or dict with compatible properties
textfont
plotly_study.graph_objects.histogram.unselected.Textfont
instance or dict with compatible properties
Returns
-------
Unselected
"""
super(Unselected, self).__init__("unselected")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.histogram.Unselected
constructor must be a dict or
an instance of plotly_study.graph_objs.histogram.Unselected"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly_study.validators.histogram import unselected as v_unselected
# Initialize validators
# ---------------------
self._validators["marker"] = v_unselected.MarkerValidator()
self._validators["textfont"] = v_unselected.TextfontValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("marker", None)
self["marker"] = marker if marker is not None else _v
_v = arg.pop("textfont", None)
self["textfont"] = textfont if textfont is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
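# Usage sketch (illustrative only): styling points that fall outside the current
# selection; the marker dict is passed through to the nested Marker constructor.
#   unselected = Unselected(marker={'color': 'lightgray', 'opacity': 0.3})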
from plotly_study.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Stream(_BaseTraceHierarchyType):
# maxpoints
# ---------
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
# token
# -----
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://plot.ly/settings for more details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "histogram"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://plot.ly/settings for more
details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly_study.graph_objs.histogram.Stream
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://plot.ly/settings for more
details.
Returns
-------
Stream
"""
super(Stream, self).__init__("stream")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.histogram.Stream
constructor must be a dict or
an instance of plotly_study.graph_objs.histogram.Stream"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly_study.validators.histogram import stream as v_stream
# Initialize validators
# ---------------------
self._validators["maxpoints"] = v_stream.MaxpointsValidator()
self._validators["token"] = v_stream.TokenValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("maxpoints", None)
self["maxpoints"] = maxpoints if maxpoints is not None else _v
_v = arg.pop("token", None)
self["token"] = token if token is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
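# Usage sketch (illustrative only): the token below is a placeholder, not a real
# streaming token; maxpoints limits how many points stay on the plot.
#   stream = Stream(token='<your-stream-token>', maxpoints=50)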
from plotly_study.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Selected(_BaseTraceHierarchyType):
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of plotly_study.graph_objs.histogram.selected.Marker
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
color
Sets the marker color of selected points.
opacity
Sets the marker opacity of selected points.
Returns
-------
plotly_study.graph_objs.histogram.selected.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
# textfont
# --------
@property
def textfont(self):
"""
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of plotly_study.graph_objs.histogram.selected.Textfont
- A dict of string/value properties that will be passed
to the Textfont constructor
Supported dict properties:
color
Sets the text font color of selected points.
Returns
-------
plotly_study.graph_objs.histogram.selected.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "histogram"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
marker
plotly_study.graph_objects.histogram.selected.Marker instance
or dict with compatible properties
textfont
plotly_study.graph_objects.histogram.selected.Textfont
instance or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, textfont=None, **kwargs):
"""
Construct a new Selected object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly_study.graph_objs.histogram.Selected
marker
plotly_study.graph_objects.histogram.selected.Marker instance
or dict with compatible properties
textfont
plotly_study.graph_objects.histogram.selected.Textfont
instance or dict with compatible properties
Returns
-------
Selected
"""
super(Selected, self).__init__("selected")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.histogram.Selected
constructor must be a dict or
an instance of plotly_study.graph_objs.histogram.Selected"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly_study.validators.histogram import selected as v_selected
# Initialize validators
# ---------------------
self._validators["marker"] = v_selected.MarkerValidator()
self._validators["textfont"] = v_selected.TextfontValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("marker", None)
self["marker"] = marker if marker is not None else _v
_v = arg.pop("textfont", None)
self["textfont"] = textfont if textfont is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly_study.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Marker(_BaseTraceHierarchyType):
# autocolorscale
# --------------
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in `marker.color`is
set to a numerical array. In case `colorscale` is unspecified
or `autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color` array are
all positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
# cauto
# -----
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `marker.color`) or the
bounds set in `marker.cmin` and `marker.cmax` Has an effect
only if in `marker.color`is set to a numerical array. Defaults
to `false` when `marker.cmin` and `marker.cmax` are set by the
user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
# cmax
# ----
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Has an effect only if
in `marker.color`is set to a numerical array. Value should have
the same units as in `marker.color` and if set, `marker.cmin`
must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
# cmid
# ----
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling `marker.cmin`
and/or `marker.cmax` to be equidistant to this point. Has an
effect only if in `marker.color`is set to a numerical array.
Value should have the same units as in `marker.color`. Has no
effect when `marker.cauto` is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
# cmin
# ----
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Has an effect only if
in `marker.color`is set to a numerical array. Value should have
the same units as in `marker.color` and if set, `marker.cmax`
must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
# color
# -----
@property
def color(self):
"""
Sets the marker color. It accepts either a specific color or an
array of numbers that are mapped to the colorscale relative to
the max and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A number that will be interpreted as a color
according to histogram.marker.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# coloraxis
# ---------
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
# colorbar
# --------
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of plotly_study.graph_objs.histogram.marker.ColorBar
- A dict of string/value properties that will be passed
to the ColorBar constructor
Supported dict properties:
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) of the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
or in "pixels". Use `len` to set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format
We add one item to d3's date formatter: "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
tickformatstops
A tuple of plotly_study.graph_objects.histogram.marke
r.colorbar.Tickformatstop instances or dicts
with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.histogram.marker.colorbar.tickformatstopdefau
lts), sets the default property values to use
for elements of
histogram.marker.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on plot.ly for
ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on plot.ly for
tickvals .
tickwidth
Sets the tick width (in px).
title
plotly_study.graph_objects.histogram.marker.colorbar.
Title instance or dict with compatible
properties
titlefont
Deprecated: Please use
histogram.marker.colorbar.title.font instead.
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
titleside
Deprecated: Please use
histogram.marker.colorbar.title.side instead.
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
plotly_study.graph_objs.histogram.marker.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
# colorscale
# ----------
@property
def colorscale(self):
"""
Sets the colorscale. Has an effect only if in `marker.color`is
set to a numerical array. The colorscale must be an array
containing arrays mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At minimum, a mapping for
the lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To
control the bounds of the colorscale in color space,
use`marker.cmin` and `marker.cmax`. Alternatively, `colorscale`
may be a palette name string of the following list: Greys,YlGnB
u,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland
,Jet,Hot,Blackbody,Earth,Electric,Viridis,Cividis.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly_study.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'peach', 'phase', 'picnic', 'pinkyl', 'piyg',
'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn', 'puor',
'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu', 'rdgy',
'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar', 'spectral',
'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn', 'tealrose',
'tempo', 'temps', 'thermal', 'tropic', 'turbid', 'twilight',
'viridis', 'ylgn', 'ylgnbu', 'ylorbr', 'ylorrd']
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of plotly_study.graph_objs.histogram.marker.Line
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.line.colorscale`. Has an
effect only if in `marker.line.color`is set to
a numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.line.color`) or the bounds set in
`marker.line.cmin` and `marker.line.cmax` Has
an effect only if in `marker.line.color`is set
to a numerical array. Defaults to `false` when
`marker.line.cmin` and `marker.line.cmax` are
set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if in `marker.line.color`is set
to a numerical array. Value should have the
same units as in `marker.line.color` and if
set, `marker.line.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.line.cmin` and/or
`marker.line.cmax` to be equidistant to this
point. Has an effect only if in
`marker.line.color`is set to a numerical array.
Value should have the same units as in
`marker.line.color`. Has no effect when
`marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
an effect only if in `marker.line.color`is set
to a numerical array. Value should have the
same units as in `marker.line.color` and if
set, `marker.line.cmax` must be set as well.
color
Sets the marker.line color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.line.cmin` and `marker.line.cmax` if
set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorscale
Sets the colorscale. Has an effect only if in
`marker.line.color`is set to a numerical array.
The colorscale must be an array containing
arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in
color space, use`marker.line.cmin` and
`marker.line.cmax`. Alternatively, `colorscale`
may be a palette name string of the following
list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,R
eds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Black
body,Earth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on plot.ly for color
.
reversescale
Reverses the color mapping if true. Has an
effect only if in `marker.line.color`is set to
a numerical array. If true, `marker.line.cmin`
will correspond to the last color in the array
and `marker.line.cmax` will correspond to the
first color.
width
Sets the width (in px) of the lines bounding
the marker points.
widthsrc
Sets the source reference on plot.ly for width
.
Returns
-------
plotly_study.graph_objs.histogram.marker.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the bars.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# opacitysrc
# ----------
@property
def opacitysrc(self):
"""
Sets the source reference on plot.ly for opacity .
The 'opacitysrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
"""
return self["opacitysrc"]
@opacitysrc.setter
def opacitysrc(self, val):
self["opacitysrc"] = val
# reversescale
# ------------
@property
def reversescale(self):
"""
Reverses the color mapping if true. Has an effect only if in
`marker.color`is set to a numerical array. If true,
`marker.cmin` will correspond to the last color in the array
and `marker.cmax` will correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
# showscale
# ---------
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace. Has an effect only if in `marker.color`is set to a
numerical array.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "histogram"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in
`marker.color`is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
or the bounds set in `marker.cmin` and `marker.cmax`
Has an effect only if in `marker.color`is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.color`is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
this point. Has an effect only if in `marker.color`is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.color`is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
plotly_study.graph_objects.histogram.marker.ColorBar instance
or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color`is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use`marker.cmin` and `marker.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu
,Reds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
arth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on plot.ly for color .
line
plotly_study.graph_objects.histogram.marker.Line instance or
dict with compatible properties
opacity
Sets the opacity of the bars.
opacitysrc
Sets the source reference on plot.ly for opacity .
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.color`is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if in `marker.color`is
set to a numerical array.
"""
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
color=None,
coloraxis=None,
colorbar=None,
colorscale=None,
colorsrc=None,
line=None,
opacity=None,
opacitysrc=None,
reversescale=None,
showscale=None,
**kwargs
):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly_study.graph_objs.histogram.Marker
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in
`marker.color`is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
or the bounds set in `marker.cmin` and `marker.cmax`
Has an effect only if in `marker.color`is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.color`is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
this point. Has an effect only if in `marker.color`is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.color`is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
plotly_study.graph_objects.histogram.marker.ColorBar instance
or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color`is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use`marker.cmin` and `marker.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu
,Reds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
arth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on plot.ly for color .
line
plotly_study.graph_objects.histogram.marker.Line instance or
dict with compatible properties
opacity
Sets the opacity of the bars.
opacitysrc
Sets the source reference on plot.ly for opacity .
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.color`is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if in `marker.color`is
set to a numerical array.
Returns
-------
Marker
"""
super(Marker, self).__init__("marker")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.histogram.Marker
constructor must be a dict or
an instance of plotly_study.graph_objs.histogram.Marker"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly_study.validators.histogram import marker as v_marker
# Initialize validators
# ---------------------
self._validators["autocolorscale"] = v_marker.AutocolorscaleValidator()
self._validators["cauto"] = v_marker.CautoValidator()
self._validators["cmax"] = v_marker.CmaxValidator()
self._validators["cmid"] = v_marker.CmidValidator()
self._validators["cmin"] = v_marker.CminValidator()
self._validators["color"] = v_marker.ColorValidator()
self._validators["coloraxis"] = v_marker.ColoraxisValidator()
self._validators["colorbar"] = v_marker.ColorBarValidator()
self._validators["colorscale"] = v_marker.ColorscaleValidator()
self._validators["colorsrc"] = v_marker.ColorsrcValidator()
self._validators["line"] = v_marker.LineValidator()
self._validators["opacity"] = v_marker.OpacityValidator()
self._validators["opacitysrc"] = v_marker.OpacitysrcValidator()
self._validators["reversescale"] = v_marker.ReversescaleValidator()
self._validators["showscale"] = v_marker.ShowscaleValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("autocolorscale", None)
self["autocolorscale"] = autocolorscale if autocolorscale is not None else _v
_v = arg.pop("cauto", None)
self["cauto"] = cauto if cauto is not None else _v
_v = arg.pop("cmax", None)
self["cmax"] = cmax if cmax is not None else _v
_v = arg.pop("cmid", None)
self["cmid"] = cmid if cmid is not None else _v
_v = arg.pop("cmin", None)
self["cmin"] = cmin if cmin is not None else _v
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("coloraxis", None)
self["coloraxis"] = coloraxis if coloraxis is not None else _v
_v = arg.pop("colorbar", None)
self["colorbar"] = colorbar if colorbar is not None else _v
_v = arg.pop("colorscale", None)
self["colorscale"] = colorscale if colorscale is not None else _v
_v = arg.pop("colorsrc", None)
self["colorsrc"] = colorsrc if colorsrc is not None else _v
_v = arg.pop("line", None)
self["line"] = line if line is not None else _v
_v = arg.pop("opacity", None)
self["opacity"] = opacity if opacity is not None else _v
_v = arg.pop("opacitysrc", None)
self["opacitysrc"] = opacitysrc if opacitysrc is not None else _v
_v = arg.pop("reversescale", None)
self["reversescale"] = reversescale if reversescale is not None else _v
_v = arg.pop("showscale", None)
self["showscale"] = showscale if showscale is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
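# Illustrative usage (a sketch, not part of the generated module): the Marker
# constructor accepts either a dict via `arg` or individual keyword arguments,
# and explicit keyword arguments take precedence over values in `arg`. The
# numeric values and colors below are hypothetical examples; the colorscale
# uses the two-stop form described in the `colorscale` docstring above.
if __name__ == "__main__":
    example_marker = Marker(
        arg={"cmid": 2.5},  # taken from `arg` because no cmid keyword is given
        color=[1, 2, 3, 4],  # numerical array mapped onto the colorscale
        colorscale=[[0, "rgb(0,0,255)"], [1, "rgb(255,0,0)"]],
        cmin=1,
        cmax=4,
        showscale=True,
    )
    print(example_marker)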
from plotly_study.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
# align
# -----
@property
def align(self):
"""
Sets the horizontal alignment of the text content within the
hover label box. Has an effect only if the hover label text
spans two or more lines.
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
# alignsrc
# --------
@property
def alignsrc(self):
"""
Sets the source reference on plot.ly for align .
The 'alignsrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bgcolorsrc
# ----------
@property
def bgcolorsrc(self):
"""
Sets the source reference on plot.ly for bgcolor .
The 'bgcolorsrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# bordercolorsrc
# --------------
@property
def bordercolorsrc(self):
"""
Sets the source reference on plot.ly for bordercolor .
The 'bordercolorsrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
# font
# ----
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of plotly_study.graph_objs.histogram.hoverlabel.Font
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
colorsrc
Sets the source reference on plot.ly for color
.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system on which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for
family .
size
sizesrc
Sets the source reference on plot.ly for size
.
Returns
-------
plotly_study.graph_objs.histogram.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# namelength
# ----------
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
# namelengthsrc
# -------------
@property
def namelengthsrc(self):
"""
Sets the source reference on plot.ly for namelength .
The 'namelengthsrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "histogram"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within the hover label box. Has an effect only if the
hover label text spans two or more lines.
alignsrc
Sets the source reference on plot.ly for align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on plot.ly for bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on plot.ly for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on plot.ly for namelength .
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
**kwargs
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly_study.graph_objs.histogram.Hoverlabel
align
Sets the horizontal alignment of the text content
within the hover label box. Has an effect only if the
hover label text spans two or more lines.
alignsrc
Sets the source reference on plot.ly for align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on plot.ly for bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on plot.ly for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on plot.ly for namelength .
Returns
-------
Hoverlabel
"""
super(Hoverlabel, self).__init__("hoverlabel")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.histogram.Hoverlabel
constructor must be a dict or
an instance of plotly_study.graph_objs.histogram.Hoverlabel"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly_study.validators.histogram import hoverlabel as v_hoverlabel
# Initialize validators
# ---------------------
self._validators["align"] = v_hoverlabel.AlignValidator()
self._validators["alignsrc"] = v_hoverlabel.AlignsrcValidator()
self._validators["bgcolor"] = v_hoverlabel.BgcolorValidator()
self._validators["bgcolorsrc"] = v_hoverlabel.BgcolorsrcValidator()
self._validators["bordercolor"] = v_hoverlabel.BordercolorValidator()
self._validators["bordercolorsrc"] = v_hoverlabel.BordercolorsrcValidator()
self._validators["font"] = v_hoverlabel.FontValidator()
self._validators["namelength"] = v_hoverlabel.NamelengthValidator()
self._validators["namelengthsrc"] = v_hoverlabel.NamelengthsrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("align", None)
self["align"] = align if align is not None else _v
_v = arg.pop("alignsrc", None)
self["alignsrc"] = alignsrc if alignsrc is not None else _v
_v = arg.pop("bgcolor", None)
self["bgcolor"] = bgcolor if bgcolor is not None else _v
_v = arg.pop("bgcolorsrc", None)
self["bgcolorsrc"] = bgcolorsrc if bgcolorsrc is not None else _v
_v = arg.pop("bordercolor", None)
self["bordercolor"] = bordercolor if bordercolor is not None else _v
_v = arg.pop("bordercolorsrc", None)
self["bordercolorsrc"] = bordercolorsrc if bordercolorsrc is not None else _v
_v = arg.pop("font", None)
self["font"] = font if font is not None else _v
_v = arg.pop("namelength", None)
self["namelength"] = namelength if namelength is not None else _v
_v = arg.pop("namelengthsrc", None)
self["namelengthsrc"] = namelengthsrc if namelengthsrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
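# Illustrative usage (a sketch, not part of the generated module): with
# namelength=10, a trace name longer than 10 characters is shown truncated to
# `namelength - 3` characters plus an ellipsis in the hover label. The colors
# below are hypothetical example values.
if __name__ == "__main__":
    example_hoverlabel = Hoverlabel(
        align="left",
        bgcolor="white",
        bordercolor="rgb(40,40,40)",
        namelength=10,
    )
    print(example_hoverlabel)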
from plotly_study.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ErrorY(_BaseTraceHierarchyType):
# array
# -----
@property
def array(self):
"""
Sets the data corresponding to the length of each error bar.
Values are plotted relative to the underlying data.
The 'array' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["array"]
@array.setter
def array(self, val):
self["array"] = val
# arrayminus
# ----------
@property
def arrayminus(self):
"""
Sets the data corresponding to the length of each error bar in
the bottom (left) direction for vertical (horizontal) bars. Values
are plotted relative to the underlying data.
The 'arrayminus' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["arrayminus"]
@arrayminus.setter
def arrayminus(self, val):
self["arrayminus"] = val
# arrayminussrc
# -------------
@property
def arrayminussrc(self):
"""
Sets the source reference on plot.ly for arrayminus .
The 'arrayminussrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
"""
return self["arrayminussrc"]
@arrayminussrc.setter
def arrayminussrc(self, val):
self["arrayminussrc"] = val
# arraysrc
# --------
@property
def arraysrc(self):
"""
Sets the source reference on plot.ly for array .
The 'arraysrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
"""
return self["arraysrc"]
@arraysrc.setter
def arraysrc(self, val):
self["arraysrc"] = val
# color
# -----
@property
def color(self):
"""
Sets the stroke color of the error bars.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# symmetric
# ---------
@property
def symmetric(self):
"""
Determines whether or not the error bars have the same length
in both directions (top/bottom for vertical bars, left/right for
horizontal bars).
The 'symmetric' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["symmetric"]
@symmetric.setter
def symmetric(self, val):
self["symmetric"] = val
# thickness
# ---------
@property
def thickness(self):
"""
Sets the thickness (in px) of the error bars.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# traceref
# --------
@property
def traceref(self):
"""
The 'traceref' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["traceref"]
@traceref.setter
def traceref(self, val):
self["traceref"] = val
# tracerefminus
# -------------
@property
def tracerefminus(self):
"""
The 'tracerefminus' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["tracerefminus"]
@tracerefminus.setter
def tracerefminus(self, val):
self["tracerefminus"] = val
# type
# ----
@property
def type(self):
"""
Determines the rule used to generate the error bars. If
"constant", the bar lengths are of a constant value. Set this
constant in `value`. If "percent", the bar lengths correspond
to a percentage of underlying data. Set this percentage in
`value`. If "sqrt", the bar lengths correspond to the square of
the underlying data. If "data", the bar lengths are set with
the data set `array`.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['percent', 'constant', 'sqrt', 'data']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
# value
# -----
@property
def value(self):
"""
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars.
The 'value' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
# valueminus
# ----------
@property
def valueminus(self):
"""
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars in the bottom
(left) direction for vertical (horizontal) bars
The 'valueminus' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["valueminus"]
@valueminus.setter
def valueminus(self, val):
self["valueminus"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this set of error bars is visible.
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# width
# -----
@property
def width(self):
"""
Sets the width (in px) of the cross-bar at both ends of the
error bars.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "histogram"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
array
Sets the data corresponding to the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
Sets the data corresponding to the length of each error
bar in the bottom (left) direction for vertical
(horizontal) bars. Values are plotted relative to the
underlying data.
arrayminussrc
Sets the source reference on plot.ly for arrayminus .
arraysrc
Sets the source reference on plot.ly for array .
color
Sets the stroke color of the error bars.
symmetric
Determines whether or not the error bars have the same
length in both directions (top/bottom for vertical bars,
left/right for horizontal bars).
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
Determines the rule used to generate the error bars. If
"constant", the bar lengths are of a constant value.
Set this constant in `value`. If "percent", the bar
lengths correspond to a percentage of underlying data.
Set this percentage in `value`. If "sqrt", the bar
lengths correspond to the square of the underlying
data. If "data", the bar lengths are set with the data
set `array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
"""
def __init__(
self,
arg=None,
array=None,
arrayminus=None,
arrayminussrc=None,
arraysrc=None,
color=None,
symmetric=None,
thickness=None,
traceref=None,
tracerefminus=None,
type=None,
value=None,
valueminus=None,
visible=None,
width=None,
**kwargs
):
"""
Construct a new ErrorY object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly_study.graph_objs.histogram.ErrorY
array
Sets the data corresponding to the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
Sets the data corresponding to the length of each error
bar in the bottom (left) direction for vertical
(horizontal) bars. Values are plotted relative to the
underlying data.
arrayminussrc
Sets the source reference on plot.ly for arrayminus .
arraysrc
Sets the source reference on plot.ly for array .
color
Sets the stroke color of the error bars.
symmetric
Determines whether or not the error bars have the same
length in both directions (top/bottom for vertical bars,
left/right for horizontal bars).
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
Determines the rule used to generate the error bars. If
"constant", the bar lengths are of a constant value.
Set this constant in `value`. If "percent", the bar
lengths correspond to a percentage of underlying data.
Set this percentage in `value`. If "sqrt", the bar
lengths correspond to the square of the underlying
data. If "data", the bar lengths are set with the data
set `array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
Returns
-------
ErrorY
"""
super(ErrorY, self).__init__("error_y")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.histogram.ErrorY
constructor must be a dict or
an instance of plotly_study.graph_objs.histogram.ErrorY"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly_study.validators.histogram import error_y as v_error_y
# Initialize validators
# ---------------------
self._validators["array"] = v_error_y.ArrayValidator()
self._validators["arrayminus"] = v_error_y.ArrayminusValidator()
self._validators["arrayminussrc"] = v_error_y.ArrayminussrcValidator()
self._validators["arraysrc"] = v_error_y.ArraysrcValidator()
self._validators["color"] = v_error_y.ColorValidator()
self._validators["symmetric"] = v_error_y.SymmetricValidator()
self._validators["thickness"] = v_error_y.ThicknessValidator()
self._validators["traceref"] = v_error_y.TracerefValidator()
self._validators["tracerefminus"] = v_error_y.TracerefminusValidator()
self._validators["type"] = v_error_y.TypeValidator()
self._validators["value"] = v_error_y.ValueValidator()
self._validators["valueminus"] = v_error_y.ValueminusValidator()
self._validators["visible"] = v_error_y.VisibleValidator()
self._validators["width"] = v_error_y.WidthValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("array", None)
self["array"] = array if array is not None else _v
_v = arg.pop("arrayminus", None)
self["arrayminus"] = arrayminus if arrayminus is not None else _v
_v = arg.pop("arrayminussrc", None)
self["arrayminussrc"] = arrayminussrc if arrayminussrc is not None else _v
_v = arg.pop("arraysrc", None)
self["arraysrc"] = arraysrc if arraysrc is not None else _v
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("symmetric", None)
self["symmetric"] = symmetric if symmetric is not None else _v
_v = arg.pop("thickness", None)
self["thickness"] = thickness if thickness is not None else _v
_v = arg.pop("traceref", None)
self["traceref"] = traceref if traceref is not None else _v
_v = arg.pop("tracerefminus", None)
self["tracerefminus"] = tracerefminus if tracerefminus is not None else _v
_v = arg.pop("type", None)
self["type"] = type if type is not None else _v
_v = arg.pop("value", None)
self["value"] = value if value is not None else _v
_v = arg.pop("valueminus", None)
self["valueminus"] = valueminus if valueminus is not None else _v
_v = arg.pop("visible", None)
self["visible"] = visible if visible is not None else _v
_v = arg.pop("width", None)
self["width"] = width if width is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
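# Illustrative usage (a sketch, not part of the generated module): with
# type="percent" the bar lengths are a percentage of the underlying data set
# via `value`, while type="data" takes explicit lengths from `array` (and from
# `arrayminus` when symmetric=False). The numbers below are hypothetical.
if __name__ == "__main__":
    percent_error = ErrorY(type="percent", value=10, visible=True)
    data_error = ErrorY(
        type="data",
        array=[0.5, 0.4, 0.3],
        arrayminus=[0.2, 0.2, 0.2],
        symmetric=False,
    )
    print(percent_error, data_error)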
from plotly_study.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ErrorX(_BaseTraceHierarchyType):
# array
# -----
@property
def array(self):
"""
Sets the data corresponding to the length of each error bar.
Values are plotted relative to the underlying data.
The 'array' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["array"]
@array.setter
def array(self, val):
self["array"] = val
# arrayminus
# ----------
@property
def arrayminus(self):
"""
Sets the data corresponding to the length of each error bar in
the bottom (left) direction for vertical (horizontal) bars. Values
are plotted relative to the underlying data.
The 'arrayminus' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["arrayminus"]
@arrayminus.setter
def arrayminus(self, val):
self["arrayminus"] = val
# arrayminussrc
# -------------
@property
def arrayminussrc(self):
"""
Sets the source reference on plot.ly for arrayminus .
The 'arrayminussrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
"""
return self["arrayminussrc"]
@arrayminussrc.setter
def arrayminussrc(self, val):
self["arrayminussrc"] = val
# arraysrc
# --------
@property
def arraysrc(self):
"""
Sets the source reference on plot.ly for array .
The 'arraysrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
"""
return self["arraysrc"]
@arraysrc.setter
def arraysrc(self, val):
self["arraysrc"] = val
# color
# -----
@property
def color(self):
"""
Sets the stroke color of the error bars.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# copy_ystyle
# -----------
@property
def copy_ystyle(self):
"""
The 'copy_ystyle' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["copy_ystyle"]
@copy_ystyle.setter
def copy_ystyle(self, val):
self["copy_ystyle"] = val
# symmetric
# ---------
@property
def symmetric(self):
"""
Determines whether or not the error bars have the same length
in both directions (top/bottom for vertical bars, left/right for
horizontal bars).
The 'symmetric' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["symmetric"]
@symmetric.setter
def symmetric(self, val):
self["symmetric"] = val
# thickness
# ---------
@property
def thickness(self):
"""
Sets the thickness (in px) of the error bars.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# traceref
# --------
@property
def traceref(self):
"""
The 'traceref' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["traceref"]
@traceref.setter
def traceref(self, val):
self["traceref"] = val
# tracerefminus
# -------------
@property
def tracerefminus(self):
"""
The 'tracerefminus' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["tracerefminus"]
@tracerefminus.setter
def tracerefminus(self, val):
self["tracerefminus"] = val
# type
# ----
@property
def type(self):
"""
Determines the rule used to generate the error bars. If
"constant", the bar lengths are of a constant value. Set this
constant in `value`. If "percent", the bar lengths correspond
to a percentage of underlying data. Set this percentage in
`value`. If "sqrt", the bar lengths correspond to the square of
the underlying data. If "data", the bar lengths are set with
the data set `array`.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['percent', 'constant', 'sqrt', 'data']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
# value
# -----
@property
def value(self):
"""
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars.
The 'value' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
# valueminus
# ----------
@property
def valueminus(self):
"""
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars in the bottom
(left) direction for vertical (horizontal) bars
The 'valueminus' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["valueminus"]
@valueminus.setter
def valueminus(self, val):
self["valueminus"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this set of error bars is visible.
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# width
# -----
@property
def width(self):
"""
Sets the width (in px) of the cross-bar at both ends of the
error bars.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "histogram"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
array
Sets the data corresponding to the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
Sets the data corresponding to the length of each error
bar in the bottom (left) direction for vertical
(horizontal) bars. Values are plotted relative to the
underlying data.
arrayminussrc
Sets the source reference on plot.ly for arrayminus .
arraysrc
Sets the source reference on plot.ly for array .
color
Sets the stroke color of the error bars.
copy_ystyle
symmetric
Determines whether or not the error bars have the same
length in both directions (top/bottom for vertical bars,
left/right for horizontal bars).
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
Determines the rule used to generate the error bars. If
"constant", the bar lengths are of a constant value.
Set this constant in `value`. If "percent", the bar
lengths correspond to a percentage of underlying data.
Set this percentage in `value`. If "sqrt", the bar
lengths correspond to the square of the underlying
data. If "data", the bar lengths are set with the data
set `array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
"""
def __init__(
self,
arg=None,
array=None,
arrayminus=None,
arrayminussrc=None,
arraysrc=None,
color=None,
copy_ystyle=None,
symmetric=None,
thickness=None,
traceref=None,
tracerefminus=None,
type=None,
value=None,
valueminus=None,
visible=None,
width=None,
**kwargs
):
"""
Construct a new ErrorX object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly_study.graph_objs.histogram.ErrorX
array
Sets the data corresponding to the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
Sets the data corresponding to the length of each error
bar in the bottom (left) direction for vertical
(horizontal) bars. Values are plotted relative to the
underlying data.
arrayminussrc
Sets the source reference on plot.ly for arrayminus .
arraysrc
Sets the source reference on plot.ly for array .
color
Sets the stroke color of the error bars.
copy_ystyle
symmetric
Determines whether or not the error bars have the same
length in both directions (top/bottom for vertical bars,
left/right for horizontal bars).
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
Determines the rule used to generate the error bars. If
"constant", the bar lengths are of a constant value.
Set this constant in `value`. If "percent", the bar
lengths correspond to a percentage of underlying data.
Set this percentage in `value`. If "sqrt", the bar
lengths correspond to the square of the underlying
data. If "data", the bar lengths are set with the data
set `array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
Returns
-------
ErrorX
"""
super(ErrorX, self).__init__("error_x")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.histogram.ErrorX
constructor must be a dict or
an instance of plotly_study.graph_objs.histogram.ErrorX"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly_study.validators.histogram import error_x as v_error_x
# Initialize validators
# ---------------------
self._validators["array"] = v_error_x.ArrayValidator()
self._validators["arrayminus"] = v_error_x.ArrayminusValidator()
self._validators["arrayminussrc"] = v_error_x.ArrayminussrcValidator()
self._validators["arraysrc"] = v_error_x.ArraysrcValidator()
self._validators["color"] = v_error_x.ColorValidator()
self._validators["copy_ystyle"] = v_error_x.CopyYstyleValidator()
self._validators["symmetric"] = v_error_x.SymmetricValidator()
self._validators["thickness"] = v_error_x.ThicknessValidator()
self._validators["traceref"] = v_error_x.TracerefValidator()
self._validators["tracerefminus"] = v_error_x.TracerefminusValidator()
self._validators["type"] = v_error_x.TypeValidator()
self._validators["value"] = v_error_x.ValueValidator()
self._validators["valueminus"] = v_error_x.ValueminusValidator()
self._validators["visible"] = v_error_x.VisibleValidator()
self._validators["width"] = v_error_x.WidthValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("array", None)
self["array"] = array if array is not None else _v
_v = arg.pop("arrayminus", None)
self["arrayminus"] = arrayminus if arrayminus is not None else _v
_v = arg.pop("arrayminussrc", None)
self["arrayminussrc"] = arrayminussrc if arrayminussrc is not None else _v
_v = arg.pop("arraysrc", None)
self["arraysrc"] = arraysrc if arraysrc is not None else _v
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("copy_ystyle", None)
self["copy_ystyle"] = copy_ystyle if copy_ystyle is not None else _v
_v = arg.pop("symmetric", None)
self["symmetric"] = symmetric if symmetric is not None else _v
_v = arg.pop("thickness", None)
self["thickness"] = thickness if thickness is not None else _v
_v = arg.pop("traceref", None)
self["traceref"] = traceref if traceref is not None else _v
_v = arg.pop("tracerefminus", None)
self["tracerefminus"] = tracerefminus if tracerefminus is not None else _v
_v = arg.pop("type", None)
self["type"] = type if type is not None else _v
_v = arg.pop("value", None)
self["value"] = value if value is not None else _v
_v = arg.pop("valueminus", None)
self["valueminus"] = valueminus if valueminus is not None else _v
_v = arg.pop("visible", None)
self["visible"] = visible if visible is not None else _v
_v = arg.pop("width", None)
self["width"] = width if width is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
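# Illustrative usage (a sketch, not part of the generated module): copy_ystyle
# is the one property ErrorX adds over ErrorY; when True, the x error bars
# reuse the styling of the y error bars. The values below are hypothetical.
if __name__ == "__main__":
    example_error_x = ErrorX(type="sqrt", copy_ystyle=True, visible=True)
    print(example_error_x)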
from plotly_study.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Cumulative(_BaseTraceHierarchyType):
# currentbin
# ----------
@property
def currentbin(self):
"""
Only applies if cumulative is enabled. Sets whether the current
bin is included, excluded, or has half of its value included in
the current cumulative value. "include" is the default for
compatibility with various other tools, however it introduces a
half-bin bias to the results. "exclude" makes the opposite
half-bin bias, and "half" removes it.
The 'currentbin' property is an enumeration that may be specified as:
- One of the following enumeration values:
['include', 'exclude', 'half']
Returns
-------
Any
"""
return self["currentbin"]
@currentbin.setter
def currentbin(self, val):
self["currentbin"] = val
# direction
# ---------
@property
def direction(self):
"""
Only applies if cumulative is enabled. If "increasing"
(default) we sum all prior bins, so the result increases from
left to right. If "decreasing" we sum later bins so the result
decreases from left to right.
The 'direction' property is an enumeration that may be specified as:
- One of the following enumeration values:
['increasing', 'decreasing']
Returns
-------
Any
"""
return self["direction"]
@direction.setter
def direction(self, val):
self["direction"] = val
# enabled
# -------
@property
def enabled(self):
"""
If true, display the cumulative distribution by summing the
binned values. Use the `direction` and `currentbin` attributes
to tune the accumulation method. Note: in this mode, the
"density" `histnorm` settings behave the same as their
equivalents without "density": "" and "density" both rise to
the number of data points, and "probability" and *probability
density* both rise to the number of sample points.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "histogram"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
currentbin
Only applies if cumulative is enabled. Sets whether the
current bin is included, excluded, or has half of its
value included in the current cumulative value.
"include" is the default for compatibility with various
other tools, however it introduces a half-bin bias to
the results. "exclude" makes the opposite half-bin
bias, and "half" removes it.
direction
Only applies if cumulative is enabled. If "increasing"
(default) we sum all prior bins, so the result
increases from left to right. If "decreasing" we sum
later bins so the result decreases from left to right.
enabled
If true, display the cumulative distribution by summing
the binned values. Use the `direction` and `currentbin`
attributes to tune the accumulation method. Note: in
this mode, the "density" `histnorm` settings behave the
same as their equivalents without "density": "" and
"density" both rise to the number of data points, and
"probability" and *probability density* both rise to
the number of sample points.
"""
def __init__(
self, arg=None, currentbin=None, direction=None, enabled=None, **kwargs
):
"""
Construct a new Cumulative object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly_study.graph_objs.histogram.Cumulative
currentbin
Only applies if cumulative is enabled. Sets whether the
current bin is included, excluded, or has half of its
value included in the current cumulative value.
"include" is the default for compatibility with various
other tools, however it introduces a half-bin bias to
the results. "exclude" makes the opposite half-bin
bias, and "half" removes it.
direction
Only applies if cumulative is enabled. If "increasing"
(default) we sum all prior bins, so the result
increases from left to right. If "decreasing" we sum
later bins so the result decreases from left to right.
enabled
If true, display the cumulative distribution by summing
the binned values. Use the `direction` and `currentbin`
attributes to tune the accumulation method. Note: in
this mode, the "density" `histnorm` settings behave the
same as their equivalents without "density": "" and
"density" both rise to the number of data points, and
"probability" and *probability density* both rise to
the number of sample points.
Returns
-------
Cumulative
"""
super(Cumulative, self).__init__("cumulative")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.histogram.Cumulative
constructor must be a dict or
an instance of plotly_study.graph_objs.histogram.Cumulative"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly_study.validators.histogram import cumulative as v_cumulative
# Initialize validators
# ---------------------
self._validators["currentbin"] = v_cumulative.CurrentbinValidator()
self._validators["direction"] = v_cumulative.DirectionValidator()
self._validators["enabled"] = v_cumulative.EnabledValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("currentbin", None)
self["currentbin"] = currentbin if currentbin is not None else _v
_v = arg.pop("direction", None)
self["direction"] = direction if direction is not None else _v
_v = arg.pop("enabled", None)
self["enabled"] = enabled if enabled is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
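# Illustrative usage (a sketch, not part of the generated module): an
# increasing cumulative histogram using currentbin="half", which removes the
# half-bin bias described in the `currentbin` docstring above.
if __name__ == "__main__":
    example_cumulative = Cumulative(
        enabled=True,
        direction="increasing",
        currentbin="half",
    )
    print(example_cumulative)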
__all__ = [
"Cumulative",
"ErrorX",
"ErrorY",
"Hoverlabel",
"Marker",
"Selected",
"Stream",
"Unselected",
"XBins",
"YBins",
"hoverlabel",
"marker",
"selected",
"unselected",
]
from plotly_study.graph_objs.histogram import unselected
from plotly_study.graph_objs.histogram import selected
from plotly_study.graph_objs.histogram import marker
from plotly_study.graph_objs.histogram import hoverlabel
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
serv/serv.go
|
package serv
import (
"context"
"errors"
"flag"
"fmt"
"net/http"
"os"
"os/signal"
"strings"
"time"
"github.com/dosco/super-graph/psql"
"github.com/dosco/super-graph/qcode"
"github.com/go-pg/pg"
"github.com/gobuffalo/flect"
"github.com/rs/zerolog"
"github.com/spf13/viper"
)
//go:generate esc -o static.go -ignore \\.DS_Store -prefix ../web/build -private -pkg serv ../web/build
const (
serverName = "Super Graph"
authFailBlockAlways = iota + 1
authFailBlockPerQuery
authFailBlockNever
)
var (
logger *zerolog.Logger
conf *config
db *pg.DB
qcompile *qcode.Compiler
pcompile *psql.Compiler
authFailBlock int
)
func initLog() *zerolog.Logger {
logger := zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr}).
With().
Timestamp().
Caller().
Logger()
return &logger
/*
log := logrus.New()
logger.Formatter = new(logrus.TextFormatter)
logger.Formatter.(*logrus.TextFormatter).DisableColors = false
logger.Formatter.(*logrus.TextFormatter).DisableTimestamp = true
logger.Level = logrus.TraceLevel
logger.Out = os.Stdout
*/
}
func initConf(path string) (*config, error) {
vi := viper.New()
vi.SetEnvPrefix("SG")
vi.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
vi.AutomaticEnv()
vi.AddConfigPath(path)
vi.AddConfigPath("./config")
vi.SetConfigName(getConfigName())
vi.SetDefault("host_port", "0.0.0.0:8080")
vi.SetDefault("web_ui", false)
vi.SetDefault("enable_tracing", false)
vi.SetDefault("auth_fail_block", "always")
vi.SetDefault("database.type", "postgres")
vi.SetDefault("database.host", "localhost")
vi.SetDefault("database.port", 5432)
vi.SetDefault("database.user", "postgres")
vi.SetDefault("env", "development")
vi.BindEnv("env", "GO_ENV")
vi.BindEnv("HOST", "HOST")
vi.BindEnv("PORT", "PORT")
vi.SetDefault("auth.rails.max_idle", 80)
vi.SetDefault("auth.rails.max_active", 12000)
if err := vi.ReadInConfig(); err != nil {
return nil, err
}
c := &config{}
if err := vi.Unmarshal(c); err != nil {
return nil, fmt.Errorf("unable to decode config, %v", err)
}
for k, v := range c.Inflections {
flect.AddPlural(k, v)
}
if len(c.DB.Tables) == 0 {
c.DB.Tables = c.DB.Fields
}
for i := range c.DB.Tables {
t := c.DB.Tables[i]
t.Name = flect.Pluralize(strings.ToLower(t.Name))
}
authFailBlock = getAuthFailBlock(c)
//fmt.Printf("%#v", c)
return c, nil
}
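// Illustrative sketch, not part of the original file: with the "SG" prefix and
// the "." -> "_" key replacer configured in initConf above, an environment
// variable such as SG_DATABASE_HOST overrides the database.host key read from
// the config file. exampleEnvOverride is a hypothetical helper showing the
// same viper wiring in isolation.
func exampleEnvOverride() string {
	vi := viper.New()
	vi.SetEnvPrefix("SG")
	vi.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
	vi.AutomaticEnv()
	vi.SetDefault("database.host", "localhost")
	// Returns the value of SG_DATABASE_HOST when that variable is set,
	// otherwise the default above.
	return vi.GetString("database.host")
}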
func initDB(c *config) (*pg.DB, error) {
opt := &pg.Options{
Addr: strings.Join([]string{c.DB.Host, c.DB.Port}, ":"),
User: c.DB.User,
Password: c.DB.Password,
Database: c.DB.DBName,
ApplicationName: c.AppName,
}
if c.DB.PoolSize != 0 {
opt.PoolSize = c.DB.PoolSize
}
if c.DB.MaxRetries != 0 {
opt.MaxRetries = c.DB.MaxRetries
}
if len(c.DB.Schema) != 0 {
opt.OnConnect = func(conn *pg.Conn) error {
_, err := conn.Exec("set search_path=?", c.DB.Schema)
if err != nil {
return err
}
return nil
}
}
db := pg.Connect(opt)
if db == nil {
return nil, errors.New("failed to connect to postgres db")
}
return db, nil
}
func initCompilers(c *config) (*qcode.Compiler, *psql.Compiler, error) {
schema, err := psql.NewDBSchema(db, c.getAliasMap())
if err != nil {
return nil, nil, err
}
qc, err := qcode.NewCompiler(qcode.Config{
DefaultFilter: c.DB.Defaults.Filter,
FilterMap: c.getFilterMap(),
Blacklist: c.DB.Defaults.Blacklist,
KeepArgs: false,
})
if err != nil {
return nil, nil, err
}
pc := psql.NewCompiler(psql.Config{
Schema: schema,
Vars: c.getVariables(),
})
return qc, pc, nil
}
func Init() {
var err error
path := flag.String("path", "./", "Path to config files")
flag.Parse()
logger = initLog()
conf, err = initConf(*path)
if err != nil {
logger.Fatal().Err(err).Msg("failed to read config")
}
logLevel, err := zerolog.ParseLevel(conf.LogLevel)
if err != nil {
logger.Error().Err(err).Msg("error setting log_level")
}
zerolog.SetGlobalLevel(logLevel)
db, err = initDB(conf)
if err != nil {
logger.Fatal().Err(err).Msg("failed to connect to database")
}
qcompile, pcompile, err = initCompilers(conf)
if err != nil {
logger.Fatal().Err(err).Msg("failed to connect to database")
}
if err := initResolvers(); err != nil {
logger.Fatal().Err(err).Msg("failed to initialized resolvers")
}
initAllowList(*path)
initPreparedList()
startHTTP()
}
func startHTTP() {
hp := strings.SplitN(conf.HostPort, ":", 2)
if len(conf.Host) != 0 {
hp[0] = conf.Host
}
if len(conf.Port) != 0 {
hp[1] = conf.Port
}
hostPort := fmt.Sprintf("%s:%s", hp[0], hp[1])
srv := &http.Server{
Addr: hostPort,
Handler: routeHandler(),
ReadTimeout: 5 * time.Second,
WriteTimeout: 10 * time.Second,
MaxHeaderBytes: 1 << 20,
}
idleConnsClosed := make(chan struct{})
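// Graceful shutdown: the goroutine below waits for SIGINT, asks the server to
// stop accepting new connections and drain in-flight requests via
// srv.Shutdown, then closes idleConnsClosed so this function can return after
// ListenAndServe exits with http.ErrServerClosed.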
go func() {
sigint := make(chan os.Signal, 1)
signal.Notify(sigint, os.Interrupt)
<-sigint
if err := srv.Shutdown(context.Background()); err != nil {
logger.Error().Err(err).Msg("shutdown signal received")
}
close(idleConnsClosed)
}()
srv.RegisterOnShutdown(func() {
if err := db.Close(); err != nil {
logger.Error().Err(err).Msg("db closed")
}
})
fmt.Printf("%s listening on %s (%s)\n", serverName, hostPort, conf.Env)
if err := srv.ListenAndServe(); err != http.ErrServerClosed {
logger.Error().Err(err).Msg("server closed")
}
<-idleConnsClosed
}
func routeHandler() http.Handler {
mux := http.NewServeMux()
mux.Handle("/api/v1/graphql", withAuth(apiv1Http))
if conf.WebUI {
mux.Handle("/", http.FileServer(_escFS(false)))
}
fn := func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Server", serverName)
mux.ServeHTTP(w, r)
}
return http.HandlerFunc(fn)
}
func getConfigName() string {
ge := strings.ToLower(os.Getenv("GO_ENV"))
switch {
case strings.HasPrefix(ge, "pro"):
return "prod"
case strings.HasPrefix(ge, "sta"):
return "stage"
case strings.HasPrefix(ge, "tes"):
return "test"
}
return "dev"
}
func getAuthFailBlock(c *config) int {
switch c.AuthFailBlock {
case "always":
return authFailBlockAlways
case "per_query", "perquery", "query":
return authFailBlockPerQuery
case "never", "false":
return authFailBlockNever
}
return authFailBlockAlways
}
|
[
"\"GO_ENV\""
] |
[] |
[
"GO_ENV"
] |
[]
|
["GO_ENV"]
|
go
| 1 | 0 | |
sumo_rl/environment/env.py
|
import os
import sys
from pathlib import Path
from typing import Optional, Union, Tuple
import sumo_rl
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
else:
sys.exit("Please declare the environment variable 'SUMO_HOME'")
import traci
import sumolib
import gym
from gym.envs.registration import EnvSpec
import numpy as np
import pandas as pd
from .traffic_signal import TrafficSignal
from gym.utils import EzPickle, seeding
from pettingzoo import AECEnv
from pettingzoo.utils import agent_selector, wrappers
from pettingzoo.utils.conversions import parallel_wrapper_fn
LIBSUMO = 'LIBSUMO_AS_TRACI' in os.environ
def env(**kwargs):
env = SumoEnvironmentPZ(**kwargs)
env = wrappers.AssertOutOfBoundsWrapper(env)
env = wrappers.OrderEnforcingWrapper(env)
return env
parallel_env = parallel_wrapper_fn(env)
class SumoEnvironment(gym.Env):
"""
SUMO Environment for Traffic Signal Control
:param net_file: (str) SUMO .net.xml file
:param route_file: (str) SUMO .rou.xml file
:param out_csv_name: (Optional[str]) name of the .csv output with simulation results. If None no output is generated
:param use_gui: (bool) Whether to run SUMO simulation with GUI visualisation
:param virtual_display: (Optional[Tuple[int,int]]) Resolution of a virtual display for rendering
:param begin_time: (int) The time step (in seconds) the simulation starts
:param num_seconds: (int) Number of simulated seconds on SUMO, i.e. the time in seconds at which the simulation ends.
:param max_depart_delay: (int) Vehicles are discarded if they could not be inserted after max_depart_delay seconds
:param delta_time: (int) Simulation seconds between actions
:param min_green: (int) Minimum green time in a phase
:param max_green: (int) Max green time in a phase
:param single_agent: (bool) If true, it behaves like a regular gym.Env. Else, it behaves like a MultiagentEnv (https://github.com/ray-project/ray/blob/master/python/ray/rllib/env/multi_agent_env.py)
:param sumo_seed: (int/string) Random seed for sumo. If 'random' it uses a randomly chosen seed.
:param fixed_ts: (bool) If true, it will follow the phase configuration in the route_file and ignore the actions.
:param sumo_warnings: (bool) If False, remove SUMO warnings in the terminal
"""
CONNECTION_LABEL = 0 # For traci multi-client support
def __init__(
self,
net_file: str,
route_file: str,
out_csv_name: Optional[str] = None,
use_gui: bool = False,
virtual_display: Optional[Tuple[int,int]] = None,
begin_time: int = 0,
num_seconds: int = 20000,
max_depart_delay: int = 100000,
time_to_teleport: int = -1,
delta_time: int = 5,
yellow_time: int = 2,
min_green: int = 5,
max_green: int = 50,
single_agent: bool = False,
sumo_seed: Union[str,int] = 'random',
fixed_ts: bool = False,
sumo_warnings: bool = True,
):
self._net = net_file
self._route = route_file
self.use_gui = use_gui
if self.use_gui:
self._sumo_binary = sumolib.checkBinary('sumo-gui')
else:
self._sumo_binary = sumolib.checkBinary('sumo')
self.virtual_display = virtual_display
assert delta_time > yellow_time, "Time between actions must be greater than the yellow time."
self.begin_time = begin_time
self.sim_max_time = num_seconds
self.delta_time = delta_time # seconds on sumo at each step
self.max_depart_delay = max_depart_delay # Max wait time to insert a vehicle
self.time_to_teleport = time_to_teleport
self.min_green = min_green
self.max_green = max_green
self.yellow_time = yellow_time
self.single_agent = single_agent
self.sumo_seed = sumo_seed
self.fixed_ts = fixed_ts
self.sumo_warnings = sumo_warnings
self.label = str(SumoEnvironment.CONNECTION_LABEL)
SumoEnvironment.CONNECTION_LABEL += 1
self.sumo = None
if LIBSUMO:
traci.start([sumolib.checkBinary('sumo'), '-n', self._net]) # Start only to retrieve traffic light information
conn = traci
else:
traci.start([sumolib.checkBinary('sumo'), '-n', self._net], label='init_connection'+self.label)
conn = traci.getConnection('init_connection'+self.label)
self.ts_ids = list(conn.trafficlight.getIDList())
self.traffic_signals = {ts: TrafficSignal(self,
ts,
self.delta_time,
self.yellow_time,
self.min_green,
self.max_green,
self.begin_time,
conn) for ts in self.ts_ids}
conn.close()
self.vehicles = dict()
self.reward_range = (-float('inf'), float('inf'))
self.metadata = {}
self.spec = EnvSpec('SUMORL-v0')
self.run = 0
self.metrics = []
self.out_csv_name = out_csv_name
self.observations = {ts: None for ts in self.ts_ids}
self.rewards = {ts: None for ts in self.ts_ids}
def _start_simulation(self):
sumo_cmd = [self._sumo_binary,
'-n', self._net,
'-r', self._route,
'--max-depart-delay', str(self.max_depart_delay),
'--waiting-time-memory', '10000',
'--time-to-teleport', str(self.time_to_teleport)]
if self.begin_time > 0:
sumo_cmd.append('-b {}'.format(self.begin_time))
if self.sumo_seed == 'random':
sumo_cmd.append('--random')
else:
sumo_cmd.extend(['--seed', str(self.sumo_seed)])
if not self.sumo_warnings:
sumo_cmd.append('--no-warnings')
if self.use_gui:
sumo_cmd.extend(['--start', '--quit-on-end'])
if self.virtual_display is not None:
sumo_cmd.extend(['--window-size', f'{self.virtual_display[0]},{self.virtual_display[1]}'])
from pyvirtualdisplay.smartdisplay import SmartDisplay
print("Creating a virtual display.")
self.disp = SmartDisplay(size=self.virtual_display)
self.disp.start()
print("Virtual display started.")
if LIBSUMO:
traci.start(sumo_cmd)
self.sumo = traci
else:
traci.start(sumo_cmd, label=self.label)
self.sumo = traci.getConnection(self.label)
if self.use_gui:
self.sumo.gui.setSchema(traci.gui.DEFAULT_VIEW, "real world")
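# The assembled command (sketch) might look like:
#   ['sumo', '-n', 'net.net.xml', '-r', 'routes.rou.xml',
#    '--max-depart-delay', '100000', '--waiting-time-memory', '10000',
#    '--time-to-teleport', '-1', '--seed', '42']
# (the file names and seed shown here are hypothetical).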
def reset(self):
if self.run != 0:
self.close()
self.save_csv(self.out_csv_name, self.run)
self.run += 1
self.metrics = []
self._start_simulation()
self.traffic_signals = {ts: TrafficSignal(self,
ts,
self.delta_time,
self.yellow_time,
self.min_green,
self.max_green,
self.begin_time,
self.sumo) for ts in self.ts_ids}
self.vehicles = dict()
if self.single_agent:
return self._compute_observations()[self.ts_ids[0]]
else:
return self._compute_observations()
@property
def sim_step(self):
"""
Return current simulation second on SUMO
"""
return self.sumo.simulation.getTime()
def step(self, action):
# No action, follow fixed TL defined in self.phases
if action is None or action == {}:
for _ in range(self.delta_time):
self._sumo_step()
else:
self._apply_actions(action)
self._run_steps()
observations = self._compute_observations()
rewards = self._compute_rewards()
dones = self._compute_dones()
self._compute_info()
if self.single_agent:
return observations[self.ts_ids[0]], rewards[self.ts_ids[0]], dones['__all__'], {}
else:
return observations, rewards, dones, {}
def _run_steps(self):
time_to_act = False
while not time_to_act:
self._sumo_step()
for ts in self.ts_ids:
self.traffic_signals[ts].update()
if self.traffic_signals[ts].time_to_act:
time_to_act = True
def _apply_actions(self, actions):
"""
Set the next green phase for the traffic signals
:param actions: If single-agent, actions is an int between 0 and self.num_green_phases (next green phase)
If multiagent, actions is a dict {ts_id : greenPhase}
"""
if self.single_agent:
if self.traffic_signals[self.ts_ids[0]].time_to_act:
self.traffic_signals[self.ts_ids[0]].set_next_phase(actions)
else:
for ts, action in actions.items():
if self.traffic_signals[ts].time_to_act:
self.traffic_signals[ts].set_next_phase(action)
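# Illustrative action formats (sketch): single-agent -> env.step(2), where 2 is the index of
# the next green phase; multi-agent -> env.step({'tls_0': 1, 'tls_1': 0}), where the keys are
# traffic signal IDs (the IDs shown here are hypothetical).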
def _compute_dones(self):
dones = {ts_id: False for ts_id in self.ts_ids}
dones['__all__'] = self.sim_step > self.sim_max_time
return dones
def _compute_info(self):
info = self._compute_step_info()
self.metrics.append(info)
def _compute_observations(self):
self.observations.update({ts: self.traffic_signals[ts].compute_observation() for ts in self.ts_ids if self.traffic_signals[ts].time_to_act})
return {ts: self.observations[ts].copy() for ts in self.observations.keys() if self.traffic_signals[ts].time_to_act}
def _compute_rewards(self):
self.rewards.update({ts: self.traffic_signals[ts].compute_reward() for ts in self.ts_ids if self.traffic_signals[ts].time_to_act})
return {ts: self.rewards[ts] for ts in self.rewards.keys() if self.traffic_signals[ts].time_to_act}
@property
def observation_space(self):
return self.traffic_signals[self.ts_ids[0]].observation_space
@property
def action_space(self):
return self.traffic_signals[self.ts_ids[0]].action_space
def observation_spaces(self, ts_id):
return self.traffic_signals[ts_id].observation_space
def action_spaces(self, ts_id):
return self.traffic_signals[ts_id].action_space
def _sumo_step(self):
self.sumo.simulationStep()
def _compute_step_info(self):
return {
'step_time': self.sim_step,
'reward': self.traffic_signals[self.ts_ids[0]].last_reward,
'total_stopped': sum(self.traffic_signals[ts].get_total_queued() for ts in self.ts_ids),
'total_wait_time': sum(sum(self.traffic_signals[ts].get_waiting_time_per_lane()) for ts in self.ts_ids)
}
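# A single metrics entry (sketch) might look like:
#   {'step_time': 120.0, 'reward': -3.5, 'total_stopped': 14, 'total_wait_time': 230.0}
# (the values shown here are hypothetical).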
def close(self):
if self.sumo is None:
return
if not LIBSUMO:
traci.switch(self.label)
traci.close()
try:
self.disp.stop()
except AttributeError:
pass
self.sumo = None
def __del__(self):
self.close()
def render(self, mode='human'):
if self.virtual_display:
#img = self.sumo.gui.screenshot(traci.gui.DEFAULT_VIEW,
# f"temp/img{self.sim_step}.jpg",
# width=self.virtual_display[0],
# height=self.virtual_display[1])
img = self.disp.grab()
if mode == 'rgb_array':
return np.array(img)
return img
def save_csv(self, out_csv_name, run):
if out_csv_name is not None:
df = pd.DataFrame(self.metrics)
Path(Path(out_csv_name).parent).mkdir(parents=True, exist_ok=True)
df.to_csv(out_csv_name + '_conn{}_run{}'.format(self.label, run) + '.csv', index=False)
# The functions below are for discrete state spaces
def encode(self, state, ts_id):
phase = int(np.where(state[:self.traffic_signals[ts_id].num_green_phases] == 1)[0])
min_green = state[self.traffic_signals[ts_id].num_green_phases]
density_queue = [self._discretize_density(d) for d in state[self.traffic_signals[ts_id].num_green_phases + 1:]]
# tuples are hashable and can be used as keys in a Python dictionary
return tuple([phase, min_green] + density_queue)
def _discretize_density(self, density):
return min(int(density*10), 9)
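# Worked example (sketch): a density of 0.0 maps to bin 0, 0.37 to bin 3, and anything
# >= 0.9 is clamped to bin 9, so each density/queue value becomes one of 10 discrete levels.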
class SumoEnvironmentPZ(AECEnv, EzPickle):
metadata = {'render.modes': ['human', 'rgb_array'], 'name': "sumo_rl_v0"}
def __init__(self, **kwargs):
EzPickle.__init__(self, **kwargs)
self._kwargs = kwargs
self.seed()
self.env = SumoEnvironment(**self._kwargs)
self.agents = self.env.ts_ids
self.possible_agents = self.env.ts_ids
self._agent_selector = agent_selector(self.agents)
self.agent_selection = self._agent_selector.reset()
# spaces
self.action_spaces = {a: self.env.action_spaces(a) for a in self.agents}
self.observation_spaces = {a: self.env.observation_spaces(a) for a in self.agents}
# dicts
self.rewards = {a: 0 for a in self.agents}
self.dones = {a: False for a in self.agents}
self.infos = {a: {} for a in self.agents}
def seed(self, seed=None):
self.randomizer, seed = seeding.np_random(seed)
def reset(self):
self.env.reset()
self.agents = self.possible_agents[:]
self.agent_selection = self._agent_selector.reset()
self.rewards = {agent: 0 for agent in self.agents}
self._cumulative_rewards = {agent: 0 for agent in self.agents}
self.dones = {agent: False for agent in self.agents}
self.infos = {agent: {} for agent in self.agents}
def observation_space(self, agent):
return self.observation_spaces[agent]
def action_space(self, agent):
return self.action_spaces[agent]
def observe(self, agent):
obs = self.env.observations[agent].copy()
return obs
def state(self):
raise NotImplementedError('Method state() currently not implemented.')
def close(self):
self.env.close()
def render(self, mode='human'):
return self.env.render(mode)
def save_csv(self, out_csv_name, run):
self.env.save_csv(out_csv_name, run)
def step(self, action):
if self.dones[self.agent_selection]:
return self._was_done_step(action)
agent = self.agent_selection
if not self.action_spaces[agent].contains(action):
raise Exception('Action for agent {} must be in Discrete({}). '
'It is currently {}'.format(agent, self.action_spaces[agent].n, action))
self.env._apply_actions({agent: action})
if self._agent_selector.is_last():
self.env._run_steps()
self.env._compute_observations()
self.rewards = self.env._compute_rewards()
self.env._compute_info()
else:
self._clear_rewards()
done = self.env._compute_dones()['__all__']
self.dones = {a : done for a in self.agents}
self.agent_selection = self._agent_selector.next()
self._cumulative_rewards[agent] = 0
self._accumulate_rewards()
|
[] |
[] |
[
"SUMO_HOME"
] |
[]
|
["SUMO_HOME"]
|
python
| 1 | 0 | |
python/ray/tests/test_placement_group.py
|
import pytest
import os
import sys
import time
try:
import pytest_timeout
except ImportError:
pytest_timeout = None
import ray
from ray.test_utils import (generate_system_config_map, get_other_nodes,
kill_actor_and_wait_for_failure,
run_string_as_driver, wait_for_condition,
get_error_message)
import ray.cluster_utils
from ray.exceptions import RaySystemError
from ray._raylet import PlacementGroupID
from ray.util.placement_group import (PlacementGroup, placement_group,
remove_placement_group,
get_current_placement_group)
from ray.util.client.ray_client_helpers import connect_to_client_or_not
@ray.remote
class Increase:
def method(self, x):
return x + 2
@pytest.mark.parametrize("connect_to_client", [True, False])
def test_placement_ready(ray_start_regular, connect_to_client):
@ray.remote
class Actor:
def __init__(self):
pass
def v(self):
return 10
# "bundle" is a resource name reserved for placement groups and can't be used in bundles
with pytest.raises(Exception):
ray.util.placement_group(bundles=[{"bundle": 1}])
# This test checks that even when all resources in the
# bundle are allocated, we are still able to return from ready()
# since ready() uses 0 CPU.
with connect_to_client_or_not(connect_to_client):
pg = ray.util.placement_group(bundles=[{"CPU": 1}])
ray.get(pg.ready())
a = Actor.options(num_cpus=1, placement_group=pg).remote()
ray.get(a.v.remote())
ray.get(pg.ready())
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_placement_group_pack(ray_start_cluster, connect_to_client):
@ray.remote(num_cpus=2)
class Actor(object):
def __init__(self):
self.n = 0
def value(self):
return self.n
cluster = ray_start_cluster
num_nodes = 2
for _ in range(num_nodes):
cluster.add_node(num_cpus=4)
ray.init(address=cluster.address)
with connect_to_client_or_not(connect_to_client):
placement_group = ray.util.placement_group(
name="name",
strategy="PACK",
bundles=[
{
"CPU": 2,
"GPU": 0 # Test 0 resource spec doesn't break tests.
},
{
"CPU": 2
}
])
ray.get(placement_group.ready())
actor_1 = Actor.options(
placement_group=placement_group,
placement_group_bundle_index=0).remote()
actor_2 = Actor.options(
placement_group=placement_group,
placement_group_bundle_index=1).remote()
ray.get(actor_1.value.remote())
ray.get(actor_2.value.remote())
# Get all actors.
actor_infos = ray.state.actors()
# Make sure both actors are colocated on the same node.
actor_info_1 = actor_infos.get(actor_1._actor_id.hex())
actor_info_2 = actor_infos.get(actor_2._actor_id.hex())
assert actor_info_1 and actor_info_2
node_of_actor_1 = actor_info_1["Address"]["NodeID"]
node_of_actor_2 = actor_info_2["Address"]["NodeID"]
assert node_of_actor_1 == node_of_actor_2
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_placement_group_strict_pack(ray_start_cluster, connect_to_client):
@ray.remote(num_cpus=2)
class Actor(object):
def __init__(self):
self.n = 0
def value(self):
return self.n
cluster = ray_start_cluster
num_nodes = 2
for _ in range(num_nodes):
cluster.add_node(num_cpus=4)
ray.init(address=cluster.address)
with connect_to_client_or_not(connect_to_client):
placement_group = ray.util.placement_group(
name="name",
strategy="STRICT_PACK",
bundles=[
{
"memory": 50 * 1024 *
1024, # Test memory resource spec doesn't break tests.
"CPU": 2
},
{
"CPU": 2
}
])
ray.get(placement_group.ready())
actor_1 = Actor.options(
placement_group=placement_group,
placement_group_bundle_index=0).remote()
actor_2 = Actor.options(
placement_group=placement_group,
placement_group_bundle_index=1).remote()
ray.get(actor_1.value.remote())
ray.get(actor_2.value.remote())
# Get all actors.
actor_infos = ray.state.actors()
# Make sure both actors are colocated on the same node.
actor_info_1 = actor_infos.get(actor_1._actor_id.hex())
actor_info_2 = actor_infos.get(actor_2._actor_id.hex())
assert actor_info_1 and actor_info_2
node_of_actor_1 = actor_info_1["Address"]["NodeID"]
node_of_actor_2 = actor_info_2["Address"]["NodeID"]
assert node_of_actor_1 == node_of_actor_2
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_placement_group_spread(ray_start_cluster, connect_to_client):
@ray.remote(num_cpus=2)
class Actor(object):
def __init__(self):
self.n = 0
def value(self):
return self.n
cluster = ray_start_cluster
num_nodes = 2
for _ in range(num_nodes):
cluster.add_node(num_cpus=4)
ray.init(address=cluster.address)
with connect_to_client_or_not(connect_to_client):
placement_group = ray.util.placement_group(
name="name", strategy="SPREAD", bundles=[{
"CPU": 2
}, {
"CPU": 2
}])
ray.get(placement_group.ready())
actor_1 = Actor.options(
placement_group=placement_group,
placement_group_bundle_index=0).remote()
actor_2 = Actor.options(
placement_group=placement_group,
placement_group_bundle_index=1).remote()
ray.get(actor_1.value.remote())
ray.get(actor_2.value.remote())
# Get all actors.
actor_infos = ray.state.actors()
# Make sure the two actors are located on separate nodes.
actor_info_1 = actor_infos.get(actor_1._actor_id.hex())
actor_info_2 = actor_infos.get(actor_2._actor_id.hex())
assert actor_info_1 and actor_info_2
node_of_actor_1 = actor_info_1["Address"]["NodeID"]
node_of_actor_2 = actor_info_2["Address"]["NodeID"]
assert node_of_actor_1 != node_of_actor_2
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_placement_group_strict_spread(ray_start_cluster, connect_to_client):
@ray.remote(num_cpus=2)
class Actor(object):
def __init__(self):
self.n = 0
def value(self):
return self.n
cluster = ray_start_cluster
num_nodes = 3
for _ in range(num_nodes):
cluster.add_node(num_cpus=4)
ray.init(address=cluster.address)
with connect_to_client_or_not(connect_to_client):
placement_group = ray.util.placement_group(
name="name",
strategy="STRICT_SPREAD",
bundles=[{
"CPU": 2
}, {
"CPU": 2
}, {
"CPU": 2
}])
ray.get(placement_group.ready())
actor_1 = Actor.options(
placement_group=placement_group,
placement_group_bundle_index=0).remote()
actor_2 = Actor.options(
placement_group=placement_group,
placement_group_bundle_index=1).remote()
actor_3 = Actor.options(
placement_group=placement_group,
placement_group_bundle_index=2).remote()
ray.get(actor_1.value.remote())
ray.get(actor_2.value.remote())
ray.get(actor_3.value.remote())
# Get all actors.
actor_infos = ray.state.actors()
# Make sure all three actors are located on separate nodes.
actor_info_1 = actor_infos.get(actor_1._actor_id.hex())
actor_info_2 = actor_infos.get(actor_2._actor_id.hex())
actor_info_3 = actor_infos.get(actor_3._actor_id.hex())
assert actor_info_1 and actor_info_2 and actor_info_3
node_of_actor_1 = actor_info_1["Address"]["NodeID"]
node_of_actor_2 = actor_info_2["Address"]["NodeID"]
node_of_actor_3 = actor_info_3["Address"]["NodeID"]
assert node_of_actor_1 != node_of_actor_2
assert node_of_actor_1 != node_of_actor_3
assert node_of_actor_2 != node_of_actor_3
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_placement_group_actor_resource_ids(ray_start_cluster,
connect_to_client):
@ray.remote(num_cpus=1)
class F:
def f(self):
return ray.worker.get_resource_ids()
cluster = ray_start_cluster
num_nodes = 1
for _ in range(num_nodes):
cluster.add_node(num_cpus=4)
ray.init(address=cluster.address)
with connect_to_client_or_not(connect_to_client):
g1 = ray.util.placement_group([{"CPU": 2}])
a1 = F.options(placement_group=g1).remote()
resources = ray.get(a1.f.remote())
assert len(resources) == 1, resources
assert "CPU_group_" in list(resources.keys())[0], resources
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_placement_group_task_resource_ids(ray_start_cluster,
connect_to_client):
@ray.remote(num_cpus=1)
def f():
return ray.worker.get_resource_ids()
cluster = ray_start_cluster
num_nodes = 1
for _ in range(num_nodes):
cluster.add_node(num_cpus=4)
ray.init(address=cluster.address)
with connect_to_client_or_not(connect_to_client):
g1 = ray.util.placement_group([{"CPU": 2}])
o1 = f.options(placement_group=g1).remote()
resources = ray.get(o1)
assert len(resources) == 1, resources
assert "CPU_group_" in list(resources.keys())[0], resources
assert "CPU_group_0_" not in list(resources.keys())[0], resources
# Now retry with a bundle index constraint.
o1 = f.options(
placement_group=g1, placement_group_bundle_index=0).remote()
resources = ray.get(o1)
assert len(resources) == 2, resources
keys = list(resources.keys())
assert "CPU_group_" in keys[0], resources
assert "CPU_group_" in keys[1], resources
assert ("CPU_group_0_" in keys[0]
or "CPU_group_0_" in keys[1]), resources
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_placement_group_hang(ray_start_cluster, connect_to_client):
@ray.remote(num_cpus=1)
def f():
return ray.worker.get_resource_ids()
cluster = ray_start_cluster
num_nodes = 1
for _ in range(num_nodes):
cluster.add_node(num_cpus=4)
ray.init(address=cluster.address)
with connect_to_client_or_not(connect_to_client):
# Warm workers up, so that this triggers the hang race.
ray.get(f.remote())
g1 = ray.util.placement_group([{"CPU": 2}])
# This will start out infeasible. The placement group will then be
# created and it transitions to feasible.
o1 = f.options(placement_group=g1).remote()
resources = ray.get(o1)
assert len(resources) == 1, resources
assert "CPU_group_" in list(resources.keys())[0], resources
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_remove_placement_group(ray_start_cluster, connect_to_client):
cluster = ray_start_cluster
cluster.add_node(num_cpus=4)
ray.init(address=cluster.address)
with connect_to_client_or_not(connect_to_client):
# First try to remove a placement group that doesn't
# exist. This should not do anything.
random_group_id = PlacementGroupID.from_random()
random_placement_group = PlacementGroup(random_group_id)
for _ in range(3):
ray.util.remove_placement_group(random_placement_group)
# Removing a placement group as soon as it is
# created should work.
placement_group = ray.util.placement_group([{"CPU": 2}, {"CPU": 2}])
assert placement_group.wait(10)
ray.util.remove_placement_group(placement_group)
def is_placement_group_removed():
table = ray.util.placement_group_table(placement_group)
if "state" not in table:
return False
return table["state"] == "REMOVED"
wait_for_condition(is_placement_group_removed)
# Now let's create a placement group.
placement_group = ray.util.placement_group([{"CPU": 2}, {"CPU": 2}])
assert placement_group.wait(10)
# Create an actor that occupies resources.
@ray.remote(num_cpus=2)
class A:
def f(self):
return 3
# Currently, there's no way to prevent
# tasks from being retried for a removed placement group.
# Set max_retries=0 for testing.
# TODO(sang): Handle this edge case.
@ray.remote(num_cpus=2, max_retries=0)
def long_running_task():
print(os.getpid())
import time
time.sleep(50)
# Schedule a long running task and actor.
task_ref = long_running_task.options(
placement_group=placement_group).remote()
a = A.options(placement_group=placement_group).remote()
assert ray.get(a.f.remote()) == 3
ray.util.remove_placement_group(placement_group)
# Subsequent remove request shouldn't do anything.
for _ in range(3):
ray.util.remove_placement_group(placement_group)
# Make sure placement group resources are
# released and we can schedule this task.
@ray.remote(num_cpus=4)
def f():
return 3
assert ray.get(f.remote()) == 3
# Since the placement group is removed,
# the actor should've been killed.
# That means this request should fail.
with pytest.raises(ray.exceptions.RayActorError, match="actor died"):
ray.get(a.f.remote(), timeout=3.0)
with pytest.raises(ray.exceptions.WorkerCrashedError):
ray.get(task_ref)
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_remove_pending_placement_group(ray_start_cluster, connect_to_client):
cluster = ray_start_cluster
cluster.add_node(num_cpus=4)
ray.init(address=cluster.address)
with connect_to_client_or_not(connect_to_client):
# Create a placement group that cannot be scheduled now.
placement_group = ray.util.placement_group([{"GPU": 2}, {"CPU": 2}])
ray.util.remove_placement_group(placement_group)
# TODO(sang): Add state check here.
@ray.remote(num_cpus=4)
def f():
return 3
# Make sure this task is still schedulable.
assert ray.get(f.remote()) == 3
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_placement_group_table(ray_start_cluster, connect_to_client):
@ray.remote(num_cpus=2)
class Actor(object):
def __init__(self):
self.n = 0
def value(self):
return self.n
cluster = ray_start_cluster
num_nodes = 2
for _ in range(num_nodes):
cluster.add_node(num_cpus=4)
ray.init(address=cluster.address)
with connect_to_client_or_not(connect_to_client):
# Originally placement group creation should be pending because
# there are no resources.
name = "name"
strategy = "PACK"
bundles = [{"CPU": 2, "GPU": 1}, {"CPU": 2}]
placement_group = ray.util.placement_group(
name=name, strategy=strategy, bundles=bundles)
result = ray.util.placement_group_table(placement_group)
assert result["name"] == name
assert result["strategy"] == strategy
for i in range(len(bundles)):
assert bundles[i] == result["bundles"][i]
assert result["state"] == "PENDING"
# Now the placement group should be scheduled.
cluster.add_node(num_cpus=5, num_gpus=1)
cluster.wait_for_nodes()
actor_1 = Actor.options(
placement_group=placement_group,
placement_group_bundle_index=0).remote()
ray.get(actor_1.value.remote())
result = ray.util.placement_group_table(placement_group)
assert result["state"] == "CREATED"
# Add two more placement groups for the placement group table test.
second_strategy = "SPREAD"
ray.util.placement_group(
name="second_placement_group",
strategy=second_strategy,
bundles=bundles)
ray.util.placement_group(
name="third_placement_group",
strategy=second_strategy,
bundles=bundles)
placement_group_table = ray.util.placement_group_table()
assert len(placement_group_table) == 3
true_name_set = {
"name", "second_placement_group", "third_placement_group"
}
get_name_set = set()
for _, placement_group_data in placement_group_table.items():
get_name_set.add(placement_group_data["name"])
assert true_name_set == get_name_set
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_cuda_visible_devices(ray_start_cluster, connect_to_client):
@ray.remote(num_gpus=1)
def f():
return os.environ["CUDA_VISIBLE_DEVICES"]
cluster = ray_start_cluster
num_nodes = 1
for _ in range(num_nodes):
cluster.add_node(num_gpus=1)
ray.init(address=cluster.address)
with connect_to_client_or_not(connect_to_client):
g1 = ray.util.placement_group([{"CPU": 1, "GPU": 1}])
o1 = f.options(placement_group=g1).remote()
devices = ray.get(o1)
assert devices == "0", devices
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_placement_group_reschedule_when_node_dead(ray_start_cluster,
connect_to_client):
@ray.remote(num_cpus=1)
class Actor(object):
def __init__(self):
self.n = 0
def value(self):
return self.n
cluster = ray_start_cluster
cluster.add_node(num_cpus=4)
cluster.add_node(num_cpus=4)
cluster.add_node(num_cpus=4)
cluster.wait_for_nodes()
ray.init(address=cluster.address, namespace="")
# Make sure the head node and both worker nodes are alive.
nodes = ray.nodes()
assert len(nodes) == 3
assert nodes[0]["alive"] and nodes[1]["alive"] and nodes[2]["alive"]
with connect_to_client_or_not(connect_to_client):
placement_group = ray.util.placement_group(
name="name",
strategy="SPREAD",
bundles=[{
"CPU": 2
}, {
"CPU": 2
}, {
"CPU": 2
}])
actor_1 = Actor.options(
placement_group=placement_group,
placement_group_bundle_index=0,
lifetime="detached").remote()
actor_2 = Actor.options(
placement_group=placement_group,
placement_group_bundle_index=1,
lifetime="detached").remote()
actor_3 = Actor.options(
placement_group=placement_group,
placement_group_bundle_index=2,
lifetime="detached").remote()
ray.get(actor_1.value.remote())
ray.get(actor_2.value.remote())
ray.get(actor_3.value.remote())
cluster.remove_node(get_other_nodes(cluster, exclude_head=True)[-1])
cluster.wait_for_nodes()
actor_4 = Actor.options(
placement_group=placement_group,
placement_group_bundle_index=0,
lifetime="detached").remote()
actor_5 = Actor.options(
placement_group=placement_group,
placement_group_bundle_index=1,
lifetime="detached").remote()
actor_6 = Actor.options(
placement_group=placement_group,
placement_group_bundle_index=2,
lifetime="detached").remote()
ray.get(actor_4.value.remote())
ray.get(actor_5.value.remote())
ray.get(actor_6.value.remote())
ray.shutdown()
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_check_bundle_index(ray_start_cluster, connect_to_client):
@ray.remote(num_cpus=2)
class Actor(object):
def __init__(self):
self.n = 0
def value(self):
return self.n
cluster = ray_start_cluster
cluster.add_node(num_cpus=4)
ray.init(address=cluster.address)
with connect_to_client_or_not(connect_to_client):
placement_group = ray.util.placement_group(
name="name", strategy="SPREAD", bundles=[{
"CPU": 2
}, {
"CPU": 2
}])
error_count = 0
try:
Actor.options(
placement_group=placement_group,
placement_group_bundle_index=3).remote()
except ValueError:
error_count = error_count + 1
assert error_count == 1
try:
Actor.options(
placement_group=placement_group,
placement_group_bundle_index=-2).remote()
except ValueError:
error_count = error_count + 1
assert error_count == 2
try:
Actor.options(placement_group_bundle_index=0).remote()
except ValueError:
error_count = error_count + 1
assert error_count == 3
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_pending_placement_group_wait(ray_start_cluster, connect_to_client):
cluster = ray_start_cluster
[cluster.add_node(num_cpus=2) for _ in range(1)]
ray.init(address=cluster.address)
cluster.wait_for_nodes()
with connect_to_client_or_not(connect_to_client):
# Wait on placement group that cannot be created.
placement_group = ray.util.placement_group(
name="name",
strategy="SPREAD",
bundles=[
{
"CPU": 2
},
{
"CPU": 2
},
{
"GPU": 2
},
])
ready, unready = ray.wait([placement_group.ready()], timeout=0.1)
assert len(unready) == 1
assert len(ready) == 0
table = ray.util.placement_group_table(placement_group)
assert table["state"] == "PENDING"
with pytest.raises(ray.exceptions.GetTimeoutError):
ray.get(placement_group.ready(), timeout=0.1)
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_placement_group_wait(ray_start_cluster, connect_to_client):
cluster = ray_start_cluster
[cluster.add_node(num_cpus=2) for _ in range(2)]
ray.init(address=cluster.address)
cluster.wait_for_nodes()
with connect_to_client_or_not(connect_to_client):
# Wait on placement group that cannot be created.
placement_group = ray.util.placement_group(
name="name", strategy="SPREAD", bundles=[
{
"CPU": 2
},
{
"CPU": 2
},
])
ready, unready = ray.wait([placement_group.ready()])
assert len(unready) == 0
assert len(ready) == 1
table = ray.util.placement_group_table(placement_group)
assert table["state"] == "CREATED"
pg = ray.get(placement_group.ready())
assert pg.bundle_specs == placement_group.bundle_specs
assert pg.id.binary() == placement_group.id.binary()
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_schedule_placement_group_when_node_add(ray_start_cluster,
connect_to_client):
cluster = ray_start_cluster
cluster.add_node(num_cpus=4)
ray.init(address=cluster.address)
with connect_to_client_or_not(connect_to_client):
# Creating a placement group that cannot be satisfied yet.
placement_group = ray.util.placement_group([{"GPU": 2}, {"CPU": 2}])
def is_placement_group_created():
table = ray.util.placement_group_table(placement_group)
if "state" not in table:
return False
return table["state"] == "CREATED"
# Add a node that has GPU.
cluster.add_node(num_cpus=4, num_gpus=4)
# Make sure the placement group is created.
wait_for_condition(is_placement_group_created)
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_atomic_creation(ray_start_cluster, connect_to_client):
# Setup cluster.
cluster = ray_start_cluster
bundle_cpu_size = 2
bundle_per_node = 2
num_nodes = 2
[
cluster.add_node(num_cpus=bundle_cpu_size * bundle_per_node)
for _ in range(num_nodes)
]
ray.init(address=cluster.address)
@ray.remote(num_cpus=1)
class NormalActor:
def ping(self):
pass
@ray.remote(num_cpus=3)
def bothering_task():
time.sleep(6)
return True
with connect_to_client_or_not(connect_to_client):
# Schedule tasks to fail initial placement group creation.
tasks = [bothering_task.remote() for _ in range(2)]
# Make sure the two common tasks have been scheduled.
def tasks_scheduled():
return ray.available_resources()["CPU"] == 2.0
wait_for_condition(tasks_scheduled)
# Create a placement group whose bundle scheduling will fail for now.
# The spread strategy is used to make the test less flaky.
pg = ray.util.placement_group(
name="name",
strategy="SPREAD",
bundles=[{
"CPU": bundle_cpu_size
} for _ in range(num_nodes * bundle_per_node)])
# Create a placement group actor.
# This shouldn't be scheduled because atomic
# placement group creation should've failed.
pg_actor = NormalActor.options(
placement_group=pg,
placement_group_bundle_index=num_nodes * bundle_per_node -
1).remote()
# Wait on the placement group now. It should be unready
# because normal actor takes resources that are required
# for one of the bundles to be created.
ready, unready = ray.wait([pg.ready()], timeout=0.5)
assert len(ready) == 0
assert len(unready) == 1
# Wait until all tasks are done.
assert all(ray.get(tasks))
# Wait on the placement group creation. Since resources are now
# available, it should be ready soon.
ready, unready = ray.wait([pg.ready()])
assert len(ready) == 1
assert len(unready) == 0
# Confirm that the placement group actor is created. It will
# raise an exception if actor was scheduled before placement
# group was created thus it checks atomicity.
ray.get(pg_actor.ping.remote(), timeout=3.0)
ray.kill(pg_actor)
# Make sure atomic creation failure didn't impact resources.
@ray.remote(num_cpus=bundle_cpu_size)
def resource_check():
return True
# These should hang because all resources
# are claimed by the placement group.
check_without_pg = [
resource_check.remote() for _ in range(bundle_per_node * num_nodes)
]
# These should all be scheduled, one on each bundle.
check_with_pg = [
resource_check.options(
placement_group=pg, placement_group_bundle_index=i).remote()
for i in range(bundle_per_node * num_nodes)
]
# Make sure these are hanging.
ready, unready = ray.wait(check_without_pg, timeout=0)
assert len(ready) == 0
assert len(unready) == bundle_per_node * num_nodes
# Make sure these are all scheduled.
assert all(ray.get(check_with_pg))
ray.util.remove_placement_group(pg)
def pg_removed():
return ray.util.placement_group_table(pg)["state"] == "REMOVED"
wait_for_condition(pg_removed)
# Make sure check without pgs are all
# scheduled properly because resources are cleaned up.
assert all(ray.get(check_without_pg))
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_mini_integration(ray_start_cluster, connect_to_client):
# Create bundles as many as number of gpus in the cluster.
# Do some random work and make sure all resources are properly recovered.
cluster = ray_start_cluster
num_nodes = 5
per_bundle_gpus = 2
gpu_per_node = 4
total_gpus = num_nodes * per_bundle_gpus * gpu_per_node
per_node_gpus = per_bundle_gpus * gpu_per_node
bundles_per_pg = 2
total_num_pg = total_gpus // (bundles_per_pg * per_bundle_gpus)
[
cluster.add_node(num_cpus=2, num_gpus=per_bundle_gpus * gpu_per_node)
for _ in range(num_nodes)
]
cluster.wait_for_nodes()
ray.init(address=cluster.address)
with connect_to_client_or_not(connect_to_client):
@ray.remote(num_cpus=0, num_gpus=1)
def random_tasks():
import time
import random
sleep_time = random.uniform(0.1, 0.2)
time.sleep(sleep_time)
return True
pgs = []
pg_tasks = []
# total bundle gpu usage = bundles_per_pg*total_num_pg*per_bundle_gpus
# Note this is half of total
for index in range(total_num_pg):
pgs.append(
ray.util.placement_group(
name=f"name{index}",
strategy="PACK",
bundles=[{
"GPU": per_bundle_gpus
} for _ in range(bundles_per_pg)]))
# Schedule tasks.
for i in range(total_num_pg):
pg = pgs[i]
pg_tasks.append([
random_tasks.options(
placement_group=pg,
placement_group_bundle_index=bundle_index).remote()
for bundle_index in range(bundles_per_pg)
])
# Make sure tasks are done and we remove placement groups.
num_removed_pg = 0
pg_indexes = [2, 3, 1, 7, 8, 9, 0, 6, 4, 5]
while num_removed_pg < total_num_pg:
index = pg_indexes[num_removed_pg]
pg = pgs[index]
assert all(ray.get(pg_tasks[index]))
ray.util.remove_placement_group(pg)
num_removed_pg += 1
@ray.remote(num_cpus=2, num_gpus=per_node_gpus)
class A:
def ping(self):
return True
# Make sure all resources are properly returned by scheduling
# actors that take up all existing resources.
actors = [A.remote() for _ in range(num_nodes)]
assert all(ray.get([a.ping.remote() for a in actors]))
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_capture_child_actors(ray_start_cluster, connect_to_client):
cluster = ray_start_cluster
total_num_actors = 4
for _ in range(2):
cluster.add_node(num_cpus=total_num_actors)
ray.init(address=cluster.address)
with connect_to_client_or_not(connect_to_client):
pg = ray.util.placement_group(
[{
"CPU": 2
}, {
"CPU": 2
}], strategy="STRICT_PACK")
ray.get(pg.ready())
# If get_current_placement_group is used when the current worker/driver
# doesn't belong to any placement group, it should return None.
assert get_current_placement_group() is None
# Test actors first.
@ray.remote(num_cpus=1)
class NestedActor:
def ready(self):
return True
@ray.remote(num_cpus=1)
class Actor:
def __init__(self):
self.actors = []
def ready(self):
return True
def schedule_nested_actor(self):
# Make sure we can capture the current placement group.
assert get_current_placement_group() is not None
# Actors should be implicitly captured.
actor = NestedActor.remote()
ray.get(actor.ready.remote())
self.actors.append(actor)
def schedule_nested_actor_outside_pg(self):
# Don't use placement group.
actor = NestedActor.options(placement_group=None).remote()
ray.get(actor.ready.remote())
self.actors.append(actor)
a = Actor.options(placement_group=pg).remote()
ray.get(a.ready.remote())
# 1 top level actor + 3 children.
for _ in range(total_num_actors - 1):
ray.get(a.schedule_nested_actor.remote())
# Make sure all the actors are scheduled on the same node.
# (why? The placement group has STRICT_PACK strategy).
node_id_set = set()
for actor_info in ray.state.actors().values():
if actor_info["State"] == ray.gcs_utils.ActorTableData.ALIVE:
node_id = actor_info["Address"]["NodeID"]
node_id_set.add(node_id)
# Since all node id should be identical, set should be equal to 1.
assert len(node_id_set) == 1
# Kill an actor and wait until it is killed.
kill_actor_and_wait_for_failure(a)
with pytest.raises(ray.exceptions.RayActorError):
ray.get(a.ready.remote())
# Now create an actor, but do not capture the current tasks
a = Actor.options(
placement_group=pg,
placement_group_capture_child_tasks=False).remote()
ray.get(a.ready.remote())
# 1 top level actor + 3 children.
for _ in range(total_num_actors - 1):
ray.get(a.schedule_nested_actor.remote())
# Make sure all the actors are not scheduled on the same node.
# It is because the child tasks are not scheduled on the same
# placement group.
node_id_set = set()
for actor_info in ray.state.actors().values():
if actor_info["State"] == ray.gcs_utils.ActorTableData.ALIVE:
node_id = actor_info["Address"]["NodeID"]
node_id_set.add(node_id)
assert len(node_id_set) == 2
# Kill an actor and wait until it is killed.
kill_actor_and_wait_for_failure(a)
with pytest.raises(ray.exceptions.RayActorError):
ray.get(a.ready.remote())
# Lastly, make sure when None is specified, actors are not scheduled
# on the same placement group.
a = Actor.options(placement_group=pg).remote()
ray.get(a.ready.remote())
# 1 top level actor + 3 children.
for _ in range(total_num_actors - 1):
ray.get(a.schedule_nested_actor_outside_pg.remote())
# Make sure all the actors are not scheduled on the same node.
# It is because the child tasks are not scheduled on the same
# placement group.
node_id_set = set()
for actor_info in ray.state.actors().values():
if actor_info["State"] == ray.gcs_utils.ActorTableData.ALIVE:
node_id = actor_info["Address"]["NodeID"]
node_id_set.add(node_id)
assert len(node_id_set) == 2
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_capture_child_tasks(ray_start_cluster, connect_to_client):
cluster = ray_start_cluster
total_num_tasks = 4
for _ in range(2):
cluster.add_node(num_cpus=total_num_tasks, num_gpus=total_num_tasks)
ray.init(address=cluster.address)
with connect_to_client_or_not(connect_to_client):
pg = ray.util.placement_group(
[{
"CPU": 2,
"GPU": 2,
}, {
"CPU": 2,
"GPU": 2,
}],
strategy="STRICT_PACK")
ray.get(pg.ready())
# If get_current_placement_group is used when the current worker/driver
# doesn't belong to any placement group, it should return None.
assert get_current_placement_group() is None
# Test if tasks capture child tasks.
@ray.remote
def task():
return get_current_placement_group()
@ray.remote
def create_nested_task(child_cpu, child_gpu, set_none=False):
assert get_current_placement_group() is not None
kwargs = {
"num_cpus": child_cpu,
"num_gpus": child_gpu,
}
if set_none:
kwargs["placement_group"] = None
return ray.get([task.options(**kwargs).remote() for _ in range(3)])
t = create_nested_task.options(
num_cpus=1, num_gpus=0, placement_group=pg).remote(1, 0)
pgs = ray.get(t)
# Every task should have current placement group because they
# should be implicitly captured by default.
assert None not in pgs
t1 = create_nested_task.options(
num_cpus=1, num_gpus=0, placement_group=pg).remote(1, 0, True)
pgs = ray.get(t1)
# Every task should have no placement group since it's explicitly set to None.
assert set(pgs) == {None}
# Test if tasks don't capture child tasks when the option is off.
t2 = create_nested_task.options(
num_cpus=0,
num_gpus=1,
placement_group=pg,
placement_group_capture_child_tasks=False).remote(0, 1)
pgs = ray.get(t2)
# All placement groups should be None since we don't capture child
# tasks.
assert not all(pgs)
def test_ready_warning_suppressed(ray_start_regular, error_pubsub):
p = error_pubsub
# Create an infeasible pg.
pg = ray.util.placement_group([{"CPU": 2}] * 2, strategy="STRICT_PACK")
with pytest.raises(ray.exceptions.GetTimeoutError):
ray.get(pg.ready(), timeout=0.5)
errors = get_error_message(
p, 1, ray.ray_constants.INFEASIBLE_TASK_ERROR, timeout=0.1)
assert len(errors) == 0
def test_automatic_cleanup_job(ray_start_cluster):
# Make sure the placement groups created by a
# job, actor, and task are cleaned when the job is done.
cluster = ray_start_cluster
num_nodes = 3
num_cpu_per_node = 4
# Create 3 nodes cluster.
for _ in range(num_nodes):
cluster.add_node(num_cpus=num_cpu_per_node)
cluster.wait_for_nodes()
info = ray.init(address=cluster.address)
available_cpus = ray.available_resources()["CPU"]
assert available_cpus == num_nodes * num_cpu_per_node
driver_code = f"""
import ray
ray.init(address="{info["redis_address"]}")
def create_pg():
pg = ray.util.placement_group(
[{{"CPU": 1}} for _ in range(3)],
strategy="STRICT_SPREAD")
ray.get(pg.ready())
return pg
@ray.remote(num_cpus=0)
def f():
create_pg()
@ray.remote(num_cpus=0)
class A:
def create_pg(self):
create_pg()
ray.get(f.remote())
a = A.remote()
ray.get(a.create_pg.remote())
# Create 2 pgs to make sure multiple placement groups that belong
# to a single job will be properly cleaned.
create_pg()
create_pg()
ray.shutdown()
"""
run_string_as_driver(driver_code)
# Wait until the driver is reported as dead by GCS.
def is_job_done():
jobs = ray.state.jobs()
for job in jobs:
if job["IsDead"]:
return True
return False
def assert_num_cpus(expected_num_cpus):
if expected_num_cpus == 0:
return "CPU" not in ray.available_resources()
return ray.available_resources()["CPU"] == expected_num_cpus
wait_for_condition(is_job_done)
available_cpus = ray.available_resources()["CPU"]
wait_for_condition(lambda: assert_num_cpus(num_nodes * num_cpu_per_node))
def test_automatic_cleanup_detached_actors(ray_start_cluster):
# Make sure the placement groups created by a
# detached actors are cleaned properly.
cluster = ray_start_cluster
num_nodes = 3
num_cpu_per_node = 2
# Create 3 nodes cluster.
for _ in range(num_nodes):
cluster.add_node(num_cpus=num_cpu_per_node)
cluster.wait_for_nodes()
info = ray.init(address=cluster.address, namespace="")
available_cpus = ray.available_resources()["CPU"]
assert available_cpus == num_nodes * num_cpu_per_node
driver_code = f"""
import ray
ray.init(address="{info["redis_address"]}", namespace="")
def create_pg():
pg = ray.util.placement_group(
[{{"CPU": 1}} for _ in range(3)],
strategy="STRICT_SPREAD")
ray.get(pg.ready())
return pg
# TODO(sang): Placement groups created by tasks launched by detached actor
# is not cleaned with the current protocol.
# @ray.remote(num_cpus=0)
# def f():
# create_pg()
@ray.remote(num_cpus=0, max_restarts=1)
class A:
def create_pg(self):
create_pg()
def create_child_pg(self):
self.a = A.options(name="B").remote()
ray.get(self.a.create_pg.remote())
def kill_child_actor(self):
ray.kill(self.a)
try:
ray.get(self.a.create_pg.remote())
except Exception:
pass
a = A.options(lifetime="detached", name="A").remote()
ray.get(a.create_pg.remote())
# TODO(sang): Currently, child tasks are cleaned when a detached actor
# is dead. We cannot test this scenario until it is fixed.
# ray.get(a.create_child_pg.remote())
ray.shutdown()
"""
run_string_as_driver(driver_code)
# Wait until the driver is reported as dead by GCS.
def is_job_done():
jobs = ray.state.jobs()
for job in jobs:
if job["IsDead"]:
return True
return False
def assert_num_cpus(expected_num_cpus):
if expected_num_cpus == 0:
return "CPU" not in ray.available_resources()
return ray.available_resources()["CPU"] == expected_num_cpus
wait_for_condition(is_job_done)
wait_for_condition(lambda: assert_num_cpus(num_nodes))
# Make sure when a child actor spawned by a detached actor
# is killed, the placement group is removed.
a = ray.get_actor("A")
# TODO(sang): child of detached actors
# seem to be killed when jobs are done. We should fix this before
# testing this scenario.
# ray.get(a.kill_child_actor.remote())
# assert assert_num_cpus(num_nodes)
# Make sure placement groups are cleaned when detached actors are killed.
ray.kill(a, no_restart=False)
wait_for_condition(lambda: assert_num_cpus(num_nodes * num_cpu_per_node))
# The detached actor a should've been restarted.
# Recreate a placement group.
ray.get(a.create_pg.remote())
wait_for_condition(lambda: assert_num_cpus(num_nodes))
# Kill it again and make sure the placement group
# that is created is deleted again.
ray.kill(a, no_restart=False)
wait_for_condition(lambda: assert_num_cpus(num_nodes * num_cpu_per_node))
@pytest.mark.parametrize(
"ray_start_cluster_head", [
generate_system_config_map(
num_heartbeats_timeout=10, ping_gcs_rpc_server_max_retries=60)
],
indirect=True)
def test_create_placement_group_after_gcs_server_restart(
ray_start_cluster_head):
cluster = ray_start_cluster_head
cluster.add_node(num_cpus=2)
cluster.add_node(num_cpus=2)
cluster.wait_for_nodes()
# Create placement group 1 successfully.
placement_group1 = ray.util.placement_group([{"CPU": 1}, {"CPU": 1}])
ray.get(placement_group1.ready(), timeout=10)
table = ray.util.placement_group_table(placement_group1)
assert table["state"] == "CREATED"
# Restart gcs server.
cluster.head_node.kill_gcs_server()
cluster.head_node.start_gcs_server()
# Create placement group 2 successfully.
placement_group2 = ray.util.placement_group([{"CPU": 1}, {"CPU": 1}])
ray.get(placement_group2.ready(), timeout=10)
table = ray.util.placement_group_table(placement_group2)
assert table["state"] == "CREATED"
# Create placement group 3.
# Status is `PENDING` because the cluster resource is insufficient.
placement_group3 = ray.util.placement_group([{"CPU": 1}, {"CPU": 1}])
with pytest.raises(ray.exceptions.GetTimeoutError):
ray.get(placement_group3.ready(), timeout=2)
table = ray.util.placement_group_table(placement_group3)
assert table["state"] == "PENDING"
@pytest.mark.parametrize(
"ray_start_cluster_head", [
generate_system_config_map(
num_heartbeats_timeout=10, ping_gcs_rpc_server_max_retries=60)
],
indirect=True)
def test_create_actor_with_placement_group_after_gcs_server_restart(
ray_start_cluster_head):
cluster = ray_start_cluster_head
cluster.add_node(num_cpus=2)
cluster.wait_for_nodes()
# Create a placement group.
placement_group = ray.util.placement_group([{"CPU": 1}, {"CPU": 1}])
# Create an actor that occupies resources after gcs server restart.
cluster.head_node.kill_gcs_server()
cluster.head_node.start_gcs_server()
actor_2 = Increase.options(
placement_group=placement_group,
placement_group_bundle_index=1).remote()
assert ray.get(actor_2.method.remote(1)) == 3
@pytest.mark.parametrize(
"ray_start_cluster_head", [
generate_system_config_map(
num_heartbeats_timeout=10, ping_gcs_rpc_server_max_retries=60)
],
indirect=True)
def test_create_placement_group_during_gcs_server_restart(
ray_start_cluster_head):
cluster = ray_start_cluster_head
cluster.add_node(num_cpus=200)
cluster.wait_for_nodes()
# Create placement groups during gcs server restart.
placement_groups = []
for i in range(0, 100):
placement_group = ray.util.placement_group([{"CPU": 1}, {"CPU": 1}])
placement_groups.append(placement_group)
cluster.head_node.kill_gcs_server()
cluster.head_node.start_gcs_server()
for i in range(0, 100):
ray.get(placement_groups[i].ready())
@pytest.mark.parametrize(
"ray_start_cluster_head", [
generate_system_config_map(
num_heartbeats_timeout=10, ping_gcs_rpc_server_max_retries=60)
],
indirect=True)
def test_placement_group_wait_api(ray_start_cluster_head):
cluster = ray_start_cluster_head
cluster.add_node(num_cpus=2)
cluster.add_node(num_cpus=2)
cluster.wait_for_nodes()
# Create placement group 1 successfully.
placement_group1 = ray.util.placement_group([{"CPU": 1}, {"CPU": 1}])
assert placement_group1.wait(10)
# Restart gcs server.
cluster.head_node.kill_gcs_server()
cluster.head_node.start_gcs_server()
# Create placement group 2 successfully.
placement_group2 = ray.util.placement_group([{"CPU": 1}, {"CPU": 1}])
assert placement_group2.wait(10)
# Remove placement group 1.
ray.util.remove_placement_group(placement_group1)
# Wait for placement group 1 after it is removed.
with pytest.raises(Exception):
placement_group1.wait(10)
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_schedule_placement_groups_at_the_same_time(connect_to_client):
ray.init(num_cpus=4)
with connect_to_client_or_not(connect_to_client):
pgs = [placement_group([{"CPU": 2}]) for _ in range(6)]
wait_pgs = {pg.ready(): pg for pg in pgs}
def is_all_placement_group_removed():
ready, _ = ray.wait(list(wait_pgs.keys()), timeout=0.5)
if ready:
ready_pg = wait_pgs[ready[0]]
remove_placement_group(ready_pg)
del wait_pgs[ready[0]]
if len(wait_pgs) == 0:
return True
return False
wait_for_condition(is_all_placement_group_removed)
ray.shutdown()
def test_detached_placement_group(ray_start_cluster):
cluster = ray_start_cluster
for _ in range(2):
cluster.add_node(num_cpus=3)
cluster.wait_for_nodes()
info = ray.init(address=cluster.address)
# Make sure the detached placement group stays alive when the job is dead.
driver_code = f"""
import ray
ray.init(address="{info["redis_address"]}")
pg = ray.util.placement_group(
[{{"CPU": 1}} for _ in range(2)],
strategy="STRICT_SPREAD", lifetime="detached")
ray.get(pg.ready())
@ray.remote(num_cpus=1)
class Actor:
def ready(self):
return True
for bundle_index in range(2):
actor = Actor.options(lifetime="detached", placement_group=pg,
placement_group_bundle_index=bundle_index).remote()
ray.get(actor.ready.remote())
ray.shutdown()
"""
run_string_as_driver(driver_code)
# Wait until the driver is reported as dead by GCS.
def is_job_done():
jobs = ray.state.jobs()
for job in jobs:
if job["IsDead"]:
return True
return False
def assert_alive_num_pg(expected_num_pg):
alive_num_pg = 0
for _, placement_group_info in ray.util.placement_group_table().items(
):
if placement_group_info["state"] == "CREATED":
alive_num_pg += 1
return alive_num_pg == expected_num_pg
def assert_alive_num_actor(expected_num_actor):
alive_num_actor = 0
for actor_info in ray.state.actors().values():
if actor_info["State"] == ray.gcs_utils.ActorTableData.ALIVE:
alive_num_actor += 1
return alive_num_actor == expected_num_actor
wait_for_condition(is_job_done)
assert assert_alive_num_pg(1)
assert assert_alive_num_actor(2)
# Make sure the detached placement group stays alive when its creator,
# a detached actor, is dead.
# Test actors first.
@ray.remote(num_cpus=1)
class NestedActor:
def ready(self):
return True
@ray.remote(num_cpus=1)
class Actor:
def __init__(self):
self.actors = []
def ready(self):
return True
def schedule_nested_actor_with_detached_pg(self):
# Create placement group which is detached.
pg = ray.util.placement_group(
[{
"CPU": 1
} for _ in range(2)],
strategy="STRICT_SPREAD",
lifetime="detached",
name="detached_pg")
ray.get(pg.ready())
# Schedule nested actor with the placement group.
for bundle_index in range(2):
actor = NestedActor.options(
placement_group=pg,
placement_group_bundle_index=bundle_index,
lifetime="detached").remote()
ray.get(actor.ready.remote())
self.actors.append(actor)
a = Actor.options(lifetime="detached").remote()
ray.get(a.ready.remote())
# 1 parent actor and 2 child actors.
ray.get(a.schedule_nested_actor_with_detached_pg.remote())
# Kill an actor and wait until it is killed.
kill_actor_and_wait_for_failure(a)
with pytest.raises(ray.exceptions.RayActorError):
ray.get(a.ready.remote())
# We should have 2 alive pgs and 4 alive actors.
assert assert_alive_num_pg(2)
assert assert_alive_num_actor(4)
def test_named_placement_group(ray_start_cluster):
cluster = ray_start_cluster
for _ in range(2):
cluster.add_node(num_cpus=3)
cluster.wait_for_nodes()
info = ray.init(address=cluster.address, namespace="")
global_placement_group_name = "named_placement_group"
# Create a detached placement group with name.
driver_code = f"""
import ray
ray.init(address="{info["redis_address"]}", namespace="")
pg = ray.util.placement_group(
[{{"CPU": 1}} for _ in range(2)],
strategy="STRICT_SPREAD",
name="{global_placement_group_name}",
lifetime="detached")
ray.get(pg.ready())
ray.shutdown()
"""
run_string_as_driver(driver_code)
# Wait until the driver is reported as dead by GCS.
def is_job_done():
jobs = ray.state.jobs()
for job in jobs:
if job["IsDead"]:
return True
return False
wait_for_condition(is_job_done)
@ray.remote(num_cpus=1)
class Actor:
def ping(self):
return "pong"
# Get the named placement group and schedule an actor.
placement_group = ray.util.get_placement_group(global_placement_group_name)
assert placement_group is not None
assert placement_group.wait(5)
actor = Actor.options(
placement_group=placement_group,
placement_group_bundle_index=0).remote()
ray.get(actor.ping.remote())
# Create another placement group and make sure its creation will fail.
error_creation_count = 0
try:
ray.util.placement_group(
[{
"CPU": 1
} for _ in range(2)],
strategy="STRICT_SPREAD",
name=global_placement_group_name)
except RaySystemError:
error_creation_count += 1
assert error_creation_count == 1
# Remove the named placement group and make sure the second creation
# will succeed.
ray.util.remove_placement_group(placement_group)
same_name_pg = ray.util.placement_group(
[{
"CPU": 1
} for _ in range(2)],
strategy="STRICT_SPREAD",
name=global_placement_group_name)
assert same_name_pg.wait(10)
# Get a named placement group with a name that doesn't exist
# and make sure it will raise ValueError correctly.
error_count = 0
try:
ray.util.get_placement_group("inexistent_pg")
except ValueError:
error_count = error_count + 1
assert error_count == 1
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_placement_group_synchronous_registration(ray_start_cluster,
connect_to_client):
cluster = ray_start_cluster
# One node which only has one CPU.
cluster.add_node(num_cpus=1)
cluster.wait_for_nodes()
ray.init(address=cluster.address)
with connect_to_client_or_not(connect_to_client):
# Create a placement group that has two bundles and `STRICT_PACK`
# strategy, so its registration succeeds but scheduling fails.
placement_group = ray.util.placement_group(
name="name",
strategy="STRICT_PACK",
bundles=[{
"CPU": 1,
}, {
"CPU": 1
}])
# Make sure we can properly remove it immediately
# as its registration is synchronous.
ray.util.remove_placement_group(placement_group)
def is_placement_group_removed():
table = ray.util.placement_group_table(placement_group)
if "state" not in table:
return False
return table["state"] == "REMOVED"
wait_for_condition(is_placement_group_removed)
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_placement_group_gpu_set(ray_start_cluster, connect_to_client):
cluster = ray_start_cluster
# Two nodes, each with one CPU and one GPU.
cluster.add_node(num_cpus=1, num_gpus=1)
cluster.add_node(num_cpus=1, num_gpus=1)
cluster.wait_for_nodes()
ray.init(address=cluster.address)
with connect_to_client_or_not(connect_to_client):
placement_group = ray.util.placement_group(
name="name",
strategy="PACK",
bundles=[{
"CPU": 1,
"GPU": 1
}, {
"CPU": 1,
"GPU": 1
}])
@ray.remote(num_gpus=1)
def get_gpus():
return ray.get_gpu_ids()
result = get_gpus.options(
placement_group=placement_group,
placement_group_bundle_index=0).remote()
result = ray.get(result)
assert result == [0]
result = get_gpus.options(
placement_group=placement_group,
placement_group_bundle_index=1).remote()
result = ray.get(result)
assert result == [0]
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_placement_group_gpu_assigned(ray_start_cluster, connect_to_client):
cluster = ray_start_cluster
cluster.add_node(num_gpus=2)
ray.init(address=cluster.address)
gpu_ids_res = set()
@ray.remote(num_gpus=1, num_cpus=0)
def f():
import os
return os.environ["CUDA_VISIBLE_DEVICES"]
with connect_to_client_or_not(connect_to_client):
pg1 = ray.util.placement_group([{"GPU": 1}])
pg2 = ray.util.placement_group([{"GPU": 1}])
assert pg1.wait(10)
assert pg2.wait(10)
gpu_ids_res.add(ray.get(f.options(placement_group=pg1).remote()))
gpu_ids_res.add(ray.get(f.options(placement_group=pg2).remote()))
assert len(gpu_ids_res) == 2
def test_placement_group_client_option_serialization():
"""Tests conversion of placement group to json-serializable dict and back.
Tests conversion
placement_group -> dict -> placement_group and
dict -> placement_group -> dict
with and without non-null bundle cache.
"""
# Tests conversion from dict to placement group and back.
def dict_to_pg_to_dict(pg_dict_in):
pg = PlacementGroup.from_dict(pg_dict_in)
pg_dict_out = pg.to_dict()
assert pg_dict_in == pg_dict_out
# Tests conversion from placement group to dict and back.
def pg_to_dict_to_pg(pg_in):
pg_dict = pg_in.to_dict()
pg_out = PlacementGroup.from_dict(pg_dict)
assert pg_out.id == pg_in.id
assert pg_out.bundle_cache == pg_in.bundle_cache
pg_id = PlacementGroupID(id=bytes(16))
id_string = bytes(16).hex()
bundle_cache = [{"CPU": 2}, {"custom_resource": 5}]
pg_with_bundles = PlacementGroup(id=pg_id, bundle_cache=bundle_cache)
pg_to_dict_to_pg(pg_with_bundles)
pg_no_bundles = PlacementGroup(id=pg_id)
pg_to_dict_to_pg(pg_no_bundles)
pg_dict_with_bundles = {"id": id_string, "bundle_cache": bundle_cache}
dict_to_pg_to_dict(pg_dict_with_bundles)
pg_dict_no_bundles = {"id": id_string, "bundle_cache": None}
dict_to_pg_to_dict(pg_dict_no_bundles)
def test_actor_scheduling_not_block_with_placement_group(ray_start_cluster):
"""Tests the scheduling of lots of actors will not be blocked
when using placement groups.
For more detailed information please refer to:
https://github.com/ray-project/ray/issues/15801.
"""
cluster = ray_start_cluster
cluster.add_node(num_cpus=1)
ray.init(address=cluster.address)
@ray.remote
class A:
def ready(self):
pass
actor_num = 1000
pgs = [ray.util.placement_group([{"CPU": 1}]) for _ in range(actor_num)]
actors = [A.options(placement_group=pg).remote() for pg in pgs]
refs = [actor.ready.remote() for actor in actors]
expected_created_num = 1
def is_actor_created_number_correct():
ready, not_ready = ray.wait(refs, num_returns=len(refs), timeout=1)
return len(ready) == expected_created_num
def is_pg_created_number_correct():
created_pgs = [
pg for _, pg in ray.util.placement_group_table().items()
if pg["state"] == "CREATED"
]
return len(created_pgs) == expected_created_num
wait_for_condition(is_pg_created_number_correct, timeout=3)
wait_for_condition(
is_actor_created_number_correct, timeout=30, retry_interval_ms=0)
# NOTE: we don't need to check that all the actors are created successfully.
for _ in range(20):
expected_created_num += 1
cluster.add_node(num_cpus=1)
wait_for_condition(is_pg_created_number_correct, timeout=10)
# Make sure the node add event will cause a waiting actor
# to be created successfully in time.
wait_for_condition(
is_actor_created_number_correct, timeout=30, retry_interval_ms=0)
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_placement_group_gpu_unique_assigned(ray_start_cluster,
connect_to_client):
cluster = ray_start_cluster
cluster.add_node(num_gpus=4, num_cpus=4)
ray.init(address=cluster.address)
gpu_ids_res = set()
# Create placement group with 4 bundles using 1 GPU each.
num_gpus = 4
bundles = [{"GPU": 1, "CPU": 1} for _ in range(num_gpus)]
pg = placement_group(bundles)
ray.get(pg.ready())
# Actor using 1 GPU that has a method to get
# $CUDA_VISIBLE_DEVICES env variable.
@ray.remote(num_gpus=1, num_cpus=1)
class Actor:
def get_gpu(self):
import os
return os.environ["CUDA_VISIBLE_DEVICES"]
# Create actors out of order.
actors = []
actors.append(
Actor.options(placement_group=pg,
placement_group_bundle_index=0).remote())
actors.append(
Actor.options(placement_group=pg,
placement_group_bundle_index=3).remote())
actors.append(
Actor.options(placement_group=pg,
placement_group_bundle_index=2).remote())
actors.append(
Actor.options(placement_group=pg,
placement_group_bundle_index=1).remote())
for actor in actors:
gpu_ids = ray.get(actor.get_gpu.remote())
assert len(gpu_ids) == 1
gpu_ids_res.add(gpu_ids)
assert len(gpu_ids_res) == 4
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
data-generator/src/main/java/com/redhat/summit2019/Main.java
|
package com.redhat.summit2019;
import com.redhat.summit2019.generator.DataGenerator;
import okhttp3.Credentials;
import okhttp3.Headers;
import okhttp3.MediaType;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.RequestBody;
import okhttp3.Response;
import java.net.URL;
public class Main {
private static final OkHttpClient http = new OkHttpClient();
private static final DataGenerator dataGenerator = new DataGenerator();
private static final String BASE_URL = System.getenv().getOrDefault("PAM_BASE_URL", "http://127.0.0.1:8080/kie-server/services/rest/server");
private static final String CONTAINER_ID = System.getenv().getOrDefault("PAM_CONTAINER_ID", "kafka-jbpm-process_1.0.18-SNAPSHOT");
private static final String PROCESS_INSTANCE_ID = System.getenv().getOrDefault("PAM_PROCESS_INSTANCE_ID", "kafka-jbpm-process.claimfund-process");
private static final String USERNAME = System.getenv().getOrDefault("JBPM_USERNAME", "wbadmin");
private static final String PASSWORD = System.getenv().getOrDefault("JBPM_PASSWORD", "wbadmin");
public static void main(String[] args) throws Exception {
int quantity = 10;
Headers authHeader = new Headers.Builder()
.add("Authorization", Credentials.basic(USERNAME, PASSWORD))
.build();
URL url = new URL(BASE_URL + "/containers/" + CONTAINER_ID + "/processes/" + PROCESS_INSTANCE_ID + "/instances");
if (args != null && args.length > 0) {
quantity = Integer.parseInt(args[0]);
}
System.out.println("Generating " + quantity + " new task(s).");
for (int i = 0; i < quantity; i++) {
System.out.println("Creating task #" + i);
String json = dataGenerator.generateJsonData();
RequestBody body = RequestBody.create(MediaType.get("application/json"), json);
Request request = new Request.Builder()
.url(url)
.headers(authHeader)
.post(body)
.build();
Response response = http.newCall(request).execute();
System.out.println("POST: " + response.code() + " " + (response.body() != null ? response.body().string() : "null"));
}
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
Cloud_Functions/ocr-save/main.py
|
import base64
import json
import os
from google.cloud import storage
from google.cloud import vision
from google.cloud import pubsub_v1
vision_client = vision.ImageAnnotatorClient()
storage_client = storage.Client()
publisher = pubsub_v1.PublisherClient()
project_id = os.environ['GCP_PROJECT']
with open("config.json") as f:
data = f.read()
config = json.loads(data)
def process_image(file, context):
"""Cloud Function triggered by Cloud Storage when a file is changed.
Args:
file (dict): Metadata of the changed file, provided by the triggering
Cloud Storage event.
context (google.cloud.functions.Context): Metadata of triggering event.
Returns:
None; the output is written to stdout and Stackdriver Logging
"""
bucket = validate_message(file, 'bucket')
name = validate_message(file, 'name')
detect_text(bucket, name)
print('File {} processed.'.format(file['name']))
def detect_text(bucket, filename):
print('Looking for text in image {}'.format(filename))
text_detection_response = vision_client.text_detection({
'source': {'image_uri': 'gs://{}/{}'.format(bucket, filename)}
})
annotations = text_detection_response.text_annotations
if len(annotations) > 0:
text = annotations[0].description
else:
text = ''
print('Extracted text {} from image ({} chars).'.format(text, len(text)))
topic_name = config['RESULT_TOPIC']
message = {
'text': text,
# The downstream save_result handler validates a 'filename' field, so include it here.
'filename': filename
}
message_data = json.dumps(message).encode('utf-8')
topic_path = publisher.topic_path(project_id, topic_name)
future = publisher.publish(topic_path, data=message_data)
future.result()
def save_result(event, context):
if event.get('data'):
message_data = base64.b64decode(event['data']).decode('utf-8')
message = json.loads(message_data)
else:
raise ValueError('Data sector is missing in the Pub/Sub message.')
text = validate_message(message, 'text')
filename = validate_message(message, 'filename')
print('Received request to save file {}.'.format(filename))
bucket_name = config['RESULT_BUCKET']
result_filename = '{}.txt'.format(filename)
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(result_filename)
print('Saving result to {} in bucket {}.'.format(result_filename,
bucket_name))
blob.upload_from_string(text)
print('File saved.')
def validate_message(message, param):
var = message.get(param)
if not var:
raise ValueError('{} is not provided. Make sure you have \
property {} in the request'.format(param, param))
return var
|
[] |
[] |
[
"GCP_PROJECT"
] |
[]
|
["GCP_PROJECT"]
|
python
| 1 | 0 | |
keras/training/train_resnet_empty.py
|
#!/usr/bin/env python3
# import matplotlib
# matplotlib.use("TkAgg") # use for OSX
import math, json, os, pickle, sys
import keras
# import matplotlib.pyplot as plt
from keras.callbacks import CSVLogger, EarlyStopping, ModelCheckpoint
from keras.layers import Dense
from keras.models import Model
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
do_log = True
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
name = "ResNet50_empty"
log_file = "{}_history_log.csv".format(name)
DIRLOG = "/storage/plzen1/home/radekj/vmmr/results/resnet_empty"
csv_logger = CSVLogger(os.path.join(DIRLOG, log_file), append=True)
SIZE = (224, 224)
BATCH_SIZE = 64
EPOCH = 20
def train_cnn(folder):
DATA_DIR = folder
TRAIN_DIR = os.path.join(DATA_DIR, 'train')
VALID_DIR = os.path.join(DATA_DIR, 'valid')
TEST_DIR = os.path.join(DATA_DIR, 'test')
save_aug = os.path.join(DATA_DIR, 'tmp')
num_train_samples = sum([len(files) for r, d, files in os.walk(TRAIN_DIR)])
num_valid_samples = sum([len(files) for r, d, files in os.walk(VALID_DIR)])
num_train_steps = math.floor(num_train_samples/BATCH_SIZE)
num_valid_steps = math.floor(num_valid_samples/BATCH_SIZE)
shift = 0.05
# gen = ImageDataGenerator(zca_whitening=True,
# width_shift_range=shift,
# height_shift_range=shift,
# horizontal_flip=True,
# vertical_flip=False,
# rotation_range=8,
# zoom_range=0.1,
# featurewise_center=True,
# featurewise_std_normalization=True)
gen = ImageDataGenerator()
val_gen = ImageDataGenerator()
batches = gen.flow_from_directory(TRAIN_DIR, target_size=SIZE, class_mode='categorical', shuffle=True, batch_size=BATCH_SIZE)
val_batches = val_gen.flow_from_directory(VALID_DIR, target_size=SIZE, class_mode='categorical', shuffle=True, batch_size=BATCH_SIZE)
model = keras.applications.resnet50.ResNet50(include_top=True, weights=None)
classes = list(iter(batches.class_indices))
model.layers.pop()
last = model.layers[-1].output
x = Dense(len(classes), activation="softmax")(last)
finetuned_model = Model(model.input, x)
finetuned_model.compile(optimizer=Adam(lr=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
for c in batches.class_indices:
classes[batches.class_indices[c]] = c
finetuned_model.classes = classes
early_stopping = EarlyStopping(patience=5)
checkpointer = ModelCheckpoint("{}_best.h5".format(name), verbose=1, save_best_only=True)
history = finetuned_model.fit_generator(batches, steps_per_epoch=num_train_steps, epochs=EPOCH,
callbacks=[csv_logger, early_stopping, checkpointer],
validation_data=val_batches,
validation_steps=num_valid_steps)
finetuned_model.save("{}_final.h5".format(name))
if __name__ == '__main__':
"""
dataset_path: /Users/radekj/devroot/vmmr/datasets/sample5
/storage/plzen1/home/radekj/vmmr"
"""
print(len(sys.argv))
if len(sys.argv) < 2:
print("Need param: python train_.py dataset_path")
exit(1)
folder = str(sys.argv[1])
exists = os.path.isdir(folder)
if not exists:
print("Folder '{}' not found.".format(folder))
exit(1)
exists = os.path.isfile(log_file)
if not exists:
f = open(log_file, "w+")
f.write("====== start ====")
f.close()
train_cnn(folder)
print("===== end.")
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL"]
|
python
| 1 | 0 | |
dql/cli.py
|
""" Interative DQL client """
import cmd
import functools
import json
import os
import random
import shlex
import subprocess
from builtins import input
from collections import OrderedDict
from contextlib import contextmanager
from fnmatch import fnmatch
from typing import Any, Callable, ContextManager, Dict, Optional, Tuple
import botocore
from pyparsing import ParseException
from rich.panel import Panel
from rich.syntax import Syntax
from rich.traceback import install
from .engine import FragmentEngine
from .exceptions import EngineRuntimeError
from .help import (
ALTER,
ANALYZE,
CREATE,
DELETE,
DROP,
DUMP,
EXPLAIN,
INSERT,
LOAD,
OPTIONS,
SCAN,
SELECT,
UPDATE,
)
from .history import HistoryManager
from .monitor import Monitor
from .output import (
ColumnFormat,
ExpandedFormat,
JsonFormat,
SmartBuffer,
SmartFormat,
console,
less_display,
stdout_display,
)
from .throttle import TableLimits
# From http://docs.aws.amazon.com/general/latest/gr/rande.html#ddb_region
REGIONS = [
"us-east-1",
"us-west-2",
"us-west-1",
"eu-west-1",
"eu-central-1",
"ap-southeast-1",
"ap-southeast-2",
"ap-northeast-1",
"sa-east-1",
]
NO_DEFAULT = object()
DISPLAYS = {"stdout": stdout_display, "less": less_display}
FORMATTERS = {
"smart": SmartFormat,
"expanded": ExpandedFormat,
"column": ColumnFormat,
"json": JsonFormat,
}
DEFAULT_CONFIG = {
"width": "auto",
"pagesize": "auto",
"display": "stdout",
"format": "smart",
"allow_select_scan": False,
"lossy_json_float": True,
"_throttle": {},
}
# Installing the rich traceback handler for un-handled errors.
install()
def indent(string, prefix=" "):
""" Indent a paragraph of text """
return "\n".join([prefix + line for line in string.split("\n")])
def prompt(msg, default=NO_DEFAULT, validate=None):
""" Prompt user for input """
while True:
response = input(msg + " ").strip()
if not response:
if default is NO_DEFAULT:
continue
return default
if validate is None or validate(response):
return response
def promptyn(msg: str, default: Optional[bool] = None) -> bool:
"""
Display a blocking prompt until the user confirms.
Case is disregarded for prompt input.
User can input one of: `["y", "n", "yes", "no"]`
Example:
--------
promptyn("This is a message. Do you want to do stuff?", True)
# will print with a default True, capitalizes Y.
"This is a message. Do you want to do stuff? (Y/n)"
promptyn("This is a message. Do you want to do stuff?", False)
# will print with a default False, capitalizes N.
"This is a message. Do you want to do stuff? (y/N)"
"""
while True:
yes = "Y" if default else "y"
if default or default is None:
no = "n"
else:
no = "N"
confirm = prompt("%s [%s/%s]" % (msg, yes, no), "").lower()
if confirm in ("y", "yes"):
return True
elif confirm in ("n", "no"):
return False
elif not confirm and default is not None:
return default
def repl_command(fxn):
"""
Decorator for cmd methods
Parses arguments from the arg string and passes them to the method as *args
and **kwargs.
"""
@functools.wraps(fxn)
def wrapper(self, arglist):
"""Wraps the command method"""
args = []
kwargs = {}
if arglist:
for arg in shlex.split(arglist):
if "=" in arg:
split = arg.split("=", 1)
kwargs[split[0]] = split[1]
else:
args.append(arg)
return fxn(self, *args, **kwargs)
return wrapper
def get_enum_key(key, choices):
""" Get an enum by prefix or equality """
if key in choices:
return key
keys = [k for k in choices if k.startswith(key)]
if len(keys) == 1:
return keys[0]
@contextmanager
def exception_handler(engine):
""" It is a context manager which can handle exceptions and deal with them. """
try:
yield
except KeyboardInterrupt:
spooky_season = [":skull:", ":vampire:", ":zombie:", ":jack-o-lantern:"]
console.print(random.choice(spooky_season))
except botocore.exceptions.BotoCoreError as e:
console.log("BotoCoreError: ", e)
except ParseException as e:
console.log("Engine: ParseException")
syntax = Syntax(
engine.pformat_exc(e),
"sql",
theme="monokai",
line_numbers=True,
word_wrap=True,
)
console.print(Panel(syntax, title="Engine Details", expand=False))
except EngineRuntimeError as e:
console.log(e)
except SyntaxError as e:
console.log(e)
except Exception:
console.print_exception()
class DQLClient(cmd.Cmd):
"""
Interactive commandline interface.
Attributes
----------
running : bool
True while session is active, False after quitting
engine : :class:`dql.engine.FragmentEngine`
"""
running = False
conf: Dict
engine: FragmentEngine
formatter = None
display: Any
session = None
_conf_dir: str
_local_endpoint: Optional[Tuple[str, int]] = None
throttle: TableLimits
# When True, will not output status messages from queries (i.e. "table created").
# Used with --command
_silent: bool = False
history_manager: HistoryManager = HistoryManager()
def initialize(
self,
region: str = "us-west-1",
host: str = None,
port: int = 8000,
config_dir: Optional[str] = None,
session: Optional[Any] = None,
) -> None:
""" Set up the repl for execution. """
self.history_manager.try_to_load_history()
try:
import readline
import rlcompleter
except ImportError:
# Windows doesn't have readline, so gracefully ignore.
pass
else:
# Mac OS X readline compatibility from http://stackoverflow.com/a/7116997
if "libedit" in readline.__doc__:
readline.parse_and_bind("bind ^I rl_complete")
else:
readline.parse_and_bind("tab: complete")
# Tab-complete names with a '-' in them
delims = set(readline.get_completer_delims())
if "-" in delims:
delims.remove("-")
readline.set_completer_delims("".join(delims))
self._conf_dir = config_dir or os.path.join(
os.environ.get("HOME", "."), ".config"
)
self.session = session
self.engine = FragmentEngine()
self.engine.caution_callback = self.caution_callback
kwargs = {}
if host is not None:
self._local_endpoint = (host, port)
# If we don't pass these in we might get a missing credentials error
kwargs["access_key"] = ""
kwargs["secret_key"] = ""
self.engine.connect(
region,
session=session,
host=host,
port=port,
is_secure=(host is None),
**kwargs
)
self.conf = self.load_config()
for key, value in DEFAULT_CONFIG.items():
self.conf.setdefault(key, value)
self.display = DISPLAYS[self.conf["display"]]
self.throttle = TableLimits()
self.throttle.load(self.conf["_throttle"])
def start(self):
""" Start running the interactive session (blocking) """
self.running = True
while self.running:
self.update_prompt()
with exception_handler(self.engine):
self.cmdloop()
self.engine.reset()
def postcmd(self, stop, line):
self.update_prompt()
return stop
def update_prompt(self):
""" Update the prompt """
prefix = ""
if self._local_endpoint is not None:
prefix += "(%s:%d) " % self._local_endpoint
prefix += self.engine.region
if self.engine.partial:
self.prompt = len(prefix) * " " + "> "
else:
self.prompt = prefix + "> "
def do_shell(self, arglist):
""" Run a shell command """
proc = subprocess.Popen(
shlex.split(arglist), stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
print(proc.communicate()[0])
def caution_callback(self, action):
"""
Prompt the user for explicit confirmation before running a write operation on all items
in a table
"""
msg = "This will run %s on all items in the table! Continue?" % action
return promptyn(msg, False)
def save_config(self):
""" Save the conf file """
if not os.path.exists(self._conf_dir):
os.makedirs(self._conf_dir)
conf_file = os.path.join(self._conf_dir, "dql.json")
with open(conf_file, "w") as ofile:
json.dump(self.conf, ofile, indent=2)
def load_config(self):
""" Load your configuration settings from a file """
conf_file = os.path.join(self._conf_dir, "dql.json")
if not os.path.exists(conf_file):
return {}
with open(conf_file, "r") as ifile:
return json.load(ifile)
@repl_command
def do_opt(self, *_args, **kwargs):
""" Get and set options """
args = list(_args)
if not args:
largest = 0
keys = [key for key in self.conf if not key.startswith("_")]
for key in keys:
largest = max(largest, len(key))
for key in keys:
print("%s : %s" % (key.rjust(largest), self.conf[key]))
return
option = args.pop(0)
if not args and not kwargs:
method = getattr(self, "getopt_" + option, None)
if method is None:
self.getopt_default(option)
else:
method()
else:
method = getattr(self, "opt_" + option, None)
if method is None:
print("Unrecognized option %r" % option)
else:
method(*args, **kwargs)
self.save_config()
def help_opt(self):
""" Print the help text for options """
print(OPTIONS)
def getopt_default(self, option):
""" Default method to get an option """
if option not in self.conf:
print("Unrecognized option %r" % option)
return
print("%s: %s" % (option, self.conf[option]))
def complete_opt(self, text, line, begidx, endidx):
""" Autocomplete for options """
tokens = line.split()
if len(tokens) == 1:
if text:
return
else:
option = ""
else:
option = tokens[1]
if len(tokens) == 1 or (len(tokens) == 2 and text):
return [
name[4:] + " " for name in dir(self) if name.startswith("opt_" + text)
]
method = getattr(self, "complete_opt_" + option, None)
if method is not None:
return method(text, line, begidx, endidx) # pylint: disable=E1102
def opt_width(self, width):
""" Set width of output ('auto' will auto-detect terminal width) """
if width != "auto":
width = int(width)
self.conf["width"] = width
def complete_opt_width(self, *_):
""" Autocomplete for width option """
return ["auto"]
def opt_pagesize(self, pagesize):
""" Get or set the page size of the query output """
if pagesize != "auto":
pagesize = int(pagesize)
self.conf["pagesize"] = pagesize
def complete_opt_pagesize(self, *_):
""" Autocomplete for pagesize option """
return ["auto"]
def _print_enum_opt(self, option, choices):
""" Helper for enum options """
for key in choices:
if key == self.conf[option]:
print("* %s" % key)
else:
print(" %s" % key)
def opt_display(self, display):
""" Set value for display option """
key = get_enum_key(display, DISPLAYS)
if key is not None:
self.conf["display"] = key
self.display = DISPLAYS[key]
print("Set display %r" % key)
else:
print("Unknown display %r" % display)
def getopt_display(self):
""" Get value for display option """
self._print_enum_opt("display", DISPLAYS)
def complete_opt_display(self, text, *_):
""" Autocomplete for display option """
return [t + " " for t in DISPLAYS if t.startswith(text)]
def opt_format(self, fmt):
""" Set value for format option """
key = get_enum_key(fmt, FORMATTERS)
if key is not None:
self.conf["format"] = key
print("Set format %r" % key)
else:
print("Unknown format %r" % fmt)
def getopt_format(self):
""" Get value for format option """
self._print_enum_opt("format", FORMATTERS)
def complete_opt_format(self, text, *_):
""" Autocomplete for format option """
return [t + " " for t in FORMATTERS if t.startswith(text)]
def opt_allow_select_scan(self, allow):
""" Set option allow_select_scan """
allow = allow.lower() in ("true", "t", "yes", "y")
self.conf["allow_select_scan"] = allow
self.engine.allow_select_scan = allow
def complete_opt_allow_select_scan(self, text, *_):
""" Autocomplete for allow_select_scan option """
return [t for t in ("true", "false", "yes", "no") if t.startswith(text.lower())]
def opt_lossy_json_float(self, lossy):
""" Set option lossy_json_float """
lossy = lossy.lower() in ("true", "t", "yes", "y")
self.conf["lossy_json_float"] = lossy
def complete_opt_lossy_json_float(self, text, *_):
""" Autocomplete for lossy_json_float option """
return [t for t in ("true", "false", "yes", "no") if t.startswith(text.lower())]
@repl_command
def do_watch(self, *args):
""" Watch Dynamo tables consumed capacity """
tables = set()
if not self.engine.cached_descriptions:
self.engine.describe_all()
all_tables = list(self.engine.cached_descriptions)
for arg in args:
candidates = set((t for t in all_tables if fnmatch(t, arg)))
tables.update(candidates)
monitor = Monitor(self.engine, sorted(tables))
monitor.start()
def complete_watch(self, text, *_):
""" Autocomplete for watch """
return [t + " " for t in self.engine.cached_descriptions if t.startswith(text)]
@repl_command
def do_file(self, filename):
""" Read and execute a .dql file """
with open(filename, "r") as infile:
self._run_cmd(infile.read())
def complete_file(self, text, line, *_):
""" Autocomplete DQL file lookup """
leading = line[len("file ") :]
curpath = os.path.join(os.path.curdir, leading)
def isdql(parent, filename):
""" Check if a file is .dql or a dir """
return not filename.startswith(".") and (
os.path.isdir(os.path.join(parent, filename))
or filename.lower().endswith(".dql")
)
def addslash(path):
""" Append a slash if a file is a directory """
if path.lower().endswith(".dql"):
return path + " "
else:
return path + "/"
if not os.path.exists(curpath) or not os.path.isdir(curpath):
curpath = os.path.dirname(curpath)
return [
addslash(f)
for f in os.listdir(curpath)
if f.startswith(text) and isdql(curpath, f)
]
@repl_command
def do_ls(self, table: str = None) -> None:
""" List all tables or print details of one table """
if table is None:
table_descriptions = self.engine.describe_all()
else:
tables = list(self.engine.connection.list_tables())
filtered = [t for t in tables if fnmatch(t, table)]
if len(filtered) == 1:
print(
self.engine.describe(
filtered[0], refresh=True, metrics=True
).pformat()
)
return
elif len(filtered) == 0:
raise EngineRuntimeError("Table %r not found" % table)
else:
table_descriptions = [self.engine.describe(t, True) for t in filtered]
fields = OrderedDict(
[
("Name", "name"),
("Status", "status"),
("Read", "total_read_throughput"),
("Write", "total_write_throughput"),
]
)
# Calculate max width of all items for each column
sizes = [
1
+ max([len(str(getattr(t, f))) for t in table_descriptions] + [len(title)])
for title, f in fields.items()
]
# Print the header
for size, title in zip(sizes, fields):
print(title.ljust(size), end="")
print()
# Print each table row
for row_table in table_descriptions:
for size, field in zip(sizes, fields.values()):
print(str(getattr(row_table, field)).ljust(size), end="")
print()
def complete_ls(self, text, *_):
""" Autocomplete for ls """
return [t + " " for t in self.engine.cached_descriptions if t.startswith(text)]
@repl_command
def do_local(self, host="localhost", port=8000):
"""
Connect to a local DynamoDB instance. Use 'local off' to disable.
> local
> local host=localhost port=8001
> local off
"""
port = int(port)
if host == "off":
self._local_endpoint = None
else:
self._local_endpoint = (host, port)
self.onecmd("use %s" % self.engine.region)
@repl_command
def do_use(self, region):
"""
Switch the AWS region
> use us-west-1
> use us-east-1
"""
if self._local_endpoint is not None:
host, port = self._local_endpoint # pylint: disable=W0633
self.engine.connect(
region,
session=self.session,
host=host,
port=port,
is_secure=False,
access_key="",
secret_key="",
)
else:
self.engine.connect(region, session=self.session)
def complete_use(self, text, *_):
""" Autocomplete for use """
return [t + " " for t in REGIONS if t.startswith(text)]
@repl_command
def do_throttle(self, *_args):
"""
Set the allowed consumed throughput for DQL.
# Set the total allowed throughput across all tables
> throttle 1000 100
# Set the default allowed throughput per-table/index
> throttle default 40% 20%
# Set the allowed throughput on a table
> throttle mytable 10 10
# Set the allowed throughput on a global index
> throttle mytable myindex 40 6
see also: unthrottle
"""
args = list(_args)
if not args:
print(self.throttle)
return
if len(args) < 2:
return self.onecmd("help throttle")
args, read, write = args[:-2], args[-2], args[-1]
if len(args) == 2:
tablename, indexname = args # pylint: disable=W0632
self.throttle.set_index_limit(tablename, indexname, read, write)
elif len(args) == 1:
tablename = args[0]
if tablename == "default":
self.throttle.set_default_limit(read, write)
elif tablename == "total":
self.throttle.set_total_limit(read, write)
else:
self.throttle.set_table_limit(tablename, read, write)
elif not args:
self.throttle.set_total_limit(read, write)
else:
return self.onecmd("help throttle")
self.conf["_throttle"] = self.throttle.save()
self.save_config()
@repl_command
def do_unthrottle(self, *args):
"""
Remove the throughput limits for DQL that were set with 'throttle'
Examples:
---------
# Remove all limits
> unthrottle
# Remove the limit on total allowed throughput
> unthrottle total
# Remove the default limit
> unthrottle default
# Remove the limit on a table
> unthrottle mytable
# Remove the limit on a global index
> unthrottle mytable myindex
"""
if not args:
if promptyn("Are you sure you want to clear all throttles?"):
self.throttle.load({})
elif len(args) == 1:
tablename = args[0]
if tablename == "total":
self.throttle.set_total_limit()
elif tablename == "default":
self.throttle.set_default_limit()
else:
self.throttle.set_table_limit(tablename)
elif len(args) == 2:
tablename, indexname = args
self.throttle.set_index_limit(tablename, indexname)
else:
self.onecmd("help unthrottle")
self.conf["_throttle"] = self.throttle.save()
self.save_config()
def default(self, command):
""" This is an override of super class method. """
self._run_cmd(command)
def completedefault(self, text, line, *_):
""" Autocomplete table names in queries """
tokens = line.split()
try:
before = tokens[-2]
complete = before.lower() in ("from", "update", "table", "into")
if tokens[0].lower() == "dump":
complete = True
if complete:
return [
t + " "
for t in self.engine.cached_descriptions
if t.startswith(text)
]
except (IndexError, KeyError):
pass
def _run_cmd(self, command):
""" Run a DQL command """
if self.throttle:
tables = self.engine.describe_all(False)
limiter = self.throttle.get_limiter(tables)
else:
limiter = None
self.engine.rate_limit = limiter
results = self.engine.execute(command)
if results is None:
pass
elif isinstance(results, str):
if not self._silent:
print(results)
else:
with self.display() as ostream:
formatter = FORMATTERS[self.conf["format"]](
results,
ostream,
pagesize=self.conf["pagesize"],
width=self.conf["width"],
lossy_json_float=self.conf["lossy_json_float"],
)
formatter.display()
print_count = 0
total = None
for (cmd_fragment, capacity) in self.engine.consumed_capacities:
total = capacity if total is None else total + capacity
print(cmd_fragment)
print(indent(str(capacity)))
print_count += 1
if print_count > 1:
print("TOTAL")
print(indent(str(total)))
@repl_command
def do_EOF(self): # pylint: disable=C0103
"""Exit"""
return self.onecmd("exit")
@repl_command
def do_exit(self):
"""Exit"""
self.running = False
print()
self.history_manager.try_to_write_history()
return True
def run_command(
self, command: str, use_json: bool = False, raise_exceptions: bool = False
) -> None:
""" Run a command passed in from the command line with -c """
self.display = DISPLAYS["stdout"]
self.conf["pagesize"] = 0
if use_json:
self.conf["format"] = "json"
self._silent = True
if raise_exceptions:
self.onecmd(command)
else:
with exception_handler(self.engine):
self.onecmd(command)
def emptyline(self):
self.default("")
def help_help(self):
"""Print the help text for help"""
print("List commands or print details about a command")
def help_alter(self):
""" Print the help text for ALTER """
print(ALTER)
def help_analyze(self):
""" Print the help text for ANALYZE """
print(ANALYZE)
def help_create(self):
""" Print the help text for CREATE """
print(CREATE)
def help_delete(self):
""" Print the help text for DELETE """
print(DELETE)
def help_drop(self):
""" Print the help text for DROP """
print(DROP)
def help_dump(self):
""" Print the help text for DUMP """
print(DUMP)
def help_explain(self):
""" Print the help text for EXPLAIN """
print(EXPLAIN)
def help_insert(self):
""" Print the help text for INSERT """
print(INSERT)
def help_load(self):
""" Print the help text for LOAD """
print(LOAD)
def help_scan(self):
""" Print the help text for SCAN """
print(SCAN)
def help_select(self):
""" Print the help text for SELECT """
print(SELECT)
def help_update(self):
""" Print the help text for UPDATE """
print(UPDATE)
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
teamProject/teamProject/wsgi.py
|
"""
WSGI config for teamProject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'teamProject.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
NickBlog/settings/base.py
|
"""
Django settings for NickBlog project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7zbpqsg*2z5gz8s32!#6-72tw$_n55j12io4#kzttye29_(uu7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
# django-suit: admin UI beautification plugin
'suit',
'django.contrib.admin',
'django.contrib.auth',
# 'django.contrib.sites',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# django-haystack: global search plugin
'haystack',
'article',
# DjangoUeditor: rich-text editor plugin
# 'DjangoUeditor',
# dj-pagination: automatic pagination plugin
'dj_pagination',
# django-silk: HTTP request and SQL query profiling plugin
'silk',
'account',
'shortNotes',
)
# Extend the AbstractUser model; configuration required by the new account app
AUTH_USER_MODEL = 'account.Account'
# Do not record the referring page; after login/logout, go straight back to the previous page
LOGOUT_REDIRECT_URL = '/'
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/account/login/'
LOGOUT_URL = '/account/logout/'
# Simulate email delivery by printing to the console (for email addresses present in the database)
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Add a custom email-based authentication backend
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'account.backends.EmailBackend',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
# Pagination middleware
'dj_pagination.middleware.PaginationMiddleware',
# Request profiling middleware
'silk.middleware.SilkyMiddleware',
)
ROOT_URLCONF = 'NickBlog.urls'
# Built-in Jinja2 backend: django.template.backends.jinja2.Jinja2
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')], # Multiple paths allowed; the template engine searches them in order for template sources
'APP_DIRS': True, # Whether to also look for templates inside installed apps, not limited by DIRS
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# Template context processors
TEMPLATE_CONTEXT_PROCESSORS = (
# Enable dj-pagination
"django.core.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
# django-suit admin UI beautification
"django.core.context_processors.request",
)
# Cache backend: use MemcachedCache
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
# Cache timeout in seconds
'TIMEOUT': 60,
# Optional settings
'OPTIONS': {
# Maximum number of entries allowed in the cache
'MAX_ENTRIES': 100,
# When MAX_ENTRIES is reached, the fraction of entries culled is 1/CULL_FREQUENCY
'CULL_FREQUENCY': 2,
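# Illustrative arithmetic (assuming Django's default culling behavior): with
# MAX_ENTRIES=100 and CULL_FREQUENCY=2, roughly 1/2 of the entries (about 50)
# are evicted once the limit is reached.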
}
}
}
WSGI_APPLICATION = 'NickBlog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# MySQL database options
MYSQL_OPTIONS = {
# Insert data using the strict TRADITIONAL SQL mode
'sql_mode': 'TRADITIONAL',
'charset': 'utf8',
# init_command is executed on every database connection; this option can be removed once the tables have been created
'init_command': """
SET default_storage_engine=INNODB;
SET character_set_connection=utf8,collation_connection=utf8_unicode_ci;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
"""
}
# MySQL database engine
DATABASES = {
'default': {
# Transaction callback API: transaction_hooks
'ENGINE': 'django.db.backends.mysql',
'NAME': 'NickBlog',
'USER': os.environ.get('DATABASE_USER', 'root'),
'PASSWORD': os.environ.get('DATABASE_PASSWORD', 'mysql') or None,
'HOST': os.environ.get('DATABASE_HOST', '127.0.0.1'),
'PORT': os.environ.get('DATABASE_PORT', 3306),
# Database-specific options
'OPTIONS': MYSQL_OPTIONS,
# Wrap each HTTP request in a database transaction
'ATOMIC_REQUESTS': True,
'TEST': {
'NAME': 'test_ralph_ng',
}
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
# DEBUG=True -> serve files from the project directory
# DEBUG=False -> serve files from STATIC_ROOT
STATIC_URL = '/static/'
STATIC_ROOT = '/var/www/NickBlog/static/'
# Shared static files
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
# os.path.join(BASE_DIR, "media"),
)
# Upload folder
# In templates, use img.img.url as the resource path
MEDIA_URL = '/media/' # publicly accessible URL
# Regardless of DEBUG, resources under this path are used for both uploads and access. If it lives under a home directory, grant permissions first
MEDIA_ROOT = '/var/www/NickBlog/media/'
STATICFILES_FINDERS = ("django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",)
# @@@@@@@@@ Third-party plugin configuration @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# Pagination plugin: raise 404 for invalid pages
PAGINATION_INVALID_PAGE_RAISES_404 = True
# Search settings
HAYSTACK_CONNECTIONS = {
'default': {
# 'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'ENGINE': 'article.whoosh_cn_backend.WhooshEngine',
# Mind this path; if it is wrong the index cannot be generated!
'PATH': os.path.join(os.path.dirname(os.path.dirname(__file__)), 'whoosh_index'),
},
}
# Number of search results per page
HAYSTACK_SEARCH_RESULTS_PER_PAGE = 8
# Signal processor
HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'
# silk cProfile output configuration
# SILKY_PYTHON_PROFILER = True
# SILKY_PYTHON_PROFILER_BINARY = True
# SILKY_STORAGE_CLASS = 'silk.storage.ProfilerResultStorage'
# SILKY_PYTHON_PROFILER_RESULT_PATH = os.path.join(BASE_DIR, 'static/profiling')
# Login required to access silk
SILKY_AUTHENTICATION = True # User must login
SILKY_AUTHORISATION = True # User must have permissions
# suit UI configuration
# Django Suit configuration example
SUIT_CONFIG = {
# header
'ADMIN_NAME': '码练编辑系统',
# Date/time display in the top-left corner, see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#std:templatefilter-date
'HEADER_DATE_FORMAT': 'Y-m-j l',
'HEADER_TIME_FORMAT': 'H:i',
# forms
# Automatically append an asterisk (*) to the end of every required field label
# 'SHOW_REQUIRED_ASTERISK': True, # Default True
# Warn when leaving a form with unsaved changes; if False, leave without prompting
# 'CONFIRM_UNSAVED_CHANGES': True, # Default True
# menu
# 'SEARCH_URL': '/admin/auth/user/',
'MENU_ICONS': {
'sites': 'icon-leaf',
'auth': 'icon-lock',
},
# 'MENU_OPEN_FIRST_CHILD': True, # Default True
# Exclude a given app or model
# 'MENU_EXCLUDE': ('auth.group',),
# Use a custom menu for the left sidebar
'MENU': (
'sites',
{'app': 'article', 'icon': 'icon-star', 'label': '文章'},
{'app': 'account', 'icon': 'icon-lock', 'label': '用户'},
{'app': 'shortnotes', 'icon': 'icon-leaf', 'label': '短笔记'},
{'app': 'auth', 'icon': 'icon-cog', 'label': '用户组', 'models': ('auth.group', )},
),
# misc
# Items listed per page, applied to all models
# 'LIST_PER_PAGE': 15,
}
|
[] |
[] |
[
"DATABASE_PORT",
"DATABASE_PASSWORD",
"DATABASE_USER",
"DATABASE_HOST"
] |
[]
|
["DATABASE_PORT", "DATABASE_PASSWORD", "DATABASE_USER", "DATABASE_HOST"]
|
python
| 4 | 0 | |
decoding.py
|
""" decoding utilities"""
import json
import re
import os
from os.path import join
import pickle as pkl
from itertools import starmap
from cytoolz import curry
import torch
from utils import PAD, UNK, START, END
from model.copy_summ import CopySumm
from model.extract import ExtractSumm, PtrExtractSumm
from model.rl import ActorCritic
from data.batcher import conver2id, pad_batch_tensorize
from data.data import CnnDmDataset
try:
DATASET_DIR = os.environ['DATA']
except KeyError:
print('please use environment variable to specify data directories')
class DecodeDataset(CnnDmDataset):
""" get the article sentences only (for decoding use)"""
def __init__(self, split):
assert split in ['val', 'test']
super().__init__(split, DATASET_DIR)
def __getitem__(self, i):
js_data = super().__getitem__(i)
art_sents = js_data['article']
return art_sents
def make_html_safe(s):
"""Rouge use html, has to make output html safe"""
return s.replace("<", "<").replace(">", ">")
def load_best_ckpt(model_dir, reverse=False):
""" reverse=False->loss, reverse=True->reward/score"""
ckpts = os.listdir(join(model_dir, 'ckpt'))
ckpt_matcher = re.compile('^ckpt-.*-[0-9]*')
ckpts = sorted([c for c in ckpts if ckpt_matcher.match(c)],
key=lambda c: float(c.split('-')[1]), reverse=reverse)
print('loading checkpoint {}...'.format(ckpts[0]))
ckpt = torch.load(
join(model_dir, 'ckpt/{}'.format(ckpts[0]))
)['state_dict']
return ckpt
class Abstractor(object):
def __init__(self, abs_dir, max_len=30, cuda=True):
abs_meta = json.load(open(join(abs_dir, 'meta.json')))
assert abs_meta['net'] == 'base_abstractor'
abs_args = abs_meta['net_args']
abs_ckpt = load_best_ckpt(abs_dir)
word2id = pkl.load(open(join(abs_dir, 'vocab.pkl'), 'rb'))
abstractor = CopySumm(**abs_args)
abstractor.load_state_dict(abs_ckpt,strict=False)
self._device = torch.device('cuda' if cuda else 'cpu')
self._net = abstractor.to(self._device)
self._word2id = word2id
self._id2word = {i: w for w, i in word2id.items()}
self._max_len = max_len
def _prepro(self, raw_article_sents):
ext_word2id = dict(self._word2id)
ext_id2word = dict(self._id2word)
for raw_words in raw_article_sents:
for w in raw_words:
if w not in ext_word2id:
ext_word2id[w] = len(ext_word2id)
ext_id2word[len(ext_id2word)] = w
articles = conver2id(UNK, self._word2id, raw_article_sents)
art_lens = [len(art) for art in articles]
article = pad_batch_tensorize(articles, PAD, cuda=False
).to(self._device)
extend_arts = conver2id(UNK, ext_word2id, raw_article_sents)
extend_art = pad_batch_tensorize(extend_arts, PAD, cuda=False
).to(self._device)
extend_vsize = len(ext_word2id)
dec_args = (article, art_lens, extend_art, extend_vsize,
START, END, UNK, self._max_len)
return dec_args, ext_id2word
def __call__(self, raw_article_sents):
self._net.eval()
dec_args, id2word = self._prepro(raw_article_sents)
decs, attns = self._net.batch_decode(*dec_args)
def argmax(arr, keys):
return arr[max(range(len(arr)), key=lambda i: keys[i].item())]
dec_sents = []
for i, raw_words in enumerate(raw_article_sents):
dec = []
for id_, attn in zip(decs, attns):
if id_[i] == END:
break
elif id_[i] == UNK:
dec.append(argmax(raw_words, attn[i]))
else:
dec.append(id2word[id_[i].item()])
dec_sents.append(dec)
return dec_sents
class BeamAbstractor(Abstractor):
def __call__(self, raw_article_sents, beam_size=5, diverse=1.0):
self._net.eval()
dec_args, id2word = self._prepro(raw_article_sents)
dec_args = (*dec_args, beam_size, diverse)
all_beams = self._net.batched_beamsearch(*dec_args)
all_beams = list(starmap(_process_beam(id2word),
zip(all_beams, raw_article_sents)))
return all_beams
@curry
def _process_beam(id2word, beam, art_sent):
def process_hyp(hyp):
seq = []
for i, attn in zip(hyp.sequence[1:], hyp.attns[:-1]):
if i == UNK:
copy_word = art_sent[max(range(len(art_sent)),
key=lambda j: attn[j].item())]
seq.append(copy_word)
else:
seq.append(id2word[i])
hyp.sequence = seq
del hyp.hists
del hyp.attns
return hyp
return list(map(process_hyp, beam))
class Extractor(object):
def __init__(self, ext_dir, max_ext=5, cuda=True):
ext_meta = json.load(open(join(ext_dir, 'meta.json')))
if ext_meta['net'] == 'ml_ff_extractor':
ext_cls = ExtractSumm
elif ext_meta['net'] == 'ml_rnn_extractor':
ext_cls = PtrExtractSumm
else:
raise ValueError()
ext_ckpt = load_best_ckpt(ext_dir)
ext_args = ext_meta['net_args']
extractor = ext_cls(**ext_args)
extractor.load_state_dict(ext_ckpt)
word2id = pkl.load(open(join(ext_dir, 'vocab.pkl'), 'rb'))
self._device = torch.device('cuda' if cuda else 'cpu')
self._net = extractor.to(self._device)
self._word2id = word2id
self._id2word = {i: w for w, i in word2id.items()}
self._max_ext = max_ext
def __call__(self, raw_article_sents):
self._net.eval()
n_art = len(raw_article_sents)
articles = conver2id(UNK, self._word2id, raw_article_sents)
article = pad_batch_tensorize(articles, PAD, cuda=False
).to(self._device)
indices = self._net.extract([article], k=min(n_art, self._max_ext))
return indices
class ArticleBatcher(object):
def __init__(self, word2id, cuda=True):
self._device = torch.device('cuda' if cuda else 'cpu')
self._word2id = word2id
def __call__(self, raw_article_sents):
articles = conver2id(UNK, self._word2id, raw_article_sents)
article = pad_batch_tensorize(articles, PAD, cuda=False
).to(self._device)
return article
class RLExtractor(object):
def __init__(self, ext_dir, cuda=True):
ext_meta = json.load(open(join(ext_dir, 'meta.json')))
assert ext_meta['net'] == 'rnn-ext_abs_rl'
ext_args = ext_meta['net_args']['extractor']['net_args']
word2id = pkl.load(open(join(ext_dir, 'agent_vocab.pkl'), 'rb'))
extractor = PtrExtractSumm(**ext_args)
agent = ActorCritic(extractor._sent_enc,
extractor._art_enc,
extractor._extractor,
ArticleBatcher(word2id, cuda))
ext_ckpt = load_best_ckpt(ext_dir, reverse=True)
agent.load_state_dict(ext_ckpt)
self._device = torch.device('cuda' if cuda else 'cpu')
self._net = agent.to(self._device)
self._word2id = word2id
self._id2word = {i: w for w, i in word2id.items()}
def __call__(self, raw_article_sents):
self._net.eval()
indices = self._net(raw_article_sents)
return indices
|
[] |
[] |
[
"DATA"
] |
[]
|
["DATA"]
|
python
| 1 | 0 | |
train_mine.py
|
from __future__ import print_function
import argparse
import sys
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import torch.utils.data as data
import torchvision
import torchvision.transforms as transforms
from data_loader import SYSUData, RegDBData, TestData
from data_manager import *
from eval_metrics import eval_sysu, eval_regdb
from model_mine import embed_net
from utils import *
from loss import OriTripletLoss, CenterTripletLoss, CrossEntropyLabelSmooth, TripletLoss_WRT, MMD_Loss, MarginMMD_Loss
from tensorboardX import SummaryWriter
from re_rank import random_walk, k_reciprocal
from random_aug import RandomErasing
import numpy as np
np.set_printoptions(threshold=np.inf)
parser = argparse.ArgumentParser(description='PyTorch Cross-Modality Training')
parser.add_argument('--dataset', default='sysu', help='dataset name: regdb or sysu')
parser.add_argument('--lr', default=0.1 , type=float, help='learning rate, 0.00035 for adam')
parser.add_argument('--optim', default='sgd', type=str, help='optimizer')
parser.add_argument('--arch', default='resnet50', type=str,
help='network baseline:resnet18 or resnet50')
parser.add_argument('--resume', '-r', default='', type=str,
help='resume from checkpoint')
parser.add_argument('--test-only', action='store_true', help='test only')
parser.add_argument('--model_path', default='save_model/', type=str,
help='model save path')
parser.add_argument('--save_epoch', default=100, type=int,
metavar='s', help='save model every 10 epochs')
parser.add_argument('--log_path', default='log/', type=str,
help='log save path')
parser.add_argument('--vis_log_path', default='log/vis_log/', type=str,
help='log save path')
parser.add_argument('--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--img_w', default=144, type=int,
metavar='imgw', help='img width')
parser.add_argument('--img_h', default=288, type=int,
metavar='imgh', help='img height')
parser.add_argument('--batch-size', default=4, type=int,
metavar='B', help='training batch size')
parser.add_argument('--test-batch', default=64, type=int,
metavar='tb', help='testing batch size')
parser.add_argument('--method', default='base', type=str,
metavar='m', help='method type: base or agw')
parser.add_argument('--margin', default=0.3, type=float,
metavar='margin', help='triplet loss margin')
parser.add_argument('--num_pos', default=4, type=int,
help='num of pos per identity in each modality')
parser.add_argument('--trial', default=1, type=int,
metavar='t', help='trial (only for RegDB dataset)')
parser.add_argument('--seed', default=0, type=int,
metavar='t', help='random seed')
parser.add_argument('--gpu', default='0', type=str,
help='gpu device ids for CUDA_VISIBLE_DEVICES')
parser.add_argument('--mode', default='all', type=str, help='all or indoor')
parser.add_argument('--share_net', default=3, type=int,
metavar='share', help='[1,2,3,4,5]the start number of shared network in the two-stream networks')
parser.add_argument('--re_rank', default='no', type=str, help='performing reranking. [random_walk | k_reciprocal | no]')
parser.add_argument('--pcb', default='off', type=str, help='performing PCB, on or off')
parser.add_argument('--w_center', default=2.0, type=float, help='the weight for center loss')
parser.add_argument('--local_feat_dim', default=256, type=int,
help='feature dimention of each local feature in PCB')
parser.add_argument('--num_strips', default=6, type=int,
help='num of local strips in PCB')
parser.add_argument('--aug', action='store_true', help='Use Random Erasing Augmentation')
parser.add_argument('--label_smooth', default='off', type=str, help='performing label smooth or not')
parser.add_argument('--dist_disc', type=str, help='Include Distribution Discripeancy Loss', default=None)
parser.add_argument('--margin_mmd', default=0, type=float, help='Value of Margin For MMD Loss')
parser.add_argument('--dist_w', default=0.25, type=float, help='Weight of Distribution Discrepancy Loss')
parser.add_argument('--run_name', type=str,
help='Run Name for following experiment', default='test_run')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
set_seed(args.seed)
dataset = args.dataset
if dataset == 'sysu':
data_path = './SYSU-MM01'
log_path = args.log_path + 'sysu_log/'
test_mode = [1, 2] # thermal to visible
elif dataset == 'regdb':
data_path = './RegDB/'
log_path = args.log_path + 'regdb_log/'
test_mode = [2, 1] # visible to thermal
checkpoint_path = args.model_path
if not os.path.isdir(log_path):
os.makedirs(log_path)
if not os.path.isdir(checkpoint_path):
os.makedirs(checkpoint_path)
if not os.path.isdir(args.vis_log_path):
os.makedirs(args.vis_log_path)
suffix = args.run_name + '_' + dataset+'_c_tri_pcb_{}_w_tri_{}'.format(args.pcb,args.w_center)
if args.pcb=='on':
suffix = suffix + '_s{}_f{}'.format(args.num_strips, args.local_feat_dim)
suffix = suffix + '_share_net{}'.format(args.share_net)
if args.method=='agw':
suffix = suffix + '_agw_k{}_p{}_lr_{}_seed_{}'.format(args.num_pos, args.batch_size, args.lr, args.seed)
else:
suffix = suffix + '_base_gm10_k{}_p{}_lr_{}_seed_{}'.format(args.num_pos, args.batch_size, args.lr, args.seed)
if not args.optim == 'sgd':
suffix = suffix + '_' + args.optim
if dataset == 'regdb':
suffix = suffix + '_trial_{}'.format(args.trial)
sys.stdout = Logger(log_path + suffix + '_os.txt')
vis_log_dir = args.vis_log_path + suffix + '/'
if not os.path.isdir(vis_log_dir):
os.makedirs(vis_log_dir)
writer = SummaryWriter(vis_log_dir)
print("==========\nArgs:{}\n==========".format(args))
device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0 # best test accuracy
start_epoch = 0
print('==> Loading data..')
# Data loading code
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
if args.aug:
transform_train = transforms.Compose([
transforms.ToPILImage(),
transforms.Pad(10),
transforms.RandomCrop((args.img_h, args.img_w)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
RandomErasing(probability=0.5, mean=[0.485, 0.456, 0.406])
])
else:
transform_train = transforms.Compose([
transforms.ToPILImage(),
transforms.Pad(10),
transforms.RandomCrop((args.img_h, args.img_w)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
transform_test = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize((args.img_h, args.img_w)),
transforms.ToTensor(),
normalize,
])
end = time.time()
if dataset == 'sysu':
# training set
trainset = SYSUData(data_path, transform=transform_train)
# generate the idx of each person identity
color_pos, thermal_pos = GenIdx(trainset.train_color_label, trainset.train_thermal_label)
# testing set
query_img, query_label, query_cam = process_query_sysu(data_path, mode=args.mode)
gall_img, gall_label, gall_cam = process_gallery_sysu(data_path, mode=args.mode, trial=0)
elif dataset == 'regdb':
# training set
trainset = RegDBData(data_path, args.trial, transform=transform_train)
# generate the idx of each person identity
color_pos, thermal_pos = GenIdx(trainset.train_color_label, trainset.train_thermal_label)
# testing set
query_img, query_label = process_test_regdb(data_path, trial=args.trial, modal='visible')
gall_img, gall_label = process_test_regdb(data_path, trial=args.trial, modal='thermal')
gallset = TestData(gall_img, gall_label, transform=transform_test, img_size=(args.img_w, args.img_h))
queryset = TestData(query_img, query_label, transform=transform_test, img_size=(args.img_w, args.img_h))
# testing data loader
gall_loader = data.DataLoader(gallset, batch_size=args.test_batch, shuffle=False, num_workers=args.workers)
query_loader = data.DataLoader(queryset, batch_size=args.test_batch, shuffle=False, num_workers=args.workers)
n_class = len(np.unique(trainset.train_color_label))
nquery = len(query_label)
ngall = len(gall_label)
print('Dataset {} statistics:'.format(dataset))
print(' ------------------------------')
print(' subset | # ids | # images')
print(' ------------------------------')
print(' visible | {:5d} | {:8d}'.format(n_class, len(trainset.train_color_label)))
print(' thermal | {:5d} | {:8d}'.format(n_class, len(trainset.train_thermal_label)))
print(' ------------------------------')
print(' query | {:5d} | {:8d}'.format(len(np.unique(query_label)), nquery))
print(' gallery | {:5d} | {:8d}'.format(len(np.unique(gall_label)), ngall))
print(' ------------------------------')
print('Data Loading Time:\t {:.3f}'.format(time.time() - end))
print('==> Building model..')
if args.method =='base':
net = embed_net(n_class, no_local= 'off', gm_pool = 'on', arch=args.arch, share_net=args.share_net, pcb=args.pcb, local_feat_dim=args.local_feat_dim, num_strips=args.num_strips)
else:
net = embed_net(n_class, no_local= 'on', gm_pool = 'on', arch=args.arch, share_net=args.share_net, pcb=args.pcb)
net.to(device)
cudnn.benchmark = True
if len(args.resume) > 0:
model_path = checkpoint_path + args.resume
if os.path.isfile(model_path):
print('==> loading checkpoint {}'.format(args.resume))
checkpoint = torch.load(model_path)
start_epoch = checkpoint['epoch']
net.load_state_dict(checkpoint['net'])
print('==> loaded checkpoint {} (epoch {})'
.format(args.resume, checkpoint['epoch']))
else:
print('==> no checkpoint found at {}'.format(args.resume))
# define loss function
if args.label_smooth == 'off':
criterion_id = nn.CrossEntropyLoss()
else:
criterion_id = CrossEntropyLabelSmooth(n_class)
if args.method == 'agw':
criterion_tri = TripletLoss_WRT()
else:
loader_batch = args.batch_size * args.num_pos
#criterion_tri= OriTripletLoss(batch_size=loader_batch, margin=args.margin)
criterion_tri= CenterTripletLoss(batch_size=loader_batch, margin=args.margin)
criterion_id.to(device)
criterion_tri.to(device)
criterion_mmd = MMD_Loss().to(device)
criterion_margin_mmd = MarginMMD_Loss(margin=args.margin_mmd).to(device)
if args.optim == 'sgd':
if args.pcb == 'on':
ignored_params = list(map(id, net.local_conv_list.parameters())) \
+ list(map(id, net.fc_list.parameters()))
base_params = filter(lambda p: id(p) not in ignored_params, net.parameters())
optimizer = optim.SGD([
{'params': base_params, 'lr': 0.1 * args.lr},
{'params': net.local_conv_list.parameters(), 'lr': args.lr},
{'params': net.fc_list.parameters(), 'lr': args.lr}
],
weight_decay=5e-4, momentum=0.9, nesterov=True)
else:
ignored_params = list(map(id, net.bottleneck.parameters())) \
+ list(map(id, net.classifier.parameters()))
base_params = filter(lambda p: id(p) not in ignored_params, net.parameters())
optimizer = optim.SGD([
{'params': base_params, 'lr': 0.1 * args.lr},
{'params': net.bottleneck.parameters(), 'lr': args.lr},
{'params': net.classifier.parameters(), 'lr': args.lr}],
weight_decay=5e-4, momentum=0.9, nesterov=True)
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
if epoch < 10:
lr = args.lr * (epoch + 1) / 10
elif epoch >= 10 and epoch < 20:
lr = args.lr
elif epoch >= 20 and epoch < 50:
lr = args.lr * 0.1
elif epoch >= 50:
lr = args.lr * 0.01
optimizer.param_groups[0]['lr'] = 0.1 * lr
for i in range(len(optimizer.param_groups) - 1):
optimizer.param_groups[i + 1]['lr'] = lr
return lr
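# Illustrative numbers for the schedule above (assuming args.lr == 0.1): epochs 0-9 warm up
# linearly from 0.01 to 0.1, epochs 10-19 hold 0.1, epochs 20-49 use 0.01, and epoch 50+ uses
# 0.001. The first param group (the shared base parameters) always runs at one tenth of that value.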
def train(epoch):
current_lr = adjust_learning_rate(optimizer, epoch)
train_loss = AverageMeter()
id_loss = AverageMeter()
tri_loss = AverageMeter()
data_time = AverageMeter()
batch_time = AverageMeter()
correct = 0
total = 0
# switch to train mode
net.train()
end = time.time()
for batch_idx, (input1, input2, label1, label2) in enumerate(trainloader):
labels = torch.cat((label1, label2), 0)
input1 = Variable(input1.cuda())
input2 = Variable(input2.cuda())
labels = Variable(labels.cuda())
data_time.update(time.time() - end)
if args.pcb == 'on':
feat, out0, feat_all = net(input1, input2)
loss_id = criterion_id(out0[0], labels)
loss_tri_l, batch_acc = criterion_tri(feat[0], labels)
for i in range(len(feat)-1):
loss_id += criterion_id(out0[i+1], labels)
loss_tri_l += criterion_tri(feat[i+1], labels)[0]
loss_tri, batch_acc = criterion_tri(feat_all, labels)
loss_tri += loss_tri_l * args.w_center #
correct += batch_acc
loss = loss_id + loss_tri
else:
feat, out0 = net(input1, input2)
loss_id = criterion_id(out0, labels)
loss_tri, batch_acc = criterion_tri(feat, labels)
correct += (batch_acc / 2)
_, predicted = out0.max(1)
correct += (predicted.eq(labels).sum().item() / 2)
loss = loss_id + loss_tri * args.w_center #
if args.dist_disc == 'mmd':
## Apply Global MMD Loss on Pooling Layer
feat_rgb, feat_ir = torch.split(feat, [label1.size(0),label2.size(0)], dim=0)
loss_dist, l2max, expec = criterion_mmd(feat_rgb, feat_ir) ## Use Global MMD
elif args.dist_disc == 'margin_mmd':
## Apply Margin MMD-ID Loss on Pooling Layer
feat_rgb, feat_ir = torch.split(feat, [label1.size(0),label2.size(0)], dim=0)
loss_dist, l2max, expec = criterion_margin_mmd(feat_rgb, feat_ir) ## Use MMD-ID
if args.dist_disc is not None:
loss = loss + loss_dist * args.dist_w ## Add Discrepancy Loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
# update P
train_loss.update(loss.item(), 2 * input1.size(0))
id_loss.update(loss_id.item(), 2 * input1.size(0))
tri_loss.update(loss_tri, 2 * input1.size(0))
total += labels.size(0)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if batch_idx % 50 == 0:
print('Epoch: [{}][{}/{}] '
'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '
'lr:{:.3f} '
'Loss: {train_loss.val:.4f} ({train_loss.avg:.4f}) '
'iLoss: {id_loss.val:.4f} ({id_loss.avg:.4f}) '
'TLoss: {tri_loss.val:.4f} ({tri_loss.avg:.4f}) '
'Accu: {:.2f}'.format(
epoch, batch_idx, len(trainloader), current_lr,
100. * correct / total, batch_time=batch_time,
train_loss=train_loss, id_loss=id_loss,tri_loss=tri_loss))
writer.add_scalar('total_loss', train_loss.avg, epoch)
writer.add_scalar('id_loss', id_loss.avg, epoch)
writer.add_scalar('tri_loss', tri_loss.avg, epoch)
writer.add_scalar('lr', current_lr, epoch)
def test(epoch):
# switch to evaluation mode
net.eval()
print('Extracting Gallery Feature...')
start = time.time()
ptr = 0
if args.pcb == 'on':
feat_dim = args.num_strips * args.local_feat_dim
else:
feat_dim = 2048
gall_feat = np.zeros((ngall, feat_dim))
gall_feat_att = np.zeros((ngall, feat_dim))
with torch.no_grad():
for batch_idx, (input, label) in enumerate(gall_loader):
batch_num = input.size(0)
input = Variable(input.cuda())
if args.pcb == 'on':
feat = net(input, input, test_mode[0])
gall_feat[ptr:ptr + batch_num, :] = feat.detach().cpu().numpy()
else:
feat, feat_att = net(input, input, test_mode[0])
gall_feat[ptr:ptr + batch_num, :] = feat.detach().cpu().numpy()
gall_feat_att[ptr:ptr + batch_num, :] = feat_att.detach().cpu().numpy()
ptr = ptr + batch_num
print('Extracting Time:\t {:.3f}'.format(time.time() - start))
# switch to evaluation
net.eval()
print('Extracting Query Feature...')
start = time.time()
ptr = 0
query_feat = np.zeros((nquery, feat_dim))
query_feat_att = np.zeros((nquery, feat_dim))
with torch.no_grad():
for batch_idx, (input, label) in enumerate(query_loader):
batch_num = input.size(0)
input = Variable(input.cuda())
if args.pcb == 'on':
feat = net(input, input, test_mode[1])
query_feat[ptr:ptr + batch_num, :] = feat.detach().cpu().numpy()
else:
feat, feat_att = net(input, input, test_mode[1])
query_feat[ptr:ptr + batch_num, :] = feat.detach().cpu().numpy()
query_feat_att[ptr:ptr + batch_num, :] = feat_att.detach().cpu().numpy()
ptr = ptr + batch_num
print('Extracting Time:\t {:.3f}'.format(time.time() - start))
start = time.time()
if args.re_rank == 'random_walk':
distmat = random_walk(query_feat, gall_feat)
if args.pcb == 'off': distmat_att = random_walk(query_feat_att, gall_feat_att)
elif args.re_rank == 'k_reciprocal':
distmat = k_reciprocal(query_feat, gall_feat)
if args.pcb == 'off': distmat_att = k_reciprocal(query_feat_att, gall_feat_att)
elif args.re_rank == 'no':
        # no re-ranking: use the negative dot-product similarity as the distance matrix
distmat = -np.matmul(query_feat, np.transpose(gall_feat))
if args.pcb == 'off': distmat_att = -np.matmul(query_feat_att, np.transpose(gall_feat_att))
# evaluation
if dataset == 'regdb':
cmc, mAP, mINP = eval_regdb(distmat, query_label, gall_label)
if args.pcb == 'off': cmc_att, mAP_att, mINP_att = eval_regdb(distmat_att, query_label, gall_label)
elif dataset == 'sysu':
cmc, mAP, mINP = eval_sysu(distmat, query_label, gall_label, query_cam, gall_cam)
if args.pcb == 'off': cmc_att, mAP_att, mINP_att = eval_sysu(distmat_att, query_label, gall_label, query_cam, gall_cam)
print('Evaluation Time:\t {:.3f}'.format(time.time() - start))
writer.add_scalar('rank1', cmc[0], epoch)
writer.add_scalar('mAP', mAP, epoch)
writer.add_scalar('mINP', mINP, epoch)
if args.pcb == 'off':
writer.add_scalar('rank1_att', cmc_att[0], epoch)
writer.add_scalar('mAP_att', mAP_att, epoch)
writer.add_scalar('mINP_att', mINP_att, epoch)
return cmc, mAP, mINP, cmc_att, mAP_att, mINP_att
else:
return cmc, mAP, mINP
# training
print('==> Start Training...')
for epoch in range(start_epoch, 61 - start_epoch):
print('==> Preparing Data Loader...')
# identity sampler
sampler = IdentitySampler(trainset.train_color_label, \
trainset.train_thermal_label, color_pos, thermal_pos, args.num_pos, args.batch_size,
epoch)
trainset.cIndex = sampler.index1 # color index
trainset.tIndex = sampler.index2 # thermal index
print(epoch)
loader_batch = args.batch_size * args.num_pos
trainloader = data.DataLoader(trainset, batch_size=loader_batch, \
sampler=sampler, num_workers=args.workers, drop_last=True)
# training
train(epoch)
if epoch > 9 and epoch % 2 == 0:
print('Test Epoch: {}'.format(epoch))
# testing
if args.pcb == 'off':
cmc, mAP, mINP, cmc_fc, mAP_fc, mINP_fc = test(epoch)
else:
cmc_fc, mAP_fc, mINP_fc = test(epoch)
# save model
if cmc_fc[0] > best_acc: # not the real best for sysu-mm01
best_acc = cmc_fc[0]
best_epoch = epoch
best_mAP = mAP_fc
best_mINP = mINP_fc
state = {
'net': net.state_dict(),
'cmc': cmc_fc,
'mAP': mAP_fc,
'mINP': mINP_fc,
'epoch': epoch,
}
torch.save(state, checkpoint_path + suffix + '_best.t')
if args.pcb == 'off':
print('POOL: Rank-1: {:.2%} | Rank-5: {:.2%} | Rank-10: {:.2%}| Rank-20: {:.2%}| mAP: {:.2%}| mINP: {:.2%}'.format(
cmc[0], cmc[4], cmc[9], cmc[19], mAP, mINP))
print('FC: Rank-1: {:.2%} | Rank-5: {:.2%} | Rank-10: {:.2%}| Rank-20: {:.2%}| mAP: {:.2%}| mINP: {:.2%}'.format(
cmc_fc[0], cmc_fc[4], cmc_fc[9], cmc_fc[19], mAP_fc, mINP_fc))
print('Best Epoch [{}], Rank-1: {:.2%} | mAP: {:.2%}| mINP: {:.2%}'.format(best_epoch, best_acc, best_mAP, best_mINP))
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 |
node/pkg/allocateip/allocateip.go
|
// Copyright (c) 2018-2021 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package allocateip
import (
"context"
"fmt"
gnet "net"
"os"
"reflect"
"time"
log "github.com/sirupsen/logrus"
api "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
felixconfig "github.com/projectcalico/calico/felix/config"
"github.com/projectcalico/calico/libcalico-go/lib/apiconfig"
libapi "github.com/projectcalico/calico/libcalico-go/lib/apis/v3"
bapi "github.com/projectcalico/calico/libcalico-go/lib/backend/api"
"github.com/projectcalico/calico/libcalico-go/lib/backend/model"
"github.com/projectcalico/calico/libcalico-go/lib/backend/syncersv1/tunnelipsyncer"
client "github.com/projectcalico/calico/libcalico-go/lib/clientv3"
cerrors "github.com/projectcalico/calico/libcalico-go/lib/errors"
"github.com/projectcalico/calico/libcalico-go/lib/ipam"
"github.com/projectcalico/calico/libcalico-go/lib/net"
"github.com/projectcalico/calico/libcalico-go/lib/options"
"github.com/projectcalico/calico/node/buildinfo"
"github.com/projectcalico/calico/node/pkg/calicoclient"
"github.com/projectcalico/calico/typha/pkg/syncclientutils"
"github.com/projectcalico/calico/typha/pkg/syncproto"
)
// This file contains the main processing and common logic for assigning tunnel addresses,
// used by calico/node to set the host's tunnel address if IPIP or VXLAN is enabled on the pool or
// wireguard is enabled on the node.
//
// It will assign an address if there are any available, and remove any tunnel addresses
// that are configured and should no longer be.
// Run runs the tunnel ip allocator. If done is nil, it runs in single-shot mode. If non-nil, it runs in daemon mode
// performing a reconciliation when IP pool or node configuration changes that may impact the allocations.
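// A minimal usage sketch (illustrative only):
//
//     done := make(chan struct{})
//     go Run(done) // daemon mode: reconcile whenever relevant config changes, until done is closed
//     // ...
//     close(done)
//
// Calling Run(nil) instead performs a single reconciliation and returns.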
func Run(done <-chan struct{}) {
// This binary is only ever invoked _after_ the
// startup binary has been invoked and the modified environments have
// been sourced. Therefore, the NODENAME environment will always be
// set at this point.
nodename := os.Getenv("NODENAME")
if nodename == "" {
log.Panic("NODENAME environment is not set")
}
// Load felix environment configuration. Note that this does not perform the full hierarchical load of felix
// configuration nor does this watch for changes, so if it is critical that the configuration value used is correct
// and may be defined outside of an environment variable, do not use this.
felixEnvConfig := loadFelixEnvConfig()
// Load the client config from environment.
cfg, c := calicoclient.CreateClient()
run(nodename, cfg, c, felixEnvConfig, done)
}
func run(
nodename string, cfg *apiconfig.CalicoAPIConfig, c client.Interface,
felixEnvConfig *felixconfig.Config, done <-chan struct{},
) {
// If configured to use host-local IPAM, there is no need to configure tunnel addresses as they use the
// first IP of the pod CIDR - this is handled in the k8s backend code in libcalico-go.
if cfg.Spec.K8sUsePodCIDR {
log.Debug("Using host-local IPAM, no need to allocate a tunnel IP")
if done != nil {
// If a done channel is specified, only exit when this is closed.
<-done
}
return
}
if done == nil {
// Running in single shot mode, so assign addresses and exit.
reconcileTunnelAddrs(nodename, cfg, c, felixEnvConfig)
return
}
// This is running as a daemon. Create a long-running reconciler.
r := &reconciler{
nodename: nodename,
cfg: cfg,
client: c,
ch: make(chan struct{}),
data: make(map[string]interface{}),
felixEnvConfig: felixEnvConfig,
}
// Either create a typha syncclient or a local syncer depending on configuration. This calls back into the
// reconciler to trigger updates when necessary.
// Read Typha settings from the environment.
// When Typha is in use, there will already be variables prefixed with FELIX_, so it's
// convenient if we honor those as well as the CALICO variables.
typhaConfig := syncclientutils.ReadTyphaConfig([]string{"FELIX_", "CALICO_"})
if syncclientutils.MustStartSyncerClientIfTyphaConfigured(
&typhaConfig, syncproto.SyncerTypeTunnelIPAllocation,
buildinfo.GitVersion, nodename, fmt.Sprintf("tunnel-ip-allocation %s", buildinfo.GitVersion),
r,
) {
log.Debug("Using typha syncclient")
} else {
// Use the syncer locally.
log.Debug("Using local syncer")
syncer := tunnelipsyncer.New(c.(backendClientAccessor).Backend(), r, nodename)
syncer.Start()
}
// Run the reconciler.
r.run(done)
}
// reconciler watches IPPool and Node configuration and triggers a reconciliation of the Tunnel IP addresses whenever
// it spots a configuration change that may impact IP selection.
type reconciler struct {
nodename string
cfg *apiconfig.CalicoAPIConfig
client client.Interface
ch chan struct{}
data map[string]interface{}
felixEnvConfig *felixconfig.Config
inSync bool
}
// run is the main reconciliation loop, it loops until done.
func (r reconciler) run(done <-chan struct{}) {
// Loop forever, updating whenever we get a kick. The first kick will happen as soon as the syncer is in sync.
for {
select {
case <-r.ch:
// Received an update that requires reconciliation. If the reconciliation fails it will cause the daemon
// to exit this is fine - it will be restarted, and the syncer will trigger a reconciliation when in-sync
// again.
reconcileTunnelAddrs(r.nodename, r.cfg, r.client, r.felixEnvConfig)
case <-done:
return
}
}
}
// OnStatusUpdated handles the syncer status callback method.
func (r *reconciler) OnStatusUpdated(status bapi.SyncStatus) {
if status == bapi.InSync {
// We are in-sync, trigger an initial scan/update of the IP addresses.
r.inSync = true
r.ch <- struct{}{}
}
}
// OnUpdates handles the syncer resource updates.
func (r *reconciler) OnUpdates(updates []bapi.Update) {
var updated bool
for _, u := range updates {
switch u.UpdateType {
case bapi.UpdateTypeKVDeleted:
// Resource is deleted. If this resource is in our cache then trigger an update.
if _, ok := r.data[u.Key.String()]; ok {
updated = true
}
delete(r.data, u.Key.String())
case bapi.UpdateTypeKVNew, bapi.UpdateTypeKVUpdated:
// Resource is created or updated. Depending on the resource, we extract and cache the relevant data that
// we are monitoring. If the data has changed then trigger an update.
var data interface{}
switch v := u.Value.(type) {
case *model.IPPool:
// For pools just track the whole data.
log.Debugf("Updated pool resource: %s", u.Key)
data = v
case *libapi.Node:
// For nodes, we only care about our own node, *and* we only care about the wireguard public key.
if v.Name != r.nodename {
continue
}
log.Debugf("Updated node resource: %s", u.Key)
data = v.Status.WireguardPublicKey
default:
// We got an update for an unexpected resource type. Rather than ignore, just treat as updated so that
// we reconcile the addresses.
log.Warningf("Unexpected resource update: %s", u.Key)
updated = true
continue
}
if existing, ok := r.data[u.Key.String()]; !ok || !reflect.DeepEqual(existing, data) {
// Entry is new or associated data is modified. In either case update the data and flag as updated.
log.Debug("Stored data has been modified - trigger reconciliation")
updated = true
r.data[u.Key.String()] = data
}
}
}
if updated && r.inSync {
// We have updated data. Trigger a reconciliation, but don't block if there is already an update pending.
select {
case r.ch <- struct{}{}:
default:
}
}
}
// reconcileTunnelAddrs performs a single shot update of the tunnel IP allocations.
func reconcileTunnelAddrs(
nodename string, cfg *apiconfig.CalicoAPIConfig, c client.Interface, felixEnvConfig *felixconfig.Config,
) {
ctx := context.Background()
// Get node resource for given nodename.
node, err := c.Nodes().Get(ctx, nodename, options.GetOptions{})
if err != nil {
log.WithError(err).Fatalf("failed to fetch node resource '%s'", nodename)
}
// Get list of ip pools
ipPoolList, err := c.IPPools().List(ctx, options.ListOptions{})
if err != nil {
log.WithError(err).Fatal("Unable to query IP pool configuration")
}
// If wireguard is enabled then allocate an IP for the wireguard device. We do this for all deployment types even
// when pod CIDRs are not managed by Calico.
if cidrs := determineEnabledPoolCIDRs(*node, *ipPoolList, felixEnvConfig, ipam.AttributeTypeWireguard); len(cidrs) > 0 {
ensureHostTunnelAddress(ctx, c, nodename, cidrs, ipam.AttributeTypeWireguard)
} else {
removeHostTunnelAddr(ctx, c, nodename, ipam.AttributeTypeWireguard)
}
// Query the IPIP enabled pools and either configure the tunnel
// address, or remove it.
if cidrs := determineEnabledPoolCIDRs(*node, *ipPoolList, felixEnvConfig, ipam.AttributeTypeIPIP); len(cidrs) > 0 {
ensureHostTunnelAddress(ctx, c, nodename, cidrs, ipam.AttributeTypeIPIP)
} else {
removeHostTunnelAddr(ctx, c, nodename, ipam.AttributeTypeIPIP)
}
// Query the IPv4 VXLAN enabled pools and either configure the tunnel
// address, or remove it.
if cidrs := determineEnabledPoolCIDRs(*node, *ipPoolList, felixEnvConfig, ipam.AttributeTypeVXLAN); len(cidrs) > 0 {
ensureHostTunnelAddress(ctx, c, nodename, cidrs, ipam.AttributeTypeVXLAN)
} else {
removeHostTunnelAddr(ctx, c, nodename, ipam.AttributeTypeVXLAN)
}
// Query the IPv6 VXLAN enabled pools and either configure the tunnel
// address, or remove it.
if cidrs := determineEnabledPoolCIDRs(*node, *ipPoolList, felixEnvConfig, ipam.AttributeTypeVXLANV6); len(cidrs) > 0 {
ensureHostTunnelAddress(ctx, c, nodename, cidrs, ipam.AttributeTypeVXLANV6)
} else {
removeHostTunnelAddr(ctx, c, nodename, ipam.AttributeTypeVXLANV6)
}
}
func ensureHostTunnelAddress(ctx context.Context, c client.Interface, nodename string, cidrs []net.IPNet, attrType string) {
logCtx := getLogger(attrType)
logCtx.WithField("node", nodename).Debug("Ensure tunnel address is set")
// Get the currently configured address.
node, err := c.Nodes().Get(ctx, nodename, options.GetOptions{})
if err != nil {
logCtx.WithError(err).Fatalf("Unable to retrieve tunnel address. Error getting node '%s'", nodename)
}
// Get the address and ipam attribute string
var addr string
switch attrType {
case ipam.AttributeTypeVXLAN:
addr = node.Spec.IPv4VXLANTunnelAddr
case ipam.AttributeTypeVXLANV6:
addr = node.Spec.IPv6VXLANTunnelAddr
case ipam.AttributeTypeIPIP:
if node.Spec.BGP != nil {
addr = node.Spec.BGP.IPv4IPIPTunnelAddr
}
case ipam.AttributeTypeWireguard:
if node.Spec.Wireguard != nil {
addr = node.Spec.Wireguard.InterfaceIPv4Address
}
}
// Work out if we need to assign a tunnel address.
// In most cases we should not release current address and should assign new one.
release := false
assign := true
if addr == "" {
// The tunnel has no IP address assigned, assign one.
logCtx.Info("Assign a new tunnel address")
// Defensively release any IP addresses with this handle. This covers a theoretical case
// where the node object has lost its reference to its IP, but the allocation still exists
// in IPAM. For example, if the node object was manually edited.
release = true
} else {
// Go ahead checking status of current address.
ipAddr := gnet.ParseIP(addr)
if ipAddr == nil {
logCtx.WithError(err).Fatalf("Failed to parse the CIDR '%s'", addr)
}
// Check if we got correct assignment attributes.
attr, handle, err := c.IPAM().GetAssignmentAttributes(ctx, net.IP{IP: ipAddr})
if err == nil {
if attr[ipam.AttributeType] == attrType && attr[ipam.AttributeNode] == nodename {
// The tunnel address is still assigned to this node, but is it in the correct pool this time?
if !isIpInPool(addr, cidrs) {
// Wrong pool, release this address.
logCtx.WithField("currentAddr", addr).Info("Current address is not in a valid pool, release it and reassign")
release = true
} else {
// Correct pool, keep this address.
logCtx.WithField("currentAddr", addr).Info("Current address is still valid, do nothing")
assign = false
}
} else if len(attr) == 0 {
// No attributes means that this is an old address, assigned by code that didn't use
// allocation attributes. It might be a pod address, or it might be a node tunnel
// address. The only way to tell is by the existence of a handle, since workload
// addresses have always used a handle, whereas tunnel addresses didn't start
// using handles until the same time as they got allocation attributes.
if handle != nil {
// Handle exists, so this address belongs to a workload. We need to assign
// a new one for the node, but we shouldn't clean up the old address.
logCtx.WithField("currentAddr", addr).Info("Current address is occupied, assign a new one")
} else {
// Handle does not exist. This is just an old tunnel address that comes from
// a time before we used handles and allocation attributes. Attempt to
// reassign the same address, but now with metadata. It's possible that someone
// else takes the address while we do this, in which case we'll just
// need to assign a new address.
if err := correctAllocationWithHandle(ctx, c, addr, nodename, attrType); err != nil {
if _, ok := err.(cerrors.ErrorResourceAlreadyExists); !ok {
// Unknown error attempting to allocate the address. Exit.
logCtx.WithError(err).Fatal("Error correcting tunnel IP allocation")
}
// The address was taken by someone else. We need to assign a new one.
logCtx.WithError(err).Warn("Failed to correct missing attributes, will assign a new address")
} else {
// We corrected the address, we can just return.
logCtx.Info("Updated tunnel address with allocation attributes")
return
}
}
} else {
// The allocation has attributes, but doesn't belong to us. Assign a new one.
logCtx.WithField("currentAddr", addr).Info("Current address is occupied, assign a new one")
}
} else if _, ok := err.(cerrors.ErrorResourceDoesNotExist); ok {
// The tunnel address is not assigned, reassign it.
logCtx.WithField("currentAddr", addr).Info("Current address is not assigned, assign a new one")
// Defensively release any IP addresses with this handle. This covers a theoretical case
// where the node object has lost its reference to its correct IP, but the allocation still exists
// in IPAM. For example, if the node object was manually edited.
release = true
} else {
// Failed to get assignment attributes, datastore connection issues possible, panic
logCtx.WithError(err).Panicf("Failed to get assignment attributes for CIDR '%s'", addr)
}
}
if release {
logCtx.WithField("IP", addr).Info("Release any old tunnel addresses")
handle, _ := generateHandleAndAttributes(nodename, attrType)
if err := c.IPAM().ReleaseByHandle(ctx, handle); err != nil {
if _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {
logCtx.WithError(err).Fatal("Failed to release old addresses")
}
// No existing allocations for this node.
}
}
if assign {
logCtx.WithField("IP", addr).Info("Assign new tunnel address")
assignHostTunnelAddr(ctx, c, nodename, cidrs, attrType)
}
}
func correctAllocationWithHandle(ctx context.Context, c client.Interface, addr, nodename string, attrType string) error {
ipAddr := net.ParseIP(addr)
if ipAddr == nil {
log.Fatalf("Failed to parse node tunnel address '%s'", addr)
return nil
}
// Release the old allocation.
_, err := c.IPAM().ReleaseIPs(ctx, ipam.ReleaseOptions{Address: ipAddr.String()})
if err != nil {
// If we fail to release the old allocation, we shouldn't continue any further. Just exit.
log.WithField("IP", ipAddr.String()).WithError(err).Fatal("Error releasing address")
}
// Attempt to re-assign the same address, but with a handle this time.
handle, attrs := generateHandleAndAttributes(nodename, attrType)
args := ipam.AssignIPArgs{
IP: *ipAddr,
HandleID: &handle,
Attrs: attrs,
Hostname: nodename,
}
// If we fail to allocate the same IP, return an error. We'll just
// have to allocate a new one.
return c.IPAM().AssignIP(ctx, args)
}
func generateHandleAndAttributes(nodename string, attrType string) (string, map[string]string) {
attrs := map[string]string{ipam.AttributeNode: nodename}
var handle string
switch attrType {
case ipam.AttributeTypeVXLAN:
handle = fmt.Sprintf("vxlan-tunnel-addr-%s", nodename)
case ipam.AttributeTypeVXLANV6:
handle = fmt.Sprintf("vxlan-v6-tunnel-addr-%s", nodename)
case ipam.AttributeTypeIPIP:
handle = fmt.Sprintf("ipip-tunnel-addr-%s", nodename)
case ipam.AttributeTypeWireguard:
handle = fmt.Sprintf("wireguard-tunnel-addr-%s", nodename)
}
attrs[ipam.AttributeType] = attrType
return handle, attrs
}
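// For example, on a node named "node-1" (illustrative) the handles produced above are
// "ipip-tunnel-addr-node-1", "vxlan-tunnel-addr-node-1", "vxlan-v6-tunnel-addr-node-1" or
// "wireguard-tunnel-addr-node-1", with the node name and allocation type stored as IPAM
// assignment attributes.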
// assignHostTunnelAddr claims an IP address from the first pool
// with some space. Stores the result in the host's config as its tunnel
// address. It will assign a VXLAN address if vxlan is true, otherwise an IPIP address.
func assignHostTunnelAddr(ctx context.Context, c client.Interface, nodename string, cidrs []net.IPNet, attrType string) {
// Build attributes and handle for this allocation.
handle, attrs := generateHandleAndAttributes(nodename, attrType)
logCtx := getLogger(attrType)
if len(cidrs) == 0 {
logCtx.Fatal("Unable to assign an address: no IP pools provided")
}
args := ipam.AutoAssignArgs{
HandleID: &handle,
Attrs: attrs,
Hostname: nodename,
IntendedUse: api.IPPoolAllowedUseTunnel,
}
switch cidrs[0].Version() {
case 4:
args.Num4 = 1
args.IPv4Pools = cidrs
case 6:
args.Num6 = 1
args.IPv6Pools = cidrs
default:
logCtx.Panicf("Unable to assign an address: invalid IP version for IP pool %v", cidrs[0])
}
v4Assignments, v6Assignments, err := c.IPAM().AutoAssign(ctx, args)
if err != nil {
logCtx.WithError(err).Fatal("Unable to autoassign an address")
}
ip := ""
if v4Assignments != nil {
if err := v4Assignments.PartialFulfillmentError(); err != nil {
logCtx.WithError(err).Fatal("Unable to autoassign an IPv4 address")
}
ip = v4Assignments.IPs[0].IP.String()
}
if v6Assignments != nil {
if err := v6Assignments.PartialFulfillmentError(); err != nil {
logCtx.WithError(err).Fatal("Unable to autoassign an IPv6 address")
}
ip = v6Assignments.IPs[0].IP.String()
}
// Update the node object with the assigned address.
if err = updateNodeWithAddress(ctx, c, nodename, ip, attrType); err != nil {
// We hit an error, so release the IP address before exiting.
  if err := c.IPAM().ReleaseByHandle(ctx, handle); err != nil {
   logCtx.WithError(err).WithField("IP", ip).Errorf("Error releasing IP address on failure")
  }
// Log the error and exit with exit code 1.
logCtx.WithError(err).WithField("IP", ip).Fatal("Unable to set tunnel address")
}
logCtx.WithField("IP", ip).Info("Assigned tunnel address to node")
}
func updateNodeWithAddress(ctx context.Context, c client.Interface, nodename string, addr string, attrType string) error {
// If the update fails with ResourceConflict error then retry 5 times with 1 second delay before failing.
for i := 0; i < 5; i++ {
node, err := c.Nodes().Get(ctx, nodename, options.GetOptions{})
if err != nil {
return err
}
switch attrType {
case ipam.AttributeTypeVXLAN:
node.Spec.IPv4VXLANTunnelAddr = addr
case ipam.AttributeTypeVXLANV6:
node.Spec.IPv6VXLANTunnelAddr = addr
case ipam.AttributeTypeIPIP:
if node.Spec.BGP == nil {
node.Spec.BGP = &libapi.NodeBGPSpec{}
}
node.Spec.BGP.IPv4IPIPTunnelAddr = addr
case ipam.AttributeTypeWireguard:
if node.Spec.Wireguard == nil {
node.Spec.Wireguard = &libapi.NodeWireguardSpec{}
}
node.Spec.Wireguard.InterfaceIPv4Address = addr
}
_, err = c.Nodes().Update(ctx, node, options.SetOptions{})
if err != nil {
if _, ok := err.(cerrors.ErrorResourceUpdateConflict); ok {
// Wait for a second and try again if there was a conflict during the resource update.
log.WithField("node", node.Name).Info("Resource update conflict error updating node, retrying.")
time.Sleep(1 * time.Second)
continue
}
log.WithField("node", node.Name).WithError(err).Warning("Error updating node")
}
return err
}
return fmt.Errorf("Too many retries attempting to update node with tunnel address")
}
// removeHostTunnelAddr removes any existing IP address for this host's
// tunnel device and releases the IP from IPAM. If no IP is assigned this function
// is a no-op.
func removeHostTunnelAddr(ctx context.Context, c client.Interface, nodename string, attrType string) {
var updateError error
logCtx := getLogger(attrType)
logCtx.WithField("node", nodename).Debug("Remove tunnel addresses")
// If the update fails with ResourceConflict error then retry 5 times with 1 second delay before failing.
for i := 0; i < 5; i++ {
node, err := c.Nodes().Get(ctx, nodename, options.GetOptions{})
if err != nil {
logCtx.WithError(err).Fatalf("Unable to retrieve tunnel address for cleanup. Error getting node '%s'", nodename)
}
// Find out the currently assigned address and remove it from the node.
var ipAddrStr string
var ipAddr *net.IP
switch attrType {
case ipam.AttributeTypeVXLAN:
ipAddrStr = node.Spec.IPv4VXLANTunnelAddr
node.Spec.IPv4VXLANTunnelAddr = ""
case ipam.AttributeTypeVXLANV6:
ipAddrStr = node.Spec.IPv6VXLANTunnelAddr
node.Spec.IPv6VXLANTunnelAddr = ""
case ipam.AttributeTypeIPIP:
if node.Spec.BGP != nil {
ipAddrStr = node.Spec.BGP.IPv4IPIPTunnelAddr
node.Spec.BGP.IPv4IPIPTunnelAddr = ""
// If removing the tunnel address causes the BGP spec to be empty, then nil it out.
// libcalico asserts that if a BGP spec is present, that it not be empty.
if reflect.DeepEqual(*node.Spec.BGP, libapi.NodeBGPSpec{}) {
logCtx.Debug("BGP spec is now empty, setting to nil")
node.Spec.BGP = nil
}
}
case ipam.AttributeTypeWireguard:
if node.Spec.Wireguard != nil {
ipAddrStr = node.Spec.Wireguard.InterfaceIPv4Address
node.Spec.Wireguard.InterfaceIPv4Address = ""
if reflect.DeepEqual(*node.Spec.Wireguard, libapi.NodeWireguardSpec{}) {
logCtx.Debug("Wireguard spec is now empty, setting to nil")
node.Spec.Wireguard = nil
}
}
}
if ipAddrStr != "" {
ipAddr = net.ParseIP(ipAddrStr)
}
// Release tunnel IP address(es) for the node.
handle, _ := generateHandleAndAttributes(nodename, attrType)
if err := c.IPAM().ReleaseByHandle(ctx, handle); err != nil {
if _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {
// Unknown error releasing the address.
logCtx.WithError(err).WithFields(log.Fields{
"IP": ipAddrStr,
"Handle": handle,
}).Fatal("Error releasing address by handle")
}
// Resource does not exist. This can occur in a few scenarios:
//
// 1. The IP is genuinely not allocated, and there's nothing for us to do.
// 2. The IP pre-dates the use of handles in this code, and so the handle doesn't exist.
// 3. We have gotten into an invalid state where the handle has been deleted but the IP is still allocated.
//
// For scenario #1, there is no more work to do.
// We can determine if we're encountering scenario #2 or #3 by inspecting the allocation's attributes.
// For scenario #2, we expect no attributes and no handle to be stored with the allocation.
// For scenario #3, we expect a handle in the attributes and it should match the expected value.
if ipAddr != nil {
// There are no addresses with this handle. If there is an IP configured on the node, check to see if it
// belongs to us. If it has no handle and no attributes, then we can pretty confidently
// say that it belongs to us rather than a pod and should be cleaned up.
logCtx.WithField("handle", handle).Info("No IPs with handle, release exact IP")
attr, storedHandle, err := c.IPAM().GetAssignmentAttributes(ctx, *ipAddr)
if err != nil {
if _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {
logCtx.WithError(err).Fatal("Failed to query attributes")
}
// Scenario #1: The allocation actually doesn't exist, we don't have anything to do.
} else if len(attr) == 0 && storedHandle == nil {
// Scenario #2: The allocation exists, but has no handle whatsoever.
// This is an ancient allocation and can be released.
if _, err := c.IPAM().ReleaseIPs(ctx, ipam.ReleaseOptions{Address: ipAddr.String()}); err != nil {
logCtx.WithError(err).WithField("IP", ipAddr.String()).Fatal("Error releasing address from IPAM")
}
} else if storedHandle != nil && *storedHandle == handle {
// Scenario #3: The allocation exists, has a handle, and it matches the one we expect.
// This means the handle object itself was wrongfully deleted. We can clean it up
// by releasing the IP directly with both address and handle specified.
if _, err := c.IPAM().ReleaseIPs(ctx, ipam.ReleaseOptions{Address: ipAddr.String(), Handle: handle}); err != nil {
logCtx.WithError(err).WithField("IP", ipAddr.String()).Fatal("Error releasing address from IPAM")
}
} else {
// The final scenario: the IP on the node is allocated, but it is allocated to some other handle.
// It doesn't belong to us. We can't do anything here but it's worth logging.
fields := log.Fields{"attributes": attr, "IP": ipAddr.String()}
logCtx.WithFields(fields).Warnf("IP address has been reused by something else")
}
}
}
// Update the node object.
_, updateError = c.Nodes().Update(ctx, node, options.SetOptions{})
if _, ok := updateError.(cerrors.ErrorResourceUpdateConflict); ok {
// Wait for a second and try again if there was a conflict during the resource update.
logCtx.Infof("Error updating node %s: %s. Retrying.", node.Name, err)
time.Sleep(1 * time.Second)
continue
}
break
}
// Check to see if there was still an error after the retry loop,
// and log and exit if there was an error.
if updateError != nil {
// We hit an error, so release the IP address before exiting.
// Log the error and exit with exit code 1.
logCtx.WithError(updateError).Fatal("Unable to remove tunnel address")
}
}
// determineEnabledPools returns all enabled pools. If vxlan is true, then it will only return VXLAN pools. Otherwise
// it will only return IPIP enabled pools.
func determineEnabledPoolCIDRs(
node libapi.Node, ipPoolList api.IPPoolList, felixEnvConfig *felixconfig.Config, attrType string,
) []net.IPNet {
// For wireguard, an IP is only allocated from a pool if wireguard is actually running (there will be a public
// key configured on the node), and the cluster is not running in host encryption mode (which is required for
// managed cloud with non-Calico CNI). When running in host encryption mode, the wireguard dataplane will use the
// node IP for the device.
if attrType == ipam.AttributeTypeWireguard {
if felixEnvConfig.WireguardHostEncryptionEnabled {
log.Debug("Wireguard is running in host encryption mode, do not allocate a device IP")
return nil
}
if node.Status.WireguardPublicKey == "" {
log.Debugf("Wireguard is not running on node %s, do not allocate a device IP", node.Name)
return nil
}
}
var cidrs []net.IPNet
for _, ipPool := range ipPoolList.Items {
_, poolCidr, err := net.ParseCIDR(ipPool.Spec.CIDR)
if err != nil {
log.WithError(err).Fatalf("Failed to parse CIDR '%s' for IPPool '%s'", ipPool.Spec.CIDR, ipPool.Name)
}
// Check if IP pool selects the node
if selects, err := ipam.SelectsNode(ipPool, node); err != nil {
log.WithError(err).Errorf("Failed to compare nodeSelector '%s' for IPPool '%s', skipping", ipPool.Spec.NodeSelector, ipPool.Name)
continue
} else if !selects {
log.Debugf("IPPool '%s' does not select Node '%s'", ipPool.Name, node.Name)
continue
}
// Check if desired encap is enabled in the IP pool, the IP pool is not disabled, and it is IPv4 pool since we
// don't support encap with IPv6.
switch attrType {
case ipam.AttributeTypeVXLAN:
if (ipPool.Spec.VXLANMode == api.VXLANModeAlways || ipPool.Spec.VXLANMode == api.VXLANModeCrossSubnet) && !ipPool.Spec.Disabled && poolCidr.Version() == 4 {
cidrs = append(cidrs, *poolCidr)
}
case ipam.AttributeTypeVXLANV6:
if (ipPool.Spec.VXLANMode == api.VXLANModeAlways || ipPool.Spec.VXLANMode == api.VXLANModeCrossSubnet) && !ipPool.Spec.Disabled && poolCidr.Version() == 6 {
cidrs = append(cidrs, *poolCidr)
}
case ipam.AttributeTypeIPIP:
// Check if IPIP is enabled in the IP pool, the IP pool is not disabled, and it is IPv4 pool since we don't support IPIP with IPv6.
if (ipPool.Spec.IPIPMode == api.IPIPModeCrossSubnet || ipPool.Spec.IPIPMode == api.IPIPModeAlways) && !ipPool.Spec.Disabled && poolCidr.Version() == 4 {
cidrs = append(cidrs, *poolCidr)
}
case ipam.AttributeTypeWireguard:
// Wireguard does not require a specific encap configuration on the pool.
if !ipPool.Spec.Disabled && poolCidr.Version() == 4 {
cidrs = append(cidrs, *poolCidr)
}
}
}
return cidrs
}
// isIpInPool returns if the IP address is in one of the supplied pools.
func isIpInPool(ipAddrStr string, cidrs []net.IPNet) bool {
ipAddress := net.ParseIP(ipAddrStr)
for _, cidr := range cidrs {
if cidr.Contains(ipAddress.IP) {
return true
}
}
return false
}
func getLogger(attrType string) *log.Entry {
switch attrType {
case ipam.AttributeTypeVXLAN:
return log.WithField("type", "vxlanTunnelAddress")
case ipam.AttributeTypeVXLANV6:
return log.WithField("type", "vxlanV6TunnelAddress")
case ipam.AttributeTypeIPIP:
return log.WithField("type", "ipipTunnelAddress")
case ipam.AttributeTypeWireguard:
return log.WithField("type", "wireguardTunnelAddress")
}
return nil
}
// backendClientAccessor is an interface to access the backend client from the main v2 client.
type backendClientAccessor interface {
Backend() bapi.Client
}
// loadFelixEnvConfig loads the felix configuration from environment. It does not perform a hierarchical load across
// env, file and felixconfigurations resources and as such this is should only be used for configuration that is only
// expected to be specified through environment.
func loadFelixEnvConfig() *felixconfig.Config {
// Load locally-defined config from the environment variables.
configParams := felixconfig.New()
envConfig := felixconfig.LoadConfigFromEnvironment(os.Environ())
// Parse and merge the local config.
_, err := configParams.UpdateFrom(envConfig, felixconfig.EnvironmentVariable)
if err != nil {
log.WithError(err).Panic("Failed to parse Felix environments")
}
return configParams
}
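// For example (illustrative): exporting FELIX_WIREGUARDHOSTENCRYPTIONENABLED=true in the
// node's environment is parsed here into WireguardHostEncryptionEnabled, which makes
// determineEnabledPoolCIDRs skip allocating a wireguard device IP for this node.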
| ["\"NODENAME\""] | [] | ["NODENAME"] | [] | ["NODENAME"] | go | 1 | 0 |
extra_code/transformers-gpt2-finetune.py
|
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# import deepspeed
# import mpi4py
# import pandas
import torch
import transformers
import wandb
#%env WANDB_PROJECT=wine_gpt2_Trainer_42
MODEL_NAME = "gpt2-medium"
# wandb.login(anonymous='never', key="222a37baaf0c1b0d1499ec003e5c2fe49f97b107")
wandb.init()
# wandb.watch(log='all')
print(torch.cuda.is_available())
print(f"transformers version: {transformers.__version__}")
print(f"PyTorch version: {torch.__version__}")
# Tokenizers
tokenizer = transformers.AutoTokenizer.from_pretrained(MODEL_NAME)
print(len(tokenizer))
tokenizer.add_special_tokens(
{"eos_token": "<|startoftext|>", "bos_token": "<|startoftext|>"}
)
tokenizer.add_tokens(
[
"[prompt]",
"[response]",
"[category_1]",
"[category_2]",
"[origin]",
"[description]",
"<|endoftext|>",
]
)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.save_pretrained("data/modeling/trainer_42/")
print(len(tokenizer))
print("Created tokenizer")
class wineDataset(torch.utils.data.Dataset):
def __init__(self, encodings):
self.encodings = encodings
def __len__(self):
return len(self.encodings["input_ids"])
def __getitem__(self, idx):
item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
item["labels"] = item["input_ids"]
return item
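# Each dataset item is a dict of tensors keyed like the tokenizer output, e.g.
# {"input_ids": ..., "attention_mask": ..., "labels": ...}, where "labels" simply
# mirrors "input_ids" for causal language-model training.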
with open("data/scraped/name_desc_nlp_ready_train.txt", "r", encoding="utf8") as file:
wines_raw_train = file.read().splitlines()
with open("data/scraped/name_desc_nlp_ready_test.txt", "r", encoding="utf8") as file:
wines_raw_test = file.read().splitlines()
print("Loaded dataset")
# wines_raw_train, wines_raw_test = train_test_split(wines_raw,test_size=0.2)
# wine_encodings_train = tokenizer(wines_raw_train, max_length=200, truncation=True, padding=True)
wine_encodings_test = tokenizer(
wines_raw_test, max_length=200, truncation=True, padding=True
)
print("Encoded dataset")
# wine_dataset_train = wineDataset(wine_encodings_train)
wine_dataset_test = wineDataset(wine_encodings_test)
print("Created PyTorch DataSet")
# train_loader = torch.utils.data.DataLoader(wine_dataset_train)
model = transformers.AutoModelForCausalLM.from_pretrained(MODEL_NAME)
# model.to('cuda')
model.resize_token_embeddings(len(tokenizer))
print(f"model parameters: {model.num_parameters():,}")
training_args = transformers.TrainingArguments(
output_dir="data/modeling/trainer_42/",
overwrite_output_dir=True,
num_train_epochs=1,
per_device_train_batch_size=2,
save_steps=100,
save_total_limit=2,
fp16=True,
# deepspeed='data/ds_config.json'
)
trainer = transformers.Trainer(
model=model, args=training_args, train_dataset=wine_dataset_test,
)
trainer.train()
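# A minimal generation sketch once training finishes (illustrative; the prompt text and
# sampling parameters below are assumptions, not part of this script):
# model.eval()
# inputs = tokenizer("[prompt] crisp dry riesling [response]", return_tensors="pt")
# outputs = model.generate(**inputs, max_length=100, do_sample=True, top_p=0.95)
# print(tokenizer.decode(outputs[0]))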
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 |
repro_packs/rockstuhl/repro_exec_files/scripts/cext_wave_prism_34K_SE.py
|
import numpy
import time
import sys
import os
from argparse import ArgumentParser
import pygbe
from pygbe.util.read_data import read_fields
from pygbe.main import main
from cext_wavelength_scanning import create_diel_list, Cext_wave_scan, Cext_analytical
def read_inputs(args):
"""
Parse command-line arguments to read arguments in main.
"""
parser = ArgumentParser(description='Read path where input files are located')
parser.add_argument('-if',
'--infiles',
type=str,
help="Absolute path where input files are located (downloaded from zenodo)")
return parser.parse_args(args)
def main(argv=sys.argv):
argv=sys.argv
args = read_inputs(argv[1:])
in_files_path = args.infiles
    #Import dielectric data (permittivity vs. wavelength)
wave_s, diel_rs, diel_is = numpy.loadtxt('../dielectric_data/4H-SIC_permittivity_10-12_microns.csv', skiprows=1, unpack=True)
air_diel = [1. + 1j*0.] * len(wave_s)
    #Creating dielectric list: first dielectric outside, then inside
diel_list = [list(eps) for eps in zip(air_diel, diel_rs + 1j*diel_is)]
    #Set environment variable for PyGBe
folder_path = in_files_path + 'prism6720x26880x3280_SE'
full_path = os.path.abspath(folder_path)+'/'
os.environ['PYGBE_PROBLEM_FOLDER'] = full_path
#Creating dictionary field. We will modify the 'E' key in the for loop.
field_dict_pillars = read_fields(full_path + 'prism_34K.config')
#Calculate Cext(lambda) for pillars' surface
tic_ss = time.time()
e_field = -1.
wave, Cext_pillars = Cext_wave_scan(e_field, wave_s, diel_list, field_dict_pillars, full_path)
toc_ss = time.time()
numpy.savetxt('../results_data/prism_SE_LE_res/'+'prism_34K_short_edge'+'10-20microns.txt',
list(zip(wave, Cext_pillars)),
fmt = '%.9f %.9f',
header = 'lambda [Ang], Cext [nm^2]')
time_simulation = (toc_ss - tic_ss)
with open('../results_data/prism_SE_LE_res/Time_'+'prism_34K_short_edge'+'.txt', 'w') as f:
print('time_total: {} s'.format(time_simulation), file=f)
if __name__ == "__main__":
main(sys.argv)
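# Example invocation (illustrative; the input-file path is an assumption):
#   python cext_wave_prism_34K_SE.py --infiles /path/to/zenodo_downloads/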
| [] | [] | ["PYGBE_PROBLEM_FOLDER"] | [] | ["PYGBE_PROBLEM_FOLDER"] | python | 1 | 0 |
docs/conf.py
|
# -*- coding: utf-8 -*-
#
# thecut-authorship documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 29 10:25:18 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import pkg_resources
import django
# sys.path.insert(0, os.path.abspath('..')) # NOQA
os.environ["DJANGO_SETTINGS_MODULE"] = 'test_app.settings'
django.setup()
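# Note: django.setup() needs to run before autodoc imports anything that touches Django
# models or settings, otherwise the import fails with "Apps aren't loaded yet".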
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'thecut-authorship'
copyright = u'2016, The Cut Creative'
author = u'The Cut Creative'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '.'.join(release.split('.')[:2])
from thecut import authorship
version = authorship.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'thecut-authorship v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'thecut-authorshipdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'thecut-authorship.tex', u'thecut-authorship Documentation',
u'The Cut Creative', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'thecut-authorship', u'thecut-authorship Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'thecut-authorship', u'thecut-authorship Documentation',
author, 'thecut-authorship', 'A set of Django mixins to easily '
'record authorship information for your models.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
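# To build the HTML docs locally, the usual Sphinx workflow applies (illustrative; assumes
# test_app.settings is importable from the project root):
#   sphinx-build -b html docs docs/_build/html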
| [] | [] | ["DJANGO_SETTINGS_MODULE"] | [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 |
alerta/management/views.py
|
import datetime
import os
import time
from flask import (Response, current_app, g, jsonify, render_template, request,
url_for)
from flask_cors import cross_origin
from alerta.app import db
from alerta.auth.decorators import permission
from alerta.exceptions import ApiError, RejectException
from alerta.models.alert import Alert
from alerta.models.enums import Scope
from alerta.models.heartbeat import Heartbeat
from alerta.models.metrics import Counter, Gauge, Timer
from alerta.models.switch import Switch, SwitchState
from alerta.utils.api import process_action
from alerta.utils.audit import write_audit_trail
from alerta.version import __version__
from . import mgmt
try:
from alerta import build # type: ignore
except Exception:
from alerta import dev as build # type: ignore
switches = [
Switch('auto-refresh-allow', 'Alerta console auto-refresh',
'Allow consoles to auto-refresh alerts', SwitchState.ON),
Switch('sender-api-allow', 'API alert submission', 'Allow alerts to be submitted via the API', SwitchState.ON)
]
total_alert_gauge = Gauge('alerts', 'total', 'Total alerts', 'Total number of alerts in the database')
started = time.time() * 1000
@mgmt.route('/management', methods=['OPTIONS', 'GET'])
@cross_origin()
def management():
endpoints = [
url_for('mgmt.manifest'),
url_for('mgmt.properties'),
url_for('mgmt.switchboard'),
url_for('mgmt.good_to_go'),
url_for('mgmt.health_check'),
url_for('mgmt.housekeeping'),
url_for('mgmt.status'),
url_for('mgmt.prometheus_metrics')
]
return render_template('management/index.html', endpoints=endpoints)
@mgmt.route('/management/manifest', methods=['OPTIONS', 'GET'])
@cross_origin()
@permission(Scope.read_management)
def manifest():
manifest = {
'release': __version__,
'build': build.BUILD_NUMBER,
'date': build.BUILD_DATE,
'revision': build.BUILD_VCS_NUMBER
}
return jsonify(manifest)
@mgmt.route('/management/properties', methods=['OPTIONS', 'GET'])
@cross_origin()
@permission(Scope.admin_management)
def properties():
properties = ''
for k, v in request.environ.items():
properties += '{}: {}\n'.format(k, v)
for k, v in os.environ.items():
properties += '{}: {}\n'.format(k, v)
for k, v in current_app.__dict__.items():
properties += '{}: {}\n'.format(k, v)
for k, v in current_app.config.items():
properties += '{}: {}\n'.format(k, v)
return Response(properties, content_type='text/plain')
@mgmt.route('/management/switchboard', methods=['OPTIONS', 'GET', 'POST'])
@cross_origin()
@permission(Scope.admin_management)
def switchboard():
if request.method == 'POST':
for switch in Switch.find_all():
try:
value = request.form[switch.name]
switch.set_state(value)
except KeyError:
pass
return render_template('management/switchboard.html', switches=switches)
else:
switch = request.args.get('switch', None)
if switch:
return render_template('management/switchboard.html',
switches=[Switch.find_by_name(switch)])
else:
return render_template('management/switchboard.html', switches=switches)
@mgmt.route('/management/gtg', methods=['OPTIONS', 'GET'])
@cross_origin()
def good_to_go():
if db.is_alive:
return 'OK'
else:
return 'FAILED', 503
@mgmt.route('/management/healthcheck', methods=['OPTIONS', 'GET'])
@cross_origin()
def health_check():
try:
heartbeats = Heartbeat.find_all()
for heartbeat in heartbeats:
delta = datetime.datetime.utcnow() - heartbeat.receive_time
threshold = int(heartbeat.timeout) * 4
            if delta.total_seconds() > threshold:
return 'HEARTBEAT_STALE: %s' % heartbeat.origin, 503
except Exception as e:
return 'HEALTH_CHECK_FAILED: %s' % e, 503
return 'OK'
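# Housekeeping can also be triggered manually; a hypothetical invocation (host, port
# and API key are placeholders, thresholds are in hours per the config keys above):
#   curl -X POST 'http://localhost:8080/management/housekeeping?expired=2&info=12' \
#        -H 'Authorization: Key <admin-api-key>'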
@mgmt.route('/management/housekeeping', methods=['OPTIONS', 'GET', 'POST'])
@cross_origin()
@permission(Scope.admin_management)
def housekeeping():
    expired_threshold = request.args.get('expired', current_app.config['DEFAULT_EXPIRED_DELETE_HRS'], type=int)
    info_threshold = request.args.get('info', current_app.config['DEFAULT_INFO_DELETE_HRS'], type=int)
has_expired, has_timedout = Alert.housekeeping(expired_threshold, info_threshold)
errors = []
for alert in has_expired:
try:
alert, _, text, timeout = process_action(alert, action='expired', text='', timeout=current_app.config['ALERT_TIMEOUT'])
alert = alert.from_expired(text, timeout)
except RejectException as e:
write_audit_trail.send(current_app._get_current_object(), event='alert-expire-rejected', message=alert.text,
user=g.login, customers=g.customers, scopes=g.scopes, resource_id=alert.id, type='alert',
request=request)
errors.append(str(e))
continue
except Exception as e:
raise ApiError(str(e), 500)
write_audit_trail.send(current_app._get_current_object(), event='alert-expired', message=text, user=g.login,
customers=g.customers, scopes=g.scopes, resource_id=alert.id, type='alert', request=request)
for alert in has_timedout:
try:
alert, _, text, timeout = process_action(alert, action='timeout', text='', timeout=current_app.config['ALERT_TIMEOUT'])
alert = alert.from_timeout(text, timeout)
except RejectException as e:
write_audit_trail.send(current_app._get_current_object(), event='alert-timeout-rejected', message=alert.text,
user=g.login, customers=g.customers, scopes=g.scopes, resource_id=alert.id, type='alert',
request=request)
errors.append(str(e))
continue
except Exception as e:
raise ApiError(str(e), 500)
write_audit_trail.send(current_app._get_current_object(), event='alert-timeout', message=text, user=g.login,
customers=g.customers, scopes=g.scopes, resource_id=alert.id, type='alert', request=request)
if errors:
raise ApiError('housekeeping failed', 500, errors=errors)
else:
return jsonify(
status='ok',
expired=[a.id for a in has_expired],
timedout=[a.id for a in has_timedout],
count=len(has_expired) + len(has_timedout)
)
@mgmt.route('/management/status', methods=['OPTIONS', 'GET'])
@cross_origin()
@permission(Scope.read_management)
def status():
now = int(time.time() * 1000)
total_alert_gauge.set(Alert.get_count())
metrics = Gauge.find_all()
metrics.extend(Counter.find_all())
metrics.extend(Timer.find_all())
metrics.extend(Switch.find_all())
return jsonify(application='alerta', version=__version__, time=now, uptime=int(now - started),
metrics=[metric.serialize() for metric in metrics])
@mgmt.route('/management/metrics', methods=['OPTIONS', 'GET'])
@cross_origin()
@permission(Scope.read_management)
def prometheus_metrics():
total_alert_gauge.set(Alert.get_count())
output = Gauge.find_all()
output += Counter.find_all()
output += Timer.find_all()
return Response(
[o.serialize(format='prometheus') for o in output],
content_type='text/plain; version=0.0.4; charset=utf-8'
)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
docs/conf.py
|
# -*- coding: utf-8 -*-
#
# Satchmo Project documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 8 21:21:45 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# Add satchmo apps.
sys.path.append(os.path.abspath('../satchmo/apps'))
# Setup the 'simple' store.
sys.path.append(os.path.abspath('../satchmo/projects'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'simple.settings'
# For Sphinx to properly work with Satchmo, you need to make one small patch to Sphinx:
# Patch here - http://gist.github.com/345738
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Satchmo'
copyright = u'2014, Chris Moffitt'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9.3'
# The full version, including alpha/beta/rc tags.
release = '0.9.3-Dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme = 'sphinxdoc'
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
pass
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'SatchmoProjectdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Satchmo.tex', u'Satchmo Documentation',
u'Chris Moffitt', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
|
[] |
[] |
[
"READTHEDOCS",
"DJANGO_SETTINGS_MODULE"
] |
[]
|
["READTHEDOCS", "DJANGO_SETTINGS_MODULE"]
|
python
| 2 | 0 | |
pit/app.py
|
"""
This is the Centralized pit application.
Pit owns blobs for analysis, the results of the analysis, the rules used to
score analysis, and serves it all up on demand.
"""
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
import flask_restless_swagger
from flask_restless_swagger import SwagAPIManager as APIManager
from sqlalchemy.dialects.postgresql import JSON
from flask_cors import CORS
app = Flask(__name__)
cors = CORS(app)
default_db = "postgresql://pit:pit@localhost:5432/pit"
app.config['SQLALCHEMY_DATABASE_URI'] = os.getenv('PIT_DB', default_db)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
migrate = Migrate(app, db)
flask_restless_swagger.sqlalchemy_swagger_type['JSON'] = "string"
manager = APIManager(app, flask_sqlalchemy_db=db)
class Item(db.Model):
"""Blob to be analyzed. Can have parent and children.
Demands uniqueness."""
id = db.Column(db.Integer, primary_key=True)
hash = db.Column(db.Unicode, unique=True)
parent_hash = db.Column(db.Unicode, db.ForeignKey("item.hash"))
parent = db.relationship("Item", backref="children", remote_side=[hash])
created_on = db.Column(db.DateTime, server_default=db.func.now())
updated_on = db.Column(db.DateTime, server_default=db.func.now(),
onupdate=db.func.now())
class Analysis(db.Model):
"""Results of analysis. Belongs to an Item."""
id = db.Column(db.Integer, primary_key=True)
key = db.Column(db.Unicode)
item_hash = db.Column(db.Unicode, db.ForeignKey("item.hash"))
item = db.relationship("Item", backref=db.backref("data", lazy='dynamic'))
data = db.Column(JSON)
score = db.Column(db.Integer)
created_on = db.Column(db.DateTime, server_default=db.func.now())
updated_on = db.Column(db.DateTime, server_default=db.func.now(),
onupdate=db.func.now())
    # Enforce one analysis result per (key, item_hash) pair.
    __table_args__ = (db.UniqueConstraint('key', 'item_hash'),)
class Rule(db.Model):
"""Rules used to score analysis data."""
id = db.Column(db.Integer, primary_key=True)
matcher = db.Column(db.Unicode)
# NB: Value can be negative...
value = db.Column(db.Unicode)
# This is the key that ties it to analysis data.
analysis_key = db.Column(db.Unicode)
# This is the key used to identify the rule uniquely.
rule_key = db.Column(db.Unicode)
created_on = db.Column(db.DateTime, server_default=db.func.now())
updated_on = db.Column(db.DateTime, server_default=db.func.now(),
onupdate=db.func.now())
manager.create_api(Item, primary_key='hash',
methods=['GET', 'POST', 'DELETE', 'PATCH'],
url_prefix='')
manager.create_api(Analysis, methods=['GET', 'POST', 'DELETE', 'PATCH'],
url_prefix='')
manager.create_api(Rule, primary_key="analysis_key",
methods=['GET', 'POST', 'DELETE', 'PATCH'],
url_prefix='')
db.create_all()
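# Sketch of the REST surface flask-restless generates from the create_api calls above
# (base URL assumed; because url_prefix is '', routes use the models' table names by default):
#   POST http://localhost:5000/item      -- create an Item, keyed by 'hash'
#   GET  http://localhost:5000/analysis  -- list Analysis rows as paginated JSON
#   GET  http://localhost:5000/rule/<analysis_key>  -- fetch a Rule by its analysis_key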
if __name__ == '__main__':
app.run()
|
[] |
[] |
[
"PIT_DB"
] |
[]
|
["PIT_DB"]
|
python
| 1 | 0 | |
jinahub/indexers/searcher/FaissSearcher/tests/integration/test_train_index.py
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from jina.executors.metas import get_default_metas
from ...faiss_searcher import FaissSearcher
def _get_docs_from_vecs(queries):
docs = DocumentArray()
for q in queries:
doc = Document(embedding=q)
docs.append(doc)
return docs
@pytest.fixture(scope='function', autouse=True)
def metas(tmpdir):
os.environ['TEST_WORKSPACE'] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ['TEST_WORKSPACE']
metas['name'] = 'faiss_idx'
yield metas
del os.environ['TEST_WORKSPACE']
def test_train_and_index(metas, tmpdir):
query = np.array(np.random.random([10, 10]), dtype=np.float32)
query_docs = _get_docs_from_vecs(query)
trained_index_file = os.path.join(tmpdir, 'faiss.index')
train_data = np.array(np.random.random([512, 10]), dtype=np.float32)
index_docs = _get_docs_from_vecs(train_data)
f = Flow().add(
uses=FaissSearcher,
timeout_ready=-1,
uses_with={
'index_key': 'IVF6,PQ2',
'trained_index_file': trained_index_file,
},
uses_meta=metas,
)
with f:
import faiss
faiss_index = faiss.index_factory(10, 'IVF6,PQ2', faiss.METRIC_INNER_PRODUCT)
faiss.normalize_L2(train_data)
faiss_index.train(train_data)
faiss.write_index(faiss_index, trained_index_file)
# train and index docs first
f.post(on='/index', data=index_docs)
result = f.post(
on='/search', data=query_docs, return_results=True, parameters={'limit': 4}
)[0].docs
assert len(result[0].matches) == 4
for d in result:
assert (
d.matches[0].scores['cosine'].value
<= d.matches[1].scores['cosine'].value
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
],
timeout=30,
check=True,
)
|
[] |
[] |
[
"TEST_WORKSPACE"
] |
[]
|
["TEST_WORKSPACE"]
|
python
| 1 | 0 | |
config.py
|
import os
import arcpy as ap
import glob
from gis_lib.helpers import *
# finds a date and creates local variables based on that date
def local_config():
    # Finds Monday's date: globs the current folder for files matching the given
    # pattern and returns the greatest YYMMDD suffix found before the extension,
    # e.g. 'METRO_PATTERNS210104.csv' -> '210104'.
    def local_date(text):
        dates = []
        for file in glob.glob(text):
            dates.append(int(file[-10:-4]))
        date = str(max(dates))
        print(f'{text[:-1]} Date: {date}')
        return date
# RUNS LOCAL Date finds monday's date
sched_date = local_date('METRO_PATTERNS*')
# gets AUTOMATION EXPORTS from .env file to get the Automation_Exports directory
# this is where all of the exports are stored after processing the csv's
# this is a catalog of all the historic weekly gdb's and the current gdb
Automation_Exports = os.environ['AUTOMATION_EXPORTS']
# This is where the csv's are stored when the DBA's export every monday
Sql_Exports = os.environ['SQL_Exports']
return {
"ACS_Year": os.environ['TITLE_VI_GDB'][-6:-4],
"Automation_Exports": Automation_Exports,
"cf_gdb": os.path.join(Automation_Exports, "CurrentFiles.gdb"), # automation_exports\\current.gdb
"ds_gdb": os.path.join(Automation_Exports, f"DataStore_{sched_date}.gdb"), # weekly datastore updates
"sched_date": sched_date,
"sign": current_sign(os.path.join(Sql_Exports, f'METROBUS-STOP-EXTRACTION{sched_date}.csv')),
"Sql_Exports": Sql_Exports,
"TitleVI": os.environ['TITLE_VI_GDB'],
}
# function that acts like a class and returns an object with the portal connection information
# args are the full feature class list (features.py) and a string defining the portal type (agol or enterprise)
def portal_config(fc_list, portal):
features = []
for fc in fc_list:
if fc[portal] is True:
features.append(fc)
print(f"- {fc['title']}")
# pulls in data from .env to fill in the return object
# depending on whether it is an agol or enterprise portal at the time of run
if portal == 'agol':
profile = {
"portal": 'https://www.arcgis.com/',
"user": os.environ['AGOL_USER'],
"password": os.environ['AGOL_PASSWORD'],
"project": os.environ['AGOL_PROJECT'],
}
elif portal == 'enterprise':
profile = {
"portal": os.environ['ENTERP_PORTAL'],
"user": os.environ['ENTERP_USER'],
"password": os.environ['ENTERP_PASSWORD'],
"project": os.environ['ENTERP_PROJECT'],
}
    # bail out early if the portal type is not recognised
    else:
        print(f"{portal} is not a valid portal")
        return None
    profile["portal_type"] = portal
profile["features"] = features
return profile
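# Hypothetical usage (the names below are illustrative, not defined in this module):
#   profile = portal_config(fc_list, 'agol')
#   gis = GIS(profile['portal'], profile['user'], profile['password'])
#   publish_features(gis, profile['features'])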
# returns a combination of config objects that have all of the information to run processing information
# takes in csv_locs return object, the weekly csv directory from add_columns(), and the local_config() return object
def config_options(files, csv_dir, local):
ap.env.workspace = os.path.join(local['Automation_Exports'], local['ds_gdb'])
# destructures local object for easy usage in return function
sign = local['sign']
date = local['sched_date']
acs_year = local['ACS_Year']
ds_gdb = local['ds_gdb']
cf_gdb = local['cf_gdb']
title_vi_gdb = local['TitleVI']
return {
"sign": sign,
"csv_dir": csv_dir,
"date": date,
"acs_year": acs_year,
"title_vi_gdb": title_vi_gdb,
"files": files,
"org_dir": csv_dir['org_dir'],
"processed_dir": csv_dir['processed_dir'],
"ds_gdb": ds_gdb,
"cf_gdb": cf_gdb,
"files": {
"ada": f'ADA_ROUTES{date}',
"ghoststops": f'GHOST-STOPS-EXTRACTION{date}',
"patterns": {
"xy": f'METRO_PATTERNS_XY_{date}',
"name": f'METRO_PATTERNS{date}',
"line": f'METRO_PATTERNS{date}_{sign}',
},
"stops": f'METROBUS-STOP-EXTRACTION{date}',
"stops_by_line": f'METROBUS-STOPBYLINE_EXTRACTION-WITH-DISTANCE{date}',
"feat_classes":{
"eam_stops": f"EAMMetroBusStops_{sign}_{date}",
"eam_stops_1000": f"EAMMetroBusStops_1000_{sign}_{date}",
"ghoststops": f'MetroBusGhostStops_{sign}_{date}',
"routes": f'MetroBusRoutes_{sign}_{date}',
"route_buffer": "MetroBusRouteBuffer_",
"routes_dir": f'MetroBusRoutes_dir_{sign}_{date}',
"stops": f'MetroBusStops_{sign}_{date}',
"stops_by_line": f'MetroBusStopsByLine_{sign}_{date}',
"sys_buffer": "MetroBusSystemBuffer_",
},
"registered": {
"stops": "MetroBusStops_REGISTERED",
"grid1000": "MGRS_Grid_1000",
"eam_stops_1000": "EAMMetroBusStops_1000",
"grid1000": "MGRS_Grid_1000",
"grid10000": "MGRS_Grid_10000",
"eam_stops": "EAMMetroBusStops_REGISTERED",
"eam_stops": "EAMMetroBusStops_1000_REGISTERED",
"lightrail_buffer": "Lightrail_buffer"
},
"updateList":[
"MetroBusRoutes_REGISTERED",
"MetroBusStops_REGISTERED",
"MetroBusGhostStops_REGISTERED",
"MetroBusStopsByLine_REGISTERED",
"MetroBusRouteBuffer_05_REGISTERED",
"MetroBusRouteBuffer_025_REGISTERED",
"MetroBusRouteBuffer_075_REGISTERED",
"MetroBusSystemBuffer_05_REGISTERED",
"MetroBusSystemBuffer_025_REGISTERED",
"MetroBusSystemBuffer_075_REGISTERED",
"MetroBusRoutes_dir_REGISTERED",
"EAMMetroBusStops_REGISTERED",
"MetroADAServiceArea_REGISTERED"
]
}
}
|
[] |
[] |
[
"AGOL_PASSWORD",
"AUTOMATION_EXPORTS",
"AGOL_USER",
"TITLE_VI_GDB",
"AGOL_PROJECT",
"ENTERP_PORTAL",
"ENTERP_USER",
"ENTERP_PASSWORD",
"ENTERP_PROJECT",
"SQL_Exports"
] |
[]
|
["AGOL_PASSWORD", "AUTOMATION_EXPORTS", "AGOL_USER", "TITLE_VI_GDB", "AGOL_PROJECT", "ENTERP_PORTAL", "ENTERP_USER", "ENTERP_PASSWORD", "ENTERP_PROJECT", "SQL_Exports"]
|
python
| 10 | 0 | |
pyspedas/csswe/config.py
|
import os
CONFIG = {'local_data_dir': 'csswe_data/',
'remote_data_dir': 'https://spdf.sci.gsfc.nasa.gov/pub/data/csswe/'}
# override local data directory with environment variables
if os.environ.get('ROOT_DATA_DIR'):
CONFIG['local_data_dir'] = os.sep.join([os.environ['ROOT_DATA_DIR'], 'csswe'])
if os.environ.get('CSSWE_DATA_DIR'):
CONFIG['local_data_dir'] = os.environ['CSSWE_DATA_DIR']
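# Illustration of the precedence implemented above (paths are placeholders):
#   ROOT_DATA_DIR=/data/spdf           -> local_data_dir == '/data/spdf/csswe'
#   CSSWE_DATA_DIR=/data/csswe (also set) -> local_data_dir == '/data/csswe' (wins)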
|
[] |
[] |
[
"CSSWE_DATA_DIR",
"ROOT_DATA_DIR"
] |
[]
|
["CSSWE_DATA_DIR", "ROOT_DATA_DIR"]
|
python
| 2 | 0 | |
Data/Juliet-Java/Juliet-Java-v103/000/127/403/CWE134_Uncontrolled_Format_String__Environment_printf_53a.java
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE134_Uncontrolled_Format_String__Environment_printf_53a.java
Label Definition File: CWE134_Uncontrolled_Format_String.label.xml
Template File: sources-sinks-53a.tmpl.java
*/
/*
* @description
* CWE: 134 Uncontrolled Format String
* BadSource: Environment Read data from an environment variable
* GoodSource: A hardcoded string
* Sinks: printf
* GoodSink: dynamic printf format with string defined
* BadSink : dynamic printf without validation
* Flow Variant: 53 Data flow: data passed as an argument from one method through two others to a fourth; all four functions are in different classes in the same package
*
* */
public class CWE134_Uncontrolled_Format_String__Environment_printf_53a extends AbstractTestCase
{
public void bad() throws Throwable
{
String data;
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
data = System.getenv("ADD");
(new CWE134_Uncontrolled_Format_String__Environment_printf_53b()).badSink(data );
}
public void good() throws Throwable
{
goodG2B();
goodB2G();
}
/* goodG2B() - use goodsource and badsink */
private void goodG2B() throws Throwable
{
String data;
/* FIX: Use a hardcoded string */
data = "foo";
(new CWE134_Uncontrolled_Format_String__Environment_printf_53b()).goodG2BSink(data );
}
/* goodB2G() - use badsource and goodsink */
private void goodB2G() throws Throwable
{
String data;
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
data = System.getenv("ADD");
(new CWE134_Uncontrolled_Format_String__Environment_printf_53b()).goodB2GSink(data );
}
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
public static void main(String[] args) throws ClassNotFoundException,
InstantiationException, IllegalAccessException
{
mainFromParent(args);
}
}
|
[
"\"ADD\"",
"\"ADD\""
] |
[] |
[
"ADD"
] |
[]
|
["ADD"]
|
java
| 1 | 0 | |
src/main/java/edu/sdsu/its/rohan_search/Param.java
|
package edu.sdsu.its.rohan_search;
import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.client.WebResource;
import org.apache.http.client.utils.URIBuilder;
import org.apache.log4j.Logger;
import javax.ws.rs.core.MediaType;
import java.net.URI;
import java.net.URISyntaxException;
/**
* TODO JavaDoc
*
* @author Tom Paulus
* Created on 12/10/15.
*/
public class Param {
final private static String URL = System.getenv("KSPATH");
final private static String KEY = System.getenv("KSKEY");
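    // KSPATH is used as the HTTPS host of the key server and KSKEY as the "key"
    // query parameter below; both are expected to be provided by the deployment
    // environment (their values are opaque to this class).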
public static String getParam(final String applicationName, final String parameterName) {
try {
final URI uri = new URIBuilder()
.setScheme("https")
.setHost(URL)
.setPath("/rest/client/param")
.addParameter("key", KEY)
.addParameter("app", applicationName)
.addParameter("name", parameterName)
.build();
final ClientResponse response = get(uri);
return response.getEntity(String.class);
} catch (URISyntaxException e) {
Logger.getLogger(Param.class).error("problem forming Connection URI - ", e);
return "";
}
}
/**
* Make HTTP Get requests and return the Response form the Server.
*
* @param uri {@link URI} URI used to make get Request.
* @return {@link ClientResponse} Response from get Request.
*/
private static ClientResponse get(final URI uri) {
Logger.getLogger(Param.class).info("Making a get request to: " + uri.toString());
final Client client = Client.create();
final WebResource webResource = client.resource(uri);
ClientResponse response;
try {
response = webResource.accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
if (response.getStatus() != 200) {
Logger.getLogger(Param.class).error("Error Connecting to Key Server - HTTP Error Code: " + response.getStatus());
}
} catch (UniformInterfaceException e) {
response = null;
Logger.getLogger(Param.class).error("Error connecting to Key Server Server", e);
}
return response;
}
public static void main(String[] args) {
System.out.println(Param.getParam("rohan_search", "db_host"));
}
}
|
[
"\"KSPATH\"",
"\"KSKEY\""
] |
[] |
[
"KSPATH",
"KSKEY"
] |
[]
|
["KSPATH", "KSKEY"]
|
java
| 2 | 0 | |
app/ubc_course_explorer/settings.py
|
"""
Django settings for ubc_course_explorer project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = int(os.environ.get("DEBUG"))
# 'DJANGO_ALLOWED_HOSTS' should be a single string of hosts with a space between each.
# For example: 'DJANGO_ALLOWED_HOSTS=localhost 127.0.0.1 [::1]'
ALLOWED_HOSTS = os.environ.get("DJANGO_ALLOWED_HOSTS").split(" ")
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'homepage',
'coursetracker',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ubc_course_explorer.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ubc_course_explorer.wsgi.application'
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": os.environ.get("SQL_ENGINE"),
"NAME": os.environ.get("SQL_DATABASE"),
"USER": os.environ.get("SQL_USER"),
"PASSWORD": os.environ.get("SQL_PASSWORD"),
"HOST": os.environ.get("SQL_HOST"),
"PORT": os.environ.get("SQL_PORT"),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Vancouver'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = "/staticfiles/"
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
|
[] |
[] |
[
"SQL_PASSWORD",
"SQL_ENGINE",
"SQL_HOST",
"SQL_DATABASE",
"SQL_USER",
"SECRET_KEY",
"SQL_PORT",
"DEBUG",
"DJANGO_ALLOWED_HOSTS"
] |
[]
|
["SQL_PASSWORD", "SQL_ENGINE", "SQL_HOST", "SQL_DATABASE", "SQL_USER", "SECRET_KEY", "SQL_PORT", "DEBUG", "DJANGO_ALLOWED_HOSTS"]
|
python
| 9 | 0 | |
firstrun.go
|
//+build !nofirstrun
// Copyright 2018. Akamai Technologies, Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
akamai "github.com/akamai/cli-common-golang"
"github.com/fatih/color"
"github.com/kardianos/osext"
"github.com/mattn/go-isatty"
)
func firstRun() error {
if !isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd()) {
return nil
}
bannerShown, err := firstRunCheckInPath()
if err != nil {
return err
}
bannerShown = firstRunCheckUpgrade(bannerShown)
firstRunCheckStats(bannerShown)
return nil
}
func firstRunCheckInPath() (bool, error) {
selfPath, err := osext.Executable()
if err != nil {
return false, err
}
os.Args[0] = selfPath
dirPath := filepath.Dir(selfPath)
if runtime.GOOS == "windows" {
dirPath = strings.ToLower(dirPath)
}
sysPath := os.Getenv("PATH")
paths := filepath.SplitList(sysPath)
inPath := false
writablePaths := []string{}
var bannerShown bool
if getConfigValue("cli", "install-in-path") == "no" {
inPath = true
bannerShown = firstRunCheckUpgrade(!inPath)
}
if len(paths) == 0 {
inPath = true
bannerShown = firstRunCheckUpgrade(!inPath)
}
for _, path := range paths {
if len(strings.TrimSpace(path)) == 0 {
continue
}
if runtime.GOOS == "windows" {
path = strings.ToLower(path)
}
if err := checkAccess(path, ACCESS_W_OK); err == nil {
writablePaths = append(writablePaths, path)
}
if path == dirPath {
inPath = true
bannerShown = firstRunCheckUpgrade(false)
}
}
if !inPath && len(writablePaths) > 0 {
if !bannerShown {
showBanner()
bannerShown = true
}
fmt.Fprint(akamai.App.Writer, "Akamai CLI is not installed in your PATH, would you like to install it? [Y/n]: ")
answer := ""
fmt.Scanln(&answer)
if answer != "" && strings.ToLower(answer) != "y" {
setConfigValue("cli", "install-in-path", "no")
saveConfig()
firstRunCheckUpgrade(true)
return true, nil
}
choosePath(writablePaths, answer, selfPath)
}
return bannerShown, nil
}
func choosePath(writablePaths []string, answer string, selfPath string) {
fmt.Fprintln(akamai.App.Writer, color.YellowString("Choose where you would like to install Akamai CLI:"))
for i, path := range writablePaths {
fmt.Fprintf(akamai.App.Writer, "(%d) %s\n", i+1, path)
}
fmt.Fprint(akamai.App.Writer, "Enter a number: ")
answer = ""
fmt.Scanln(&answer)
	index, err := strconv.Atoi(answer)
	if err != nil {
		fmt.Fprintln(akamai.App.Writer, color.RedString("Invalid choice, try again"))
		choosePath(writablePaths, answer, selfPath)
		return
	}
	if answer == "" || index < 1 || index > len(writablePaths) {
		fmt.Fprintln(akamai.App.Writer, color.RedString("Invalid choice, try again"))
		choosePath(writablePaths, answer, selfPath)
		return
	}
suffix := ""
if runtime.GOOS == "windows" {
suffix = ".exe"
}
newPath := filepath.Join(writablePaths[index-1], "akamai"+suffix)
akamai.StartSpinner(
"Installing to "+newPath+"...",
"Installing to "+newPath+"...... ["+color.GreenString("OK")+"]\n",
)
err = os.Rename(selfPath, newPath)
os.Args[0] = newPath
if err != nil {
akamai.StopSpinnerFail()
fmt.Fprintln(akamai.App.Writer, color.RedString(err.Error()))
}
akamai.StopSpinnerOk()
}
func firstRunCheckUpgrade(bannerShown bool) bool {
if getConfigValue("cli", "last-upgrade-check") == "" {
if !bannerShown {
bannerShown = true
showBanner()
}
fmt.Fprint(akamai.App.Writer, "Akamai CLI can auto-update itself, would you like to enable daily checks? [Y/n]: ")
answer := ""
fmt.Scanln(&answer)
if answer != "" && strings.ToLower(answer) != "y" {
setConfigValue("cli", "last-upgrade-check", "ignore")
saveConfig()
return bannerShown
}
setConfigValue("cli", "last-upgrade-check", "never")
saveConfig()
}
return bannerShown
}
|
[
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
go
| 1 | 0 | |
cpu-miner/cpuid/cpuid_arm64.go
|
// Minio Cloud Storage, (C) 2016 Minio, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package cpuid
import "os"
func cpuid(op uint32) (eax, ebx, ecx, edx uint32) {
return 0, 0, 0, 0
}
func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) {
return 0, 0, 0, 0
}
func xgetbv(index uint32) (eax, edx uint32) {
return 0, 0
}
func haveArmSha() bool {
return os.Getenv("ARMSHA") != ""
}
|
[
"\"ARMSHA\""
] |
[] |
[
"ARMSHA"
] |
[]
|
["ARMSHA"]
|
go
| 1 | 0 | |
pkg/services/live/pipeline/devdata.go
|
package pipeline
import (
"bytes"
"context"
"encoding/json"
"log"
"math/rand"
"net/http"
"os"
"time"
"github.com/grafana/grafana/pkg/services/live/managedstream"
"github.com/centrifugal/centrifuge"
"github.com/grafana/grafana-plugin-sdk-go/data"
)
type Data struct {
Value1 float64 `json:"value1"`
Value2 float64 `json:"value2"`
Value3 *float64 `json:"value3"`
Value4 float64 `json:"value4"`
Annotation string `json:"annotation"`
Array []float64 `json:"array"`
Map map[string]interface{} `json:"map"`
Host string `json:"host"`
Status string `json:"status"`
}
// TODO: temporary for development, remove.
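// Assumed environment for exercising this generator against a local Grafana
// instance (values are illustrative, not defaults taken from this repo):
//   GF_TOKEN                       token sent below as the Bearer credential
//   GF_LIVE_REMOTE_WRITE_ENDPOINT  e.g. a Prometheus remote-write URL such as
//                                  http://localhost:9090/api/v1/write
//   GF_LIVE_REMOTE_WRITE_USER / GF_LIVE_REMOTE_WRITE_PASSWORD  basic-auth pair
//                                  for that endpoint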
func postTestData() {
i := 0
for {
time.Sleep(1000 * time.Millisecond)
num1 := rand.Intn(10)
num2 := rand.Intn(10)
d := Data{
Value1: float64(num1),
Value2: float64(num2),
Value4: float64(i % 10),
Annotation: "odd",
Array: []float64{float64(rand.Intn(10)), float64(rand.Intn(10))},
Map: map[string]interface{}{
"red": 1,
"yellow": 4,
"green": 7,
},
Host: "macbook-local",
Status: "running",
}
if i%2 != 0 {
val := 4.0
d.Value3 = &val
}
if i%2 == 0 {
val := 3.0
d.Value3 = &val
d.Annotation = "even"
}
if i%10 == 0 {
d.Value3 = nil
}
jsonData, _ := json.Marshal(d)
log.Println(string(jsonData))
req, _ := http.NewRequest("POST", "http://localhost:3000/api/live/push/json/auto", bytes.NewReader(jsonData))
req.Header.Set("Authorization", "Bearer "+os.Getenv("GF_TOKEN"))
resp, err := http.DefaultClient.Do(req)
if err != nil {
log.Fatal(err)
}
_ = resp.Body.Close()
req, _ = http.NewRequest("POST", "http://localhost:3000/api/live/push/json/tip", bytes.NewReader(jsonData))
req.Header.Set("Authorization", "Bearer "+os.Getenv("GF_TOKEN"))
resp, err = http.DefaultClient.Do(req)
if err != nil {
log.Fatal(err)
}
_ = resp.Body.Close()
req, _ = http.NewRequest("POST", "http://localhost:3000/api/live/push/json/exact", bytes.NewReader(jsonData))
req.Header.Set("Authorization", "Bearer "+os.Getenv("GF_TOKEN"))
resp, err = http.DefaultClient.Do(req)
if err != nil {
log.Fatal(err)
}
_ = resp.Body.Close()
i++
}
}
type DevRuleBuilder struct {
Node *centrifuge.Node
ManagedStream *managedstream.Runner
FrameStorage *FrameStorage
ChannelHandlerGetter ChannelHandlerGetter
}
func (f *DevRuleBuilder) BuildRules(_ context.Context, _ int64) ([]*LiveChannelRule, error) {
return []*LiveChannelRule{
{
Pattern: "plugin/testdata/random-20Hz-stream",
Converter: NewJsonFrameConverter(JsonFrameConverterConfig{}),
FrameOutputters: []FrameOutputter{
NewManagedStreamFrameOutput(f.ManagedStream),
NewRemoteWriteFrameOutput(
os.Getenv("GF_LIVE_REMOTE_WRITE_ENDPOINT"),
os.Getenv("GF_LIVE_REMOTE_WRITE_USER"),
os.Getenv("GF_LIVE_REMOTE_WRITE_PASSWORD"),
1000,
),
},
Subscribers: []Subscriber{
NewBuiltinSubscriber(f.ChannelHandlerGetter),
NewManagedStreamSubscriber(f.ManagedStream),
},
},
{
Pattern: "stream/testdata/random-20Hz-stream",
FrameProcessors: []FrameProcessor{
NewKeepFieldsFrameProcessor(KeepFieldsFrameProcessorConfig{
FieldNames: []string{"Time", "Min", "Max"},
}),
},
FrameOutputters: []FrameOutputter{
NewManagedStreamFrameOutput(f.ManagedStream),
},
},
{
OrgId: 1,
Pattern: "stream/influx/input",
Converter: NewAutoInfluxConverter(AutoInfluxConverterConfig{
FrameFormat: "labels_column",
}),
},
{
OrgId: 1,
Pattern: "stream/influx/input/:rest",
FrameOutputters: []FrameOutputter{
NewManagedStreamFrameOutput(f.ManagedStream),
},
},
{
OrgId: 1,
Pattern: "stream/influx/input/cpu",
// TODO: Would be fine to have KeepLabelsProcessor, but we need to know frame type
// since there are cases when labels attached to a field, and cases where labels
// set in a first frame column (in Influx converter). For example, this will allow
// to leave only "total-cpu" data while dropping individual CPUs.
FrameProcessors: []FrameProcessor{
NewKeepFieldsFrameProcessor(KeepFieldsFrameProcessorConfig{
FieldNames: []string{"labels", "time", "usage_user"},
}),
},
FrameOutputters: []FrameOutputter{
NewManagedStreamFrameOutput(f.ManagedStream),
NewConditionalOutput(
NewFrameNumberCompareCondition("usage_user", "gte", 50),
NewRedirectFrameOutput(RedirectOutputConfig{
Channel: "stream/influx/input/cpu/spikes",
}),
),
},
},
{
OrgId: 1,
Pattern: "stream/influx/input/cpu/spikes",
FrameOutputters: []FrameOutputter{NewManagedStreamFrameOutput(f.ManagedStream)},
},
{
OrgId: 1,
Pattern: "stream/json/auto",
Converter: NewAutoJsonConverter(AutoJsonConverterConfig{}),
FrameOutputters: []FrameOutputter{NewManagedStreamFrameOutput(f.ManagedStream)},
},
{
OrgId: 1,
Pattern: "stream/json/tip",
Converter: NewAutoJsonConverter(AutoJsonConverterConfig{
FieldTips: map[string]Field{
"value3": {
Name: "value3",
Type: data.FieldTypeNullableFloat64,
},
"value100": {
Name: "value100",
Type: data.FieldTypeNullableFloat64,
},
},
}),
FrameProcessors: []FrameProcessor{
NewDropFieldsFrameProcessor(DropFieldsFrameProcessorConfig{
FieldNames: []string{"value2"},
}),
},
FrameOutputters: []FrameOutputter{
NewManagedStreamFrameOutput(f.ManagedStream),
},
},
{
OrgId: 1,
Pattern: "stream/json/exact",
Converter: NewExactJsonConverter(ExactJsonConverterConfig{
Fields: []Field{
{
Name: "time",
Type: data.FieldTypeTime,
Value: "#{now}",
},
{
Name: "value1",
Type: data.FieldTypeNullableFloat64,
Value: "$.value1",
},
{
Name: "value2",
Type: data.FieldTypeNullableFloat64,
Value: "$.value2",
},
{
Name: "value3",
Type: data.FieldTypeNullableFloat64,
Value: "$.value3",
Labels: []Label{
{
Name: "host",
Value: "$.host",
},
},
},
{
Name: "value4",
Type: data.FieldTypeNullableFloat64,
Value: "$.value4",
Config: &data.FieldConfig{
Thresholds: &data.ThresholdsConfig{
Mode: data.ThresholdsModeAbsolute,
Steps: []data.Threshold{
{
Value: 2,
State: "normal",
Color: "green",
},
{
Value: 6,
State: "warning",
Color: "orange",
},
{
Value: 8,
State: "critical",
Color: "red",
},
},
},
},
},
{
Name: "map.red",
Type: data.FieldTypeNullableFloat64,
Value: "$.map.red",
Labels: []Label{
{
Name: "host",
Value: "$.host",
},
{
Name: "host2",
Value: "$.host",
},
},
},
{
Name: "annotation",
Type: data.FieldTypeNullableString,
Value: "$.annotation",
},
{
Name: "running",
Type: data.FieldTypeNullableBool,
Value: "{x.status === 'running'}",
},
{
Name: "num_map_colors",
Type: data.FieldTypeNullableFloat64,
Value: "{Object.keys(x.map).length}",
},
},
}),
FrameOutputters: []FrameOutputter{
NewManagedStreamFrameOutput(f.ManagedStream),
NewRemoteWriteFrameOutput(
os.Getenv("GF_LIVE_REMOTE_WRITE_ENDPOINT"),
os.Getenv("GF_LIVE_REMOTE_WRITE_USER"),
os.Getenv("GF_LIVE_REMOTE_WRITE_PASSWORD"),
0,
),
NewChangeLogFrameOutput(f.FrameStorage, ChangeLogOutputConfig{
FieldName: "value3",
Channel: "stream/json/exact/value3/changes",
}),
NewChangeLogFrameOutput(f.FrameStorage, ChangeLogOutputConfig{
FieldName: "annotation",
Channel: "stream/json/exact/annotation/changes",
}),
NewConditionalOutput(
NewMultipleFrameConditionChecker(
ConditionAll,
NewFrameNumberCompareCondition("value1", "gte", 3.0),
NewFrameNumberCompareCondition("value2", "gte", 3.0),
),
NewRedirectFrameOutput(RedirectOutputConfig{
Channel: "stream/json/exact/condition",
}),
),
NewThresholdOutput(f.FrameStorage, ThresholdOutputConfig{
FieldName: "value4",
Channel: "stream/json/exact/value4/state",
}),
},
},
{
OrgId: 1,
Pattern: "stream/json/exact/value3/changes",
FrameOutputters: []FrameOutputter{
NewManagedStreamFrameOutput(f.ManagedStream),
NewRemoteWriteFrameOutput(
os.Getenv("GF_LIVE_REMOTE_WRITE_ENDPOINT"),
os.Getenv("GF_LIVE_REMOTE_WRITE_USER"),
os.Getenv("GF_LIVE_REMOTE_WRITE_PASSWORD"),
0,
),
},
},
{
OrgId: 1,
Pattern: "stream/json/exact/annotation/changes",
FrameOutputters: []FrameOutputter{
NewManagedStreamFrameOutput(f.ManagedStream),
},
},
{
OrgId: 1,
Pattern: "stream/json/exact/condition",
FrameOutputters: []FrameOutputter{
NewManagedStreamFrameOutput(f.ManagedStream),
},
},
{
OrgId: 1,
Pattern: "stream/json/exact/value4/state",
FrameOutputters: []FrameOutputter{
NewManagedStreamFrameOutput(f.ManagedStream),
},
},
}, nil
}
|
[
"\"GF_TOKEN\"",
"\"GF_TOKEN\"",
"\"GF_TOKEN\"",
"\"GF_LIVE_REMOTE_WRITE_ENDPOINT\"",
"\"GF_LIVE_REMOTE_WRITE_USER\"",
"\"GF_LIVE_REMOTE_WRITE_PASSWORD\"",
"\"GF_LIVE_REMOTE_WRITE_ENDPOINT\"",
"\"GF_LIVE_REMOTE_WRITE_USER\"",
"\"GF_LIVE_REMOTE_WRITE_PASSWORD\"",
"\"GF_LIVE_REMOTE_WRITE_ENDPOINT\"",
"\"GF_LIVE_REMOTE_WRITE_USER\"",
"\"GF_LIVE_REMOTE_WRITE_PASSWORD\""
] |
[] |
[
"GF_TOKEN",
"GF_LIVE_REMOTE_WRITE_ENDPOINT",
"GF_LIVE_REMOTE_WRITE_USER",
"GF_LIVE_REMOTE_WRITE_PASSWORD"
] |
[]
|
["GF_TOKEN", "GF_LIVE_REMOTE_WRITE_ENDPOINT", "GF_LIVE_REMOTE_WRITE_USER", "GF_LIVE_REMOTE_WRITE_PASSWORD"]
|
go
| 4 | 0 | |
janni/jmain.py
|
"""
MIT License
Copyright (c) 2019 Max Planck Institute of Molecular Physiology
Author: Thorsten Wagner ([email protected])
Author: Luca Lusnig ([email protected])
Author: Fabian Schoenfeld ([email protected])
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import argparse
import sys
import json
import os
import h5py
from gooey import Gooey, GooeyParser
import janni.__init__ as ini
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
try:
os.environ["CUDA_VISIBLE_DEVICES"]
except KeyError:
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
DEFAULT_BATCH_SIZE = 4
DEFAULT_PADDING = 24
ARGPARSER = None
def create_config_parser(parser):
config_required_group = parser.add_argument_group(
"Required arguments",
"The arguments are required to create a config file for JANNI",
)
config_required_group.add_argument(
"config_out_path",
default="config_janni.json",
help="Path where you want to write the config file.",
widget="FileSaver",
gooey_options={
"validator": {
"test": 'user_input.endswith("json")',
"message": "File has to end with .json!",
},
"default_file": "config_janni.json"
},
)
config_required_group.add_argument(
"--movie_dir",
help="Path to the directory with the movie files. The movie files can be unaligned. You should use at least 30 movies. If an average with the same filename already exists in even_dir or odd_dir it will be skipped.",
widget="DirChooser",
)
config_required_group.add_argument(
"--even_dir",
help="For each movie in movie_dir, an average based on the even frames is calculated and saved in even_dir.",
widget="DirChooser",
)
config_required_group.add_argument(
"--odd_dir",
help="For each movie in movie_dir, an average based on the odd frames is calculated and saved in odd_dir.",
widget="DirChooser",
)
config_required_group.add_argument(
"--saved_weights_name",
default="janni_model.h5",
help="Path for saving final weights.",
widget="FileSaver",
gooey_options={
"validator": {
"test": 'user_input.endswith("h5")',
"message": "File has to end with .h5!",
},
"default_file": "janni_model.h5"
},
)
config_optional_group = parser.add_argument_group(
"Optional arguments",
"The arguments are optional to create a config file for JANNI",
)
config_optional_group.add_argument(
"--patch_size",
default=1024,
type=int,
help="The image will be denoised in patches. This field defines the patch size..",
)
config_optional_group.add_argument(
"--batch_size",
type=int,
default=4,
help="How many patches are in one mini-batch. If you have memory problems (e.g with cards < 8GB memory), you can try to reduce this value.",
)
config_optional_group.add_argument(
"--learning_rate",
type=float,
default=10**-3,
help="Learning rate, should not be changed.",
)
config_optional_group.add_argument(
"--nb_epoch",
type=int,
default=100,
help="Number of epochs to train. Default is 100. More epochs seems to only slightly improve the results.",
)
def create_train_parser(parser):
required_group = parser.add_argument_group(
"Required arguments", "These options are mandatory to train JANNI"
)
required_group.add_argument(
"config_path",
help="Path to config.json",
widget="FileChooser",
gooey_options={
"wildcard": "*.json"
}
)
optional_group = parser.add_argument_group(
"Optional arguments", "These options are optional to train JANNI"
)
optional_group.add_argument(
"-g", "--gpu", type=int, default=-1, help="GPU ID to run on"
)
def create_predict_parser(parser):
required_group = parser.add_argument_group(
"Required arguments", "These options are mandatory to run JANNI"
)
required_group.add_argument(
"input_path",
help="Directory / file path with images/movies to denoise. In our experience movie aligned averages are working best. \n",
widget="DirChooser",
)
required_group.add_argument(
"output_path",
help="Directory / file path to write denoised images.\n",
widget="DirChooser",
)
required_group.add_argument(
"model_path",
help="File path to trained model.",
widget="FileChooser",
gooey_options={
"wildcard": "*.h5"
}
)
optional_group = parser.add_argument_group(
"Optional arguments", "These options are optional to run JANNI"
)
optional_group.add_argument(
"-ol",
"--overlap",
help="The patches have to overlap to remove artifacts. This is the amount of overlap in pixel. If you observe a grid like pattern in your images, increase this value.",
default=DEFAULT_PADDING,
)
optional_group.add_argument(
"-bs",
"--batch_size",
help="Number of patches predicted in parallel\n",
default=DEFAULT_BATCH_SIZE,
)
optional_group.add_argument(
"-g", "--gpu", type=int, default=-1, help="GPU ID to run on"
)
def create_parser(parser):
subparsers = parser.add_subparsers(help="sub-command help")
parser_config= subparsers.add_parser("config", help="Create the configuration file for JANNI")
create_config_parser(parser_config)
parser_train = subparsers.add_parser("train", help="Train JANNI for your dataset.")
create_train_parser(parser_train)
parser_predict = subparsers.add_parser("denoise", help="Denoise micrographs using a (pre)trained model.")
create_predict_parser(parser_predict)
def get_parser():
parser = GooeyParser(description="Just another noise to noise implementation")
create_parser(parser)
return parser
def _main_():
global ARGPARSER
import sys
if len(sys.argv) >= 2:
if not "--ignore-gooey" in sys.argv:
sys.argv.append("--ignore-gooey")
kwargs = {"terminal_font_family": "monospace", "richtext_controls": True}
Gooey(
main,
program_name="JANNI " + ini.__version__,
#image_dir=os.path.join(os.path.abspath(os.path.dirname(__file__)), "../icons"),
progress_regex=r"^.* \( Progress:\s+(-?\d+) % \)$",
disable_progress_bar_animation=True,
tabbed_groups=True,
default_size=(1024, 730),
**kwargs
)()
def main(args=None):
if args is None:
parser = get_parser()
args = parser.parse_args()
if "config" in sys.argv[1]:
generate_config_file(config_out_path=args.config_out_path,
architecture="unet",
patch_size=args.patch_size,
movie_dir=args.movie_dir,
even_dir=args.even_dir,
odd_dir=args.odd_dir,
batch_size=args.batch_size,
learning_rate=args.learning_rate,
nb_epoch=args.nb_epoch,
saved_weights_name=args.saved_weights_name)
else:
if isinstance(args.gpu, list):
if len(args.gpu) == 1:
if args.gpu[0] != "-1":
str_gpus = args.gpu[0].strip().split(" ")
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(str_gpus)
elif args.gpu != -1:
str_gpus = str(args.gpu)
os.environ["CUDA_VISIBLE_DEVICES"] = str_gpus
if "train" in sys.argv[1]:
config = read_config(args.config_path)
from . import train
train.train(
even_path=config["train"]["even_dir"],
odd_path=config["train"]["odd_dir"],
model_out_path=config["train"]["saved_weights_name"],
movie_path=config["train"]["movie_dir"],
learning_rate=config["train"]["learning_rate"],
epochs=config["train"]["nb_epoch"],
model=config["model"]["architecture"],
patch_size=(config["model"]["patch_size"], config["model"]["patch_size"]),
batch_size=config["train"]["batch_size"],
)
elif "denoise" in sys.argv[1]:
input_path = args.input_path
output_path = args.output_path
model_path = args.model_path
from . import predict
batch_size = DEFAULT_BATCH_SIZE
padding = DEFAULT_PADDING
with h5py.File(model_path, mode="r") as f:
try:
import numpy as np
model = str(np.array((f["model_name"])))
patch_size = tuple(f["patch_size"])
except KeyError:
print("Error on loading model", model_path)
sys.exit(0)
if args.overlap is not None:
padding = int(args.overlap)
if args.batch_size is not None:
batch_size = int(args.batch_size)
predict.predict(
input_path=input_path,
output_path=output_path,
model_path=model_path,
model=model,
patch_size=patch_size,
padding=padding,
batch_size=batch_size,
)
def generate_config_file(config_out_path,
architecture,
patch_size,
movie_dir,
even_dir,
odd_dir,
batch_size,
learning_rate,
nb_epoch,
saved_weights_name):
model_dict = {'architecture': architecture,
'patch_size': patch_size,
}
train_dict = {'movie_dir': movie_dir,
'even_dir': even_dir,
'odd_dir': odd_dir,
'batch_size': batch_size,
'learning_rate': learning_rate,
'nb_epoch': nb_epoch,
"saved_weights_name": saved_weights_name,
}
from json import dump
dict = {"model": model_dict, "train": train_dict}
with open(config_out_path, 'w') as f:
dump(dict, f, ensure_ascii=False, indent=4)
print("Wrote config to", config_out_path)
def read_config(config_path):
with open(config_path) as config_buffer:
try:
config = json.loads(config_buffer.read())
except json.JSONDecodeError:
print(
"Your configuration file seems to be corruped. Please check if it is valid."
)
return config
if __name__ == "__main__":
_main_()
|
[] |
[] |
[
"CUDA_DEVICE_ORDER",
"HDF5_USE_FILE_LOCKING",
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_DEVICE_ORDER", "HDF5_USE_FILE_LOCKING", "CUDA_VISIBLE_DEVICES"]
|
python
| 3 | 0 | |
src/test/java/io/getstream/cloud/CloudFileStorageClientTest.java
|
package io.getstream.cloud;
import io.getstream.client.Client;
import io.getstream.client.FileStorageClient;
import java.io.File;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.nio.file.FileSystems;
import org.junit.Test;
public class CloudFileStorageClientTest {
private static final String apiKey =
System.getenv("STREAM_KEY") != null
? System.getenv("STREAM_KEY")
: System.getProperty("STREAM_KEY");
private static final String secret =
System.getenv("STREAM_SECRET") != null
? System.getenv("STREAM_SECRET")
: System.getProperty("STREAM_SECRET");
@Test
public void uploadBytes() throws Exception {
FileStorageClient client = Client.builder(apiKey, secret).build().files();
URL result = client.upload("test.txt", "Hello World!".getBytes(StandardCharsets.UTF_8)).join();
}
@Test
public void uploadFile() throws Exception {
FileStorageClient client = Client.builder(apiKey, secret).build().files();
File file = FileSystems.getDefault().getPath("data", "test.txt").toFile();
URL result = client.upload(file).join();
}
@Test
public void delete() throws Exception {
FileStorageClient client = Client.builder(apiKey, secret).build().files();
URL result =
client.upload("test.txt", "Goodbye World!".getBytes(StandardCharsets.UTF_8)).join();
client.delete(result).join();
}
}
|
[
"\"STREAM_KEY\"",
"\"STREAM_KEY\"",
"\"STREAM_SECRET\"",
"\"STREAM_SECRET\""
] |
[] |
[
"STREAM_SECRET",
"STREAM_KEY"
] |
[]
|
["STREAM_SECRET", "STREAM_KEY"]
|
java
| 2 | 0 | |
xmlparser.py
|
import xml.etree.ElementTree as ET
import re
import mysql.connector
import spacy
nlp = spacy.load("de_core_news_sm") # load language model
from pathlib import Path
import time
# statistics
time_start_total = time.process_time()
data_counter = 0
time_total = 0
########
def SelectMaxID(id, table):
# Prepared statement
stmt_select = "SELECT MAX(" + id + ") FROM " + table + ";"
mycursor.execute(stmt_select)
id_str = mycursor.fetchone()
# convert tupel to string
id_str = id_str[0]
id_str = str(id_str)
    # If no row exists, MAX() returns NULL (fetched as None and stringified to "None"),
    # so the id starts at 0 in that case.
if id_str == "None":
id = 0
else:
# string to int
id = int(id_str)
return id
#######
# Connetion to database
mydb = mysql.connector.connect(
host="localhost",
user="root",
passwd="",
database="subtitle_corpus"
)
# Initiation of db connection
mycursor = mydb.cursor()
# set max_allowed_packet
mycursor.execute("SET GLOBAL max_allowed_packet=10000000000000")
directory_tokenized = "untokenized corpus files/OpenSubtitles/raw/de"
pathlist = Path(directory_tokenized).glob('**/*.xml')
for xml_path in pathlist:
# timer for single file
time_start_file = time.process_time()
# because path is object not string
path_in_str = str(xml_path)
# import xml structure
try:
xml_root = ET.parse(xml_path).getroot()
except Exception as e:
print("Error while parsing")
print("File skipped!")
continue
#############################################
# collect meta informations
# filename -> id_subtitle
id_subtitle = xml_root.attrib # won't be in use
    # release year annotated by OpenSubtitles; it is faulty, do not use
movie_year = re.search(r'(de\\\d+\\)(\d+)', path_in_str).group(1).lstrip("de").strip("\\")
# foldername -> id_movie
id_movie = re.search(r'(de\\\d+\\)(\d+)', path_in_str).group(2)
try:
duration = xml_root.find('meta/subtitle/duration').text
except:
duration = ""
try:
genre = xml_root.find('meta/source/genre').text
genres = genre.split(',')
except:
genres = ""
try:
translated_language = xml_root.find('meta/subtitle/language').text
except:
translated_language = ""
try:
original_language = xml_root.find('meta/source/original').text
except:
original_language = ""
try:
country = xml_root.find('meta/source/country').text
except:
country = ""
try:
year = xml_root.find('meta/source/year').text
except:
year = ""
#####################################
# user feedback
print("Current file: " + movie_year + "/" + id_movie + "/" + id_subtitle['id'] + ".xml" )
#####################################
# insert meta
# check the currently highest id_meta
id_meta = SelectMaxID("id_meta", "subtitle_meta")
id_meta = id_meta + 1
# process multiple genres and write meta infos to db
if len(genres) < 1:
stmt_insert = "INSERT INTO subtitle_meta (id_meta, id_movie, duration, translated_language, original_language, country, year, genre1) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)"
val_insert = (id_meta, id_movie, duration, translated_language, original_language, country, year, "")
mycursor.execute(stmt_insert, val_insert)
mydb.commit()
elif len(genres) == 1:
stmt_insert = "INSERT INTO subtitle_meta (id_meta, id_movie, duration, translated_language, original_language, country, year, genre1) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)"
val_insert = (id_meta, id_movie, duration, translated_language, original_language, country, year, genres[0])
mycursor.execute(stmt_insert, val_insert)
mydb.commit()
elif len(genres) == 2:
stmt_insert = "INSERT INTO subtitle_meta (id_meta, id_movie, duration, translated_language, original_language, country, year, genre1, genre2) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)"
val_insert = (id_meta, id_movie, duration, translated_language, original_language, country, year, genres[0], genres[1])
mycursor.execute(stmt_insert, val_insert)
mydb.commit()
elif len(genres) == 3:
stmt_insert = "INSERT INTO subtitle_meta (id_meta, id_movie, duration, translated_language, original_language, country, year, genre1, genre2, genre3) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
val_insert = (id_meta, id_movie, duration, translated_language, original_language, country, year, genres[0], genres[1], genres[2])
mycursor.execute(stmt_insert, val_insert)
mydb.commit()
else:
print("Invalid genre information (more than three genres), skipping file: " + movie_year + "/" + id_movie + "/" + id_subtitle['id'] + ".xml")
continue
#############################################
# gathering subtitle data and annotation with spacy
# check the currently highest id_token
id_token = SelectMaxID("id_token", "subtitle_token")
# same with id_join
id_join = SelectMaxID("id_join", "subtitle_join")
# todo: change SelectMaxID into mycursor.lastrowid for better performance
# empty lists of row dicts, needed to cache the data before the db commit
token_dict = []
join_dict = []
text_list = []
# iterate through the xml structure
for s in xml_root.findall('s'):
# finds text right after <s>
text_list.append((s.text or "").strip()) # guard against missing text so the literal string "None" is never collected
for tag_time in s.findall('time'):
# finds remaining text behind <time> elements
text_list.append((tag_time.tail or "").strip())
# remove empty elements from list
text_list_clean = list(filter(None, text_list))
# list to string
text_string = ""
for i in text_list_clean:
text_string = text_string + i + " "
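# A hedged illustration (the snippet is hypothetical, not from the corpus): for
# <s> Hallo. <time .../> Wie geht es dir? <time .../></s> the loop above collects
# "Hallo." from s.text and "Wie geht es dir?" from the tail of each <time> element,
# drops empty strings and joins everything into one space-separated string for spaCy.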
# annotation with spaCy
# create spaCy object
doc = nlp(text_string)
for token in doc:
token_text = token.text
lemma = token.lemma_
pos = token.pos_
if not token.morph.get("Tense") == []:
tense_list = token.morph.get("Tense")
tense = str(tense_list).replace('[','').replace(']','').strip("'")
else:
tense = ""
if not token.morph.get("VerbForm") == []:
verb_form_list = token.morph.get("VerbForm")
verb_form = str(verb_form_list).replace('[','').replace(']','').strip("'")
else:
verb_form = ""
# dict for insert subtitle_words
id_token = id_token + 1
token_dict.append({ 'id_token' : id_token, 'token' : token_text, 'lemma' : lemma, 'pos' : pos, 'verb_form' : verb_form, 'tense' : tense})
# example:
# {'id_token': 5607, 'token': '?', 'lemma': '?', 'pos': 'PUNCT', 'verb_form': '', 'tense': ''},
# {'id_token': 5608, 'token': 'Ja', 'lemma': 'Ja', 'pos': 'PART', 'verb_form': '', 'tense': ''},
# {'id_token': 5609, 'token': '.', 'lemma': '.', 'pos': 'PUNCT', 'verb_form': '', 'tense': ''},
# {'id_token': 5610, 'token': 'Die', 'lemma': 'der', 'pos': 'DET', 'verb_form': '', 'tense': ''},
# {'id_token': 5611, 'token': 'Kinder', 'lemma': 'Kind', 'pos': 'NOUN', 'verb_form': '', 'tense': ''},
# {'id_token': 5612, 'token': 'freuen', 'lemma': 'freuen', 'pos': 'VERB', 'verb_form': ['Fin'], 'tense': ['Pres']},
# {'id_token': 5613, 'token': 'sich', 'lemma': 'sich', 'pos': 'PRON', 'verb_form': '', 'tense': ''},
# {'id_token': 5614, 'token': ',', 'lemma': ',', 'pos': 'PUNCT', 'verb_form': '', 'tense': ''},
# {'id_token': 5615, 'token': 'wenn', 'lemma': 'wenn', 'pos': 'SCONJ', 'verb_form': '', 'tense': ''},
# {'id_token': 5616, 'token': 'er', 'lemma': 'ich', 'pos': 'PRON', 'verb_form': '', 'tense': ''},
# dict for insert subtitle_join
id_join = id_join + 1
join_dict.append({ 'id_join' : id_join, 'id_token' : id_token, 'id_meta' : id_meta })
##############################
# insert gathered data into db
# subtitle_token
sql = "INSERT INTO subtitle_token ( id_token, token, lemma, pos, verb_form, tense ) VALUES ( %(id_token)s, %(token)s, %(lemma)s, %(pos)s, %(verb_form)s, %(tense)s )"
try:
mycursor.executemany(sql, token_dict)
mydb.commit()
except Exception as e:
print("Token Error:", e )
# subtitle_join
sql = "INSERT INTO subtitle_join ( id_join, id_token, id_meta ) VALUES ( %(id_join)s, %(id_token)s, %(id_meta)s )"
try:
mycursor.executemany(sql, join_dict)
mydb.commit()
except Exception as e:
print("Join Error:", e )
##############################
# user feedback and statistics
elapsed_time_file = time.process_time() - time_start_file
elapsed_time_total = time.process_time() - time_start_total
time_total = elapsed_time_total # elapsed_time_total is already measured from the script start, so assign instead of accumulating
print("File #", data_counter, "completed \nProcess time (file): ", elapsed_time_file , "\nProcess time (total): ", elapsed_time_total, "\n")
data_counter = data_counter + 1
##############################
print("\n\n", data_counter, " files imported.\nOverall process time: ", time_total, "\n\nDone!")
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
digester/emailer.py
|
import logging
import os
import requests
logger = logging.getLogger(__name__)
def send_email(text):
logger.info('Sending mail: %s', text)
response = requests.post(os.getenv('MAILGUN_URL'),
auth=("api", os.getenv('MAILGUN_APIKEY', '')),
data={
"from": os.getenv('EMAIL_SENDER'),
"to": [os.getenv('EMAIL_RECIPIENT')],
"subject": "Digest",
"text": text
})
if response.status_code != 200:
raise Exception(f'Could not send email. Reason: {response.content}')
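# A minimal usage sketch (added; assumes the Mailgun-related variables below are provided
# via the environment, with placeholder values rather than real endpoints or keys):
#   MAILGUN_URL=https://api.mailgun.net/v3/example.org/messages
#   MAILGUN_APIKEY=key-xxxx
#   EMAIL_SENDER=digest@example.org
#   EMAIL_RECIPIENT=you@example.org
if __name__ == '__main__':
    send_email('Test digest body')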
|
[] |
[] |
[
"EMAIL_SENDER",
"MAILGUN_URL",
"EMAIL_RECIPIENT",
"MAILGUN_APIKEY"
] |
[]
|
["EMAIL_SENDER", "MAILGUN_URL", "EMAIL_RECIPIENT", "MAILGUN_APIKEY"]
|
python
| 4 | 0 | |
src/shippingservice/main.go
|
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"net"
"os"
"time"
"cloud.google.com/go/profiler"
"contrib.go.opencensus.io/exporter/jaeger"
"contrib.go.opencensus.io/exporter/stackdriver"
"github.com/sirupsen/logrus"
"go.opencensus.io/plugin/ocgrpc"
"go.opencensus.io/stats/view"
"go.opencensus.io/trace"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/reflection"
"google.golang.org/grpc/status"
pb "github.com/GoogleCloudPlatform/microservices-demo/src/shippingservice/genproto"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
)
const (
defaultPort = "50051"
)
var log *logrus.Logger
func init() {
log = logrus.New()
log.Level = logrus.DebugLevel
log.Formatter = &logrus.JSONFormatter{
FieldMap: logrus.FieldMap{
logrus.FieldKeyTime: "timestamp",
logrus.FieldKeyLevel: "severity",
logrus.FieldKeyMsg: "message",
},
TimestampFormat: time.RFC3339Nano,
}
log.Out = os.Stdout
}
func main() {
if os.Getenv("DISABLE_TRACING") == "" {
log.Info("Tracing enabled.")
go initTracing()
} else {
log.Info("Tracing disabled.")
}
if os.Getenv("DISABLE_PROFILER") == "" {
log.Info("Profiling enabled.")
go initProfiling("shippingservice", "1.0.0")
} else {
log.Info("Profiling disabled.")
}
port := defaultPort
if value, ok := os.LookupEnv("PORT"); ok {
port = value
}
port = fmt.Sprintf(":%s", port)
lis, err := net.Listen("tcp", port)
if err != nil {
log.Fatalf("failed to listen: %v", err)
}
var srv *grpc.Server
if os.Getenv("DISABLE_STATS") == "" {
log.Info("Stats enabled.")
srv = grpc.NewServer(grpc.StatsHandler(&ocgrpc.ServerHandler{}))
} else {
log.Info("Stats disabled.")
srv = grpc.NewServer()
}
svc := &server{}
pb.RegisterShippingServiceServer(srv, svc)
healthpb.RegisterHealthServer(srv, svc)
log.Infof("Shipping Service listening on port %s", port)
// Register reflection service on gRPC server.
reflection.Register(srv)
if err := srv.Serve(lis); err != nil {
log.Fatalf("failed to serve: %v", err)
}
}
// server controls RPC service responses.
type server struct{}
// Check is for health checking.
func (s *server) Check(ctx context.Context, req *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
return &healthpb.HealthCheckResponse{Status: healthpb.HealthCheckResponse_SERVING}, nil
}
func (s *server) Watch(req *healthpb.HealthCheckRequest, ws healthpb.Health_WatchServer) error {
return status.Errorf(codes.Unimplemented, "health check via Watch not implemented")
}
// GetQuote produces a shipping quote (cost) in USD.
func (s *server) GetQuote(ctx context.Context, in *pb.GetQuoteRequest) (*pb.GetQuoteResponse, error) {
log.Info("[GetQuote] received request")
defer log.Info("[GetQuote] completed request")
// 1. Our quote system requires the total number of items to be shipped.
count := 0
for _, item := range in.GetItems() {
count += int(item.GetQuantity())
}
// 2. Generate a quote based on the total number of items to be shipped.
quote := CreateQuoteFromCount(count)
// 3. Generate a response.
return &pb.GetQuoteResponse{
CostUsd: &pb.Money{
CurrencyCode: "USD",
Units: int64(quote.Dollars),
Nanos: int32(quote.Cents * 10000000)},
}, nil
}
// ShipOrder mocks that the requested items will be shipped.
// It supplies a tracking ID for notional lookup of shipment delivery status.
func (s *server) ShipOrder(ctx context.Context, in *pb.ShipOrderRequest) (*pb.ShipOrderResponse, error) {
log.Info("[ShipOrder] received request")
defer log.Info("[ShipOrder] completed request")
// 1. Create a Tracking ID
baseAddress := fmt.Sprintf("%s, %s, %s", in.GetAddress().GetStreetAddress(), in.GetAddress().GetCity(), in.GetAddress().GetState())
id := CreateTrackingId(baseAddress)
// 2. Generate a response.
return &pb.ShipOrderResponse{
TrackingId: id,
}, nil
}
func initJaegerTracing() {
svcAddr := os.Getenv("JAEGER_SERVICE_ADDR")
if svcAddr == "" {
log.Info("jaeger initialization disabled.")
return
}
// Register the Jaeger exporter to be able to retrieve
// the collected spans.
exporter, err := jaeger.NewExporter(jaeger.Options{
Endpoint: fmt.Sprintf("http://%s", svcAddr),
Process: jaeger.Process{
ServiceName: "shippingservice",
},
})
if err != nil {
log.Fatal(err)
}
trace.RegisterExporter(exporter)
log.Info("jaeger initialization completed.")
}
func initStats(exporter *stackdriver.Exporter) {
view.SetReportingPeriod(60 * time.Second)
view.RegisterExporter(exporter)
if err := view.Register(ocgrpc.DefaultServerViews...); err != nil {
log.Warn("Error registering default server views")
} else {
log.Info("Registered default server views")
}
}
func initStackdriverTracing() {
// TODO(ahmetb) this method is duplicated in other microservices using Go
// since they are not sharing packages.
for i := 1; i <= 3; i++ {
exporter, err := stackdriver.NewExporter(stackdriver.Options{})
if err != nil {
log.Warnf("failed to initialize Stackdriver exporter: %+v", err)
} else {
trace.RegisterExporter(exporter)
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
log.Info("registered Stackdriver tracing")
// Register the views to collect server stats.
initStats(exporter)
return
}
d := time.Second * 10 * time.Duration(i)
log.Infof("sleeping %v to retry initializing Stackdriver exporter", d)
time.Sleep(d)
}
log.Warn("could not initialize Stackdriver exporter after retrying, giving up")
}
func initTracing() {
initJaegerTracing()
initStackdriverTracing()
}
func initProfiling(service, version string) {
// TODO(ahmetb) this method is duplicated in other microservices using Go
// since they are not sharing packages.
for i := 1; i <= 3; i++ {
if err := profiler.Start(profiler.Config{
Service: service,
ServiceVersion: version,
// ProjectID must be set if not running on GCP.
// ProjectID: "my-project",
}); err != nil {
log.Warnf("failed to start profiler: %+v", err)
} else {
log.Info("started Stackdriver profiler")
return
}
d := time.Second * 10 * time.Duration(i)
log.Infof("sleeping %v to retry initializing Stackdriver profiler", d)
time.Sleep(d)
}
log.Warn("could not initialize Stackdriver profiler after retrying, giving up")
}
|
[
"\"DISABLE_TRACING\"",
"\"DISABLE_PROFILER\"",
"\"DISABLE_STATS\"",
"\"JAEGER_SERVICE_ADDR\""
] |
[] |
[
"JAEGER_SERVICE_ADDR",
"DISABLE_TRACING",
"DISABLE_STATS",
"DISABLE_PROFILER"
] |
[]
|
["JAEGER_SERVICE_ADDR", "DISABLE_TRACING", "DISABLE_STATS", "DISABLE_PROFILER"]
|
go
| 4 | 0 | |
stage1/init/init.go
|
// Copyright 2014 The rkt Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//+build linux
package main
// #cgo LDFLAGS: -ldl
// #include <stdlib.h>
// #include <dlfcn.h>
// #include <sys/types.h>
//
// int
// my_sd_pid_get_owner_uid(void *f, pid_t pid, uid_t *uid)
// {
// int (*sd_pid_get_owner_uid)(pid_t, uid_t *);
//
// sd_pid_get_owner_uid = (int (*)(pid_t, uid_t *))f;
// return sd_pid_get_owner_uid(pid, uid);
// }
//
// int
// my_sd_pid_get_unit(void *f, pid_t pid, char **unit)
// {
// int (*sd_pid_get_unit)(pid_t, char **);
//
// sd_pid_get_unit = (int (*)(pid_t, char **))f;
// return sd_pid_get_unit(pid, unit);
// }
//
// int
// my_sd_pid_get_slice(void *f, pid_t pid, char **slice)
// {
// int (*sd_pid_get_slice)(pid_t, char **);
//
// sd_pid_get_slice = (int (*)(pid_t, char **))f;
// return sd_pid_get_slice(pid, slice);
// }
//
import "C"
// this implements /init of stage1/nspawn+systemd
import (
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"unsafe"
"github.com/coreos/rkt/Godeps/_workspace/src/github.com/appc/goaci/proj2aci"
"github.com/coreos/rkt/Godeps/_workspace/src/github.com/appc/spec/schema/types"
"github.com/coreos/rkt/Godeps/_workspace/src/github.com/coreos/go-systemd/util"
"github.com/coreos/rkt/Godeps/_workspace/src/github.com/godbus/dbus"
"github.com/coreos/rkt/Godeps/_workspace/src/github.com/godbus/dbus/introspect"
"github.com/coreos/rkt/common"
"github.com/coreos/rkt/common/cgroup"
"github.com/coreos/rkt/networking"
"github.com/coreos/rkt/pkg/sys"
)
const (
// Path to systemd-nspawn binary within the stage1 rootfs
nspawnBin = "/usr/bin/systemd-nspawn"
// Path to the interpreter within the stage1 rootfs
interpBin = "/usr/lib/ld-linux-x86-64.so.2"
// Path to the localtime file/symlink in host
localtimePath = "/etc/localtime"
)
// mirrorLocalZoneInfo tries to reproduce the /etc/localtime target in stage1/ to satisfy systemd-nspawn
func mirrorLocalZoneInfo(root string) {
zif, err := os.Readlink(localtimePath)
if err != nil {
return
}
// On some systems /etc/localtime is a relative symlink, make it absolute
if !filepath.IsAbs(zif) {
zif = filepath.Join(filepath.Dir(localtimePath), zif)
zif = filepath.Clean(zif)
}
src, err := os.Open(zif)
if err != nil {
return
}
defer src.Close()
destp := filepath.Join(common.Stage1RootfsPath(root), zif)
if err = os.MkdirAll(filepath.Dir(destp), 0755); err != nil {
return
}
dest, err := os.OpenFile(destp, os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return
}
defer dest.Close()
_, _ = io.Copy(dest, src)
}
var (
debug bool
privNet common.PrivateNetList
interactive bool
mdsToken string
localhostIP net.IP
localConfig string
)
func init() {
flag.BoolVar(&debug, "debug", false, "Run in debug mode")
flag.Var(&privNet, "private-net", "Setup private network")
flag.BoolVar(&interactive, "interactive", false, "The pod is interactive")
flag.StringVar(&mdsToken, "mds-token", "", "MDS auth token")
flag.StringVar(&localConfig, "local-config", common.DefaultLocalConfigDir, "Local config path")
// this ensures that main runs only on main thread (thread group leader).
// since namespace ops (unshare, setns) are done for a single thread, we
// must ensure that the goroutine does not jump from OS thread to thread
runtime.LockOSThread()
localhostIP = net.ParseIP("127.0.0.1")
if localhostIP == nil {
panic("localhost IP failed to parse")
}
}
// machinedRegister checks if nspawn should register the pod to machined
func machinedRegister() bool {
// machined has a D-Bus interface following versioning guidelines, see:
// http://www.freedesktop.org/wiki/Software/systemd/machined/
// Therefore we can just check if the D-Bus method we need exists and we
// don't need to check the signature.
var found int
conn, err := dbus.SystemBus()
if err != nil {
return false
}
node, err := introspect.Call(conn.Object("org.freedesktop.machine1", "/org/freedesktop/machine1"))
if err != nil {
return false
}
for _, iface := range node.Interfaces {
if iface.Name != "org.freedesktop.machine1.Manager" {
continue
}
// machined v215 supports methods "RegisterMachine" and "CreateMachine" called by nspawn v215.
// machined v216+ (since commit 5aa4bb) additionally supports methods "CreateMachineWithNetwork"
// and "RegisterMachineWithNetwork", called by nspawn v216+.
for _, method := range iface.Methods {
if method.Name == "CreateMachineWithNetwork" || method.Name == "RegisterMachineWithNetwork" {
found++
}
}
break
}
return found == 2
}
func lookupPath(bin string, paths string) (string, error) {
pathsArr := filepath.SplitList(paths)
for _, path := range pathsArr {
binPath := filepath.Join(path, bin)
binAbsPath, err := filepath.Abs(binPath)
if err != nil {
return "", fmt.Errorf("unable to find absolute path for %s", binPath)
}
d, err := os.Stat(binAbsPath)
if err != nil {
continue
}
// Check the executable bit, inspired by os.exec.LookPath()
if m := d.Mode(); !m.IsDir() && m&0111 != 0 {
return binAbsPath, nil
}
}
return "", fmt.Errorf("unable to find %q in %q", bin, paths)
}
func installAssets() error {
systemctlBin, err := lookupPath("systemctl", os.Getenv("PATH"))
if err != nil {
return err
}
bashBin, err := lookupPath("bash", os.Getenv("PATH"))
if err != nil {
return err
}
// More paths could be added in that list if some Linux distributions install it in a different path
// Note that we look in /usr/lib/... first because of the merge:
// http://www.freedesktop.org/wiki/Software/systemd/TheCaseForTheUsrMerge/
systemdShutdownBin, err := lookupPath("systemd-shutdown", "/usr/lib/systemd:/lib/systemd")
if err != nil {
return err
}
systemdBin, err := lookupPath("systemd", "/usr/lib/systemd:/lib/systemd")
if err != nil {
return err
}
assets := []string{}
assets = append(assets, proj2aci.GetAssetString("/usr/lib/systemd/systemd", systemdBin))
assets = append(assets, proj2aci.GetAssetString("/usr/bin/systemctl", systemctlBin))
assets = append(assets, proj2aci.GetAssetString("/usr/bin/bash", bashBin))
// systemd-shutdown has to be installed at the same path as on the host
// because it depends on systemd build flag -DSYSTEMD_SHUTDOWN_BINARY_PATH=
assets = append(assets, proj2aci.GetAssetString(systemdShutdownBin, systemdShutdownBin))
return proj2aci.PrepareAssets(assets, "./stage1/rootfs/", nil)
}
// getArgsEnv returns the nspawn args and env according to the usr used
func getArgsEnv(p *Pod, flavor string, debug bool) ([]string, []string, error) {
args := []string{}
env := os.Environ()
switch flavor {
case "kvm":
// kernel and lkvm are relative path, because init has /var/lib/rkt/..../uuid as its working directory
// TODO: move to path.go
kernelPath := filepath.Join(common.Stage1RootfsPath(p.Root), "bzImage")
lkvmPath := filepath.Join(common.Stage1RootfsPath(p.Root), "lkvm")
// TODO: base on resource isolators
cpu := 1
mem := 128
kernelParams := []string{
"console=hvc0",
"init=/usr/lib/systemd/systemd",
"no_timer_check",
"noreplace-smp",
"systemd.default_standard_error=journal+console",
"systemd.default_standard_output=journal+console",
// "systemd.default_standard_output=tty",
"tsc=reliable",
"MACHINEID=" + p.UUID.String(),
}
if debug {
kernelParams = append(kernelParams, []string{
"debug",
"systemd.log_level=debug",
"systemd.show_status=true",
// "systemd.confirm_spawn=true",
}...)
} else {
kernelParams = append(kernelParams, "quiet")
}
args = append(args, []string{
"./" + lkvmPath, // relative path
"run",
"--name", "rkt-" + p.UUID.String(),
"--no-dhcp", // speed bootup
"--cpu", strconv.Itoa(cpu),
"--mem", strconv.Itoa(mem),
"--console=virtio",
"--kernel", kernelPath,
"--disk", "stage1/rootfs", // relative to run/pods/uuid dir this is a place where systemd resides
// MACHINEID will be available as environment variable
"--params", strings.Join(kernelParams, " "),
}...,
)
if debug {
args = append(args, "--debug")
}
// TODO: host volume sharing with 9p
// TODO: append additional networks settings
// args = append(args, network/volumes args...)
return args, env, nil
case "coreos":
args = append(args, filepath.Join(common.Stage1RootfsPath(p.Root), interpBin))
args = append(args, filepath.Join(common.Stage1RootfsPath(p.Root), nspawnBin))
args = append(args, "--boot") // Launch systemd in the pod
if machinedRegister() {
args = append(args, fmt.Sprintf("--register=true"))
} else {
args = append(args, fmt.Sprintf("--register=false"))
}
// use only dynamic libraries provided in the image
env = append(env, "LD_LIBRARY_PATH="+filepath.Join(common.Stage1RootfsPath(p.Root), "usr/lib"))
case "src":
args = append(args, filepath.Join(common.Stage1RootfsPath(p.Root), nspawnBin))
args = append(args, "--boot") // Launch systemd in the pod
if machinedRegister() {
args = append(args, fmt.Sprintf("--register=true"))
} else {
args = append(args, fmt.Sprintf("--register=false"))
}
case "host":
hostNspawnBin, err := lookupPath("systemd-nspawn", os.Getenv("PATH"))
if err != nil {
return nil, nil, err
}
// Check dynamically which version is installed on the host
// Support version >= 220
versionBytes, err := exec.Command(hostNspawnBin, "--version").CombinedOutput()
if err != nil {
return nil, nil, fmt.Errorf("unable to probe %s version: %v", hostNspawnBin, err)
}
versionStr := strings.SplitN(string(versionBytes), "\n", 2)[0]
var version int
n, err := fmt.Sscanf(versionStr, "systemd %d", &version)
if err != nil {
return nil, nil, fmt.Errorf("cannot parse version: %q", versionStr)
}
if n != 1 || version < 220 {
return nil, nil, fmt.Errorf("rkt needs systemd-nspawn >= 220. %s version not supported: %v", hostNspawnBin, versionStr)
}
// Copy systemd, bash, etc. in stage1 at run-time
if err := installAssets(); err != nil {
return nil, nil, fmt.Errorf("cannot install assets from the host: %v", err)
}
args = append(args, hostNspawnBin)
args = append(args, "--boot") // Launch systemd in the pod
args = append(args, fmt.Sprintf("--register=true"))
default:
return nil, nil, fmt.Errorf("unrecognized stage1 flavor: %q", flavor)
}
// link journal only if the host is running systemd
if util.IsRunningSystemd() {
// we write /etc/machine-id here because systemd-nspawn needs it to link
// the container's journal to the host
mPath := filepath.Join(common.Stage1RootfsPath(p.Root), "etc", "machine-id")
mId := strings.Replace(p.UUID.String(), "-", "", -1)
if err := ioutil.WriteFile(mPath, []byte(mId), 0644); err != nil {
log.Fatalf("error writing /etc/machine-id: %v\n", err)
}
args = append(args, "--link-journal=try-guest")
}
if !debug {
args = append(args, "--quiet") // silence most nspawn output (log_warning is currently not covered by this)
env = append(env, "SYSTEMD_LOG_LEVEL=err") // silence log_warning too
}
keepUnit, err := isRunningFromUnitFile()
if err != nil {
return nil, nil, fmt.Errorf("error determining if we're running from a unit file: %v", err)
}
if keepUnit {
args = append(args, "--keep-unit")
}
nsargs, err := p.PodToNspawnArgs()
if err != nil {
return nil, nil, fmt.Errorf("Failed to generate nspawn args: %v", err)
}
args = append(args, nsargs...)
// Arguments to systemd
args = append(args, "--")
args = append(args, "--default-standard-output=tty") // redirect all service logs straight to tty
if !debug {
args = append(args, "--log-target=null") // silence systemd output inside pod
// TODO remove --log-level=warning when we update stage1 to systemd v222
args = append(args, "--log-level=warning") // limit log output (systemd-shutdown ignores --log-target)
args = append(args, "--show-status=0") // silence systemd initialization status output
}
return args, env, nil
}
func withClearedCloExec(lfd int, f func() error) error {
err := sys.CloseOnExec(lfd, false)
if err != nil {
return err
}
defer sys.CloseOnExec(lfd, true)
return f()
}
func forwardedPorts(pod *Pod) ([]networking.ForwardedPort, error) {
fps := []networking.ForwardedPort{}
for _, ep := range pod.Manifest.Ports {
n := ""
fp := networking.ForwardedPort{}
for _, a := range pod.Manifest.Apps {
for _, p := range a.App.Ports {
if p.Name == ep.Name {
if n == "" {
fp.Protocol = p.Protocol
fp.HostPort = ep.HostPort
fp.PodPort = p.Port
n = a.Name.String()
} else {
return nil, fmt.Errorf("Ambiguous exposed port in PodManifest: %q and %q both define port %q", n, a.Name, p.Name)
}
}
}
}
if n == "" {
return nil, fmt.Errorf("Port name %q is not defined by any apps", ep.Name)
}
fps = append(fps, fp)
}
// TODO(eyakubovich): validate that there're no conflicts
return fps, nil
}
func writePpid(pid int) error {
// write ppid file as specified in
// Documentation/devel/stage1-implementors-guide.md
out, err := os.Getwd()
if err != nil {
return fmt.Errorf("Cannot get current working directory: %v\n", err)
}
// we are the parent of the process that is PID 1 in the container so we write our PID to "ppid"
err = ioutil.WriteFile(filepath.Join(out, "ppid"),
[]byte(fmt.Sprintf("%d\n", pid)), 0644)
if err != nil {
return fmt.Errorf("Cannot write ppid file: %v\n", err)
}
return nil
}
func stage1() int {
uuid, err := types.NewUUID(flag.Arg(0))
if err != nil {
fmt.Fprintln(os.Stderr, "UUID is missing or malformed")
return 1
}
root := "."
p, err := LoadPod(root, uuid)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to load pod: %v\n", err)
return 1
}
// set close-on-exec flag on RKT_LOCK_FD so it gets correctly closed when invoking
// network plugins
lfd, err := common.GetRktLockFD()
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to get rkt lock fd: %v\n", err)
return 1
}
if err := sys.CloseOnExec(lfd, true); err != nil {
fmt.Fprintf(os.Stderr, "Failed to set FD_CLOEXEC on rkt lock: %v\n", err)
return 1
}
mirrorLocalZoneInfo(p.Root)
if privNet.Any() {
fps, err := forwardedPorts(p)
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
return 6
}
n, err := networking.Setup(root, p.UUID, fps, privNet, localConfig)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to setup network: %v\n", err)
return 6
}
if err = n.Save(); err != nil {
fmt.Fprintf(os.Stderr, "Failed to save networking state %v\n", err)
n.Teardown()
return 6
}
if len(mdsToken) > 0 {
hostIP, err := n.GetDefaultHostIP()
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to get default Host IP: %v\n", err)
return 6
}
p.MetadataServiceURL = common.MetadataServicePublicURL(hostIP, mdsToken)
}
} else {
if len(mdsToken) > 0 {
p.MetadataServiceURL = common.MetadataServicePublicURL(localhostIP, mdsToken)
}
}
flavor, _, err := p.getFlavor()
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to get stage1 flavor: %v\n", err)
return 3
}
if err = p.WritePrepareAppTemplate(); err != nil {
fmt.Fprintf(os.Stderr, "Failed to write prepare-app service template: %v\n", err)
return 2
}
if err = p.PodToSystemd(interactive); err != nil {
fmt.Fprintf(os.Stderr, "Failed to configure systemd: %v\n", err)
return 2
}
args, env, err := getArgsEnv(p, flavor, debug)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return 3
}
// create a separate mount namespace so the cgroup filesystems
// are unmounted when exiting the pod
if err := syscall.Unshare(syscall.CLONE_NEWNS); err != nil {
log.Fatalf("error unsharing: %v", err)
}
// we recursively make / a "shared and slave" so mount events from the
// new namespace don't propagate to the host namespace but mount events
// from the host propagate to the new namespace and are forwarded to
// its peer group
// See https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt
if err := syscall.Mount("", "/", "none", syscall.MS_REC|syscall.MS_SLAVE, ""); err != nil {
log.Fatalf("error making / a slave mount: %v", err)
}
if err := syscall.Mount("", "/", "none", syscall.MS_REC|syscall.MS_SHARED, ""); err != nil {
log.Fatalf("error making / a shared and slave mount: %v", err)
}
var serviceNames []string
for _, app := range p.Manifest.Apps {
serviceNames = append(serviceNames, ServiceUnitName(app.Name))
}
s1Root := common.Stage1RootfsPath(p.Root)
machineID := p.GetMachineID()
subcgroup, err := getContainerSubCgroup(machineID)
if err == nil {
if err := cgroup.CreateCgroups(s1Root, subcgroup, serviceNames); err != nil {
fmt.Fprintf(os.Stderr, "Error creating cgroups: %v\n", err)
return 5
}
} else {
fmt.Fprintf(os.Stderr, "Continuing with per-app isolators disabled: %v\n", err)
}
if err = writePpid(os.Getpid()); err != nil {
fmt.Fprintln(os.Stderr, err.Error())
return 4
}
err = withClearedCloExec(lfd, func() error {
return syscall.Exec(args[0], args, env)
})
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to execute %q: %v\n", args[0], err)
return 7
}
return 0
}
func getContainerSubCgroup(machineID string) (string, error) {
var subcgroup string
fromUnit, err := isRunningFromUnitFile()
if err != nil {
return "", fmt.Errorf("could not determine if we're running from a unit file: %v", err)
}
if fromUnit {
slice, err := getSlice()
if err != nil {
return "", fmt.Errorf("could not get slice name: %v", err)
}
slicePath, err := common.SliceToPath(slice)
if err != nil {
return "", fmt.Errorf("could not convert slice name to path: %v", err)
}
unit, err := getUnitFileName()
if err != nil {
return "", fmt.Errorf("could not get unit name: %v", err)
}
subcgroup = filepath.Join(slicePath, unit, "system.slice")
} else {
if machinedRegister() {
// we are not in the final cgroup yet: systemd-nspawn will move us
// to the correct cgroup later during registration so we can't
// look it up in /proc/self/cgroup
escapedmID := strings.Replace(machineID, "-", "\\x2d", -1)
machineDir := "machine-" + escapedmID + ".scope"
subcgroup = filepath.Join("machine.slice", machineDir, "system.slice")
} else {
// when registration is disabled the container will be directly
// under rkt's cgroup so we can look it up in /proc/self/cgroup
ownCgroupPath, err := cgroup.GetOwnCgroupPath("name=systemd")
if err != nil {
return "", fmt.Errorf("could not get own cgroup path: %v", err)
}
subcgroup = filepath.Join(ownCgroupPath, "system.slice")
}
}
return subcgroup, nil
}
func getUnitFileName() (unit string, err error) {
libname := C.CString("libsystemd.so")
defer C.free(unsafe.Pointer(libname))
handle := C.dlopen(libname, C.RTLD_LAZY)
if handle == nil {
err = fmt.Errorf("error opening libsystemd.so")
return
}
defer func() {
if r := C.dlclose(handle); r != 0 {
err = fmt.Errorf("error closing libsystemd.so")
}
}()
sym := C.CString("sd_pid_get_unit")
defer C.free(unsafe.Pointer(sym))
sd_pid_get_unit := C.dlsym(handle, sym)
if sd_pid_get_unit == nil {
err = fmt.Errorf("error resolving sd_pid_get_unit function")
return
}
var s string
u := C.CString(s)
defer C.free(unsafe.Pointer(u))
ret := C.my_sd_pid_get_unit(sd_pid_get_unit, 0, &u)
if ret < 0 {
err = fmt.Errorf("error calling sd_pid_get_unit: %v", syscall.Errno(-ret))
return
}
unit = C.GoString(u)
return
}
func getSlice() (slice string, err error) {
libname := C.CString("libsystemd.so")
defer C.free(unsafe.Pointer(libname))
handle := C.dlopen(libname, C.RTLD_LAZY)
if handle == nil {
err = fmt.Errorf("error opening libsystemd.so")
return
}
defer func() {
if r := C.dlclose(handle); r != 0 {
err = fmt.Errorf("error closing libsystemd.so")
}
}()
sym := C.CString("sd_pid_get_slice")
defer C.free(unsafe.Pointer(sym))
sd_pid_get_slice := C.dlsym(handle, sym)
if sd_pid_get_slice == nil {
err = fmt.Errorf("error resolving sd_pid_get_slice function")
return
}
var s string
sl := C.CString(s)
defer C.free(unsafe.Pointer(sl))
ret := C.my_sd_pid_get_slice(sd_pid_get_slice, 0, &sl)
if ret < 0 {
err = fmt.Errorf("error calling sd_pid_get_slice: %v", syscall.Errno(-ret))
return
}
slice = C.GoString(sl)
return
}
func isRunningFromUnitFile() (ret bool, err error) {
libname := C.CString("libsystemd.so")
defer C.free(unsafe.Pointer(libname))
handle := C.dlopen(libname, C.RTLD_LAZY)
if handle == nil {
// we can't open libsystemd.so so we assume systemd is not
// installed and we're not running from a unit file
ret = false
return
}
defer func() {
if r := C.dlclose(handle); r != 0 {
err = fmt.Errorf("error closing libsystemd.so")
}
}()
sd_pid_get_owner_uid := C.dlsym(handle, C.CString("sd_pid_get_owner_uid"))
if sd_pid_get_owner_uid == nil {
err = fmt.Errorf("error resolving sd_pid_get_owner_uid function")
return
}
var uid C.uid_t
errno := C.my_sd_pid_get_owner_uid(sd_pid_get_owner_uid, 0, &uid)
// when we're running from a unit file, sd_pid_get_owner_uid returns
// ENOENT (systemd <220) or ENXIO (systemd >=220)
switch {
case errno >= 0:
ret = false
return
case syscall.Errno(-errno) == syscall.ENOENT || syscall.Errno(-errno) == syscall.ENXIO:
ret = true
return
default:
err = fmt.Errorf("error calling sd_pid_get_owner_uid: %v", syscall.Errno(-errno))
return
}
}
func main() {
flag.Parse()
if !debug {
log.SetOutput(ioutil.Discard)
}
// move code into stage1() helper so deferred fns get run
os.Exit(stage1())
}
|
[
"\"PATH\"",
"\"PATH\"",
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
go
| 1 | 0 | |
tests/unit/test_bake_project.py
|
import os
import subprocess
import sys
from pathlib import Path
import py
import pytest
import yaml
from cookiecutter.exceptions import FailedHookException
from pipx.constants import DEFAULT_PIPX_BIN_DIR, LOCAL_BIN_DIR
from pytest_cookies.plugin import Cookies # type: ignore
from pytest_virtualenv import VirtualEnv
from tests.utils import inside_dir
def test_project_tree(cookies: Cookies) -> None:
result = cookies.bake(extra_context={"project_dir": "test-project"})
assert result.exit_code == 0
assert result.exception is None
assert result.project_path.name == "test-project"
def test_run_flake8(cookies: Cookies) -> None:
result = cookies.bake(extra_context={"project_dir": "flake8-compat"})
with inside_dir(str(result.project_path)):
subprocess.check_call(["flake8"])
def test_project_dir_hook(cookies: Cookies) -> None:
result = cookies.bake(extra_context={"project_dir": "myproject"})
assert result.exit_code == 0
result = cookies.bake(extra_context={"project_dir": "my-project"})
assert result.exit_code == 0
result = cookies.bake(extra_context={"project_dir": "my?project"})
assert result.exit_code != 0
if sys.platform == "win32":
# Unfortunately, pre_gen hook is called before cookiecutter copies the template
# into the TMP dir for rendering.
# This will not hurt the user,
# but the error message will also include a traceback
assert isinstance(result.exception, OSError)
else:
assert isinstance(result.exception, FailedHookException)
result = cookies.bake(extra_context={"project_dir": "t" * 256})
assert result.exit_code != 0
def test_project_id_hook(cookies: Cookies) -> None:
wrong_ids = [
"qwe/qwe",
"qwe?qwe",
"qwe!qwe",
"qwe.qwe",
"qwe%qwe",
"qwe-qwe",
"-qwe",
"qwe-",
"qwe-qwe",
"123",
"1 23",
"1qwe23",
]
correct_ids = [
"qwe",
"q",
"qwe_qwe",
"_qwe",
"qwe_",
"qwe123",
"qwe_123",
"qwe" * 20,
]
for id_ in wrong_ids:
result = cookies.bake(extra_context={"project_id": id_})
assert result.exit_code != 0, id_
assert isinstance(result.exception, FailedHookException)
for id_ in correct_ids:
result = cookies.bake(extra_context={"project_id": id_})
assert result.exit_code == 0, id_
@pytest.mark.parametrize("preserve_comments", ["yes", "no"])
def test_project_config_with_comments(cookies: Cookies, preserve_comments: str) -> None:
result = cookies.bake(
extra_context={
"project_dir": "project-with-comments",
"preserve Neuro Flow template hints": preserve_comments,
}
)
assert result.exit_code == 0
comment_sign = "#"
with inside_dir(str(result.project_path)):
live_file_content = Path(".neuro/live.yml").read_text()
project_file_content = Path(".neuro/project.yml").read_text()
l_com_exists = comment_sign in live_file_content
p_com_exists = comment_sign in project_file_content
if preserve_comments == "yes":
assert l_com_exists, ".neuro/live.yml file does not contain comments"
assert p_com_exists, ".neuro/project.yml file does not contain comments"
elif preserve_comments == "no":
assert not l_com_exists, ".neuro/live.yml file contains comments"
assert not p_com_exists, ".neuro/project.yml file contains comments"
else:
raise RuntimeError(
f"invalid value '{preserve_comments}' for 'preserve_comments' arg. "
" Only 'yes' and 'no' are allowed."
)
def test_project_description(cookies: Cookies) -> None:
descriptions = [
# " ",
"Descrition!",
"123",
"https://github.com/neuro-inc/cookiecutter-neuro-project/",
]
for descr in descriptions:
result = cookies.bake(extra_context={"project_description": descr})
assert result.exit_code == 0, descr
with inside_dir(str(result.project_path)):
readme_content = Path("README.md").read_text()
if descr:
assert "## Project description" in readme_content
assert descr in readme_content
@pytest.mark.parametrize("venv_install_packages", ["", "neuro-cli", "neuro-all"])
def test_user_role_added(
tmpdir: py.path.local, venv_install_packages: str, monkeypatch: pytest.MonkeyPatch
) -> None:
cwd = Path(os.getcwd())
# This 'hides' neuro-cli installed via pipx
cur_path = os.environ["PATH"].split(os.pathsep)
avoid_paths = (
str(LOCAL_BIN_DIR),
str(DEFAULT_PIPX_BIN_DIR),
)
filtered_path = list(filter(lambda x: x not in avoid_paths, cur_path))
monkeypatch.setenv("PATH", os.pathsep.join(filtered_path))
with VirtualEnv() as venv:
if venv_install_packages:
venv.install_package(venv_install_packages, installer="pip")
venv.run(
("cookiecutter", cwd, "-o", tmpdir, "--no-input", "--default-config"),
capture=True,
)
proj_yml = yaml.safe_load(
Path(tmpdir / "neuro project" / ".neuro" / "project.yml").read_text()
)
if venv_install_packages:
assert "owner" in proj_yml
assert "role" in proj_yml
else:
assert "owner" not in proj_yml
assert "role" not in proj_yml
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
config/config.go
|
package config
import (
"fmt"
"os"
"github.com/USACE/filestore"
)
type APIConfig struct {
Host string
Port int
FileStore *filestore.FileStore
DestinationCRS int
}
// Address tells the application where to run the api out of
func (app *APIConfig) Address() string {
return fmt.Sprintf("%s:%d", app.Host, app.Port)
}
// Init initializes the API's configuration
func Init() *APIConfig {
config := new(APIConfig)
config.Host = "" // 0.0.0.0
config.Port = 5600
config.FileStore = FileStoreInit(false)
config.DestinationCRS = 4326
return config
}
// FileStoreInit initializes the filestore object
func FileStoreInit(local bool) *filestore.FileStore {
var fs filestore.FileStore
var err error
switch local {
case true:
fs, err = filestore.NewFileStore(filestore.BlockFSConfig{})
if err != nil {
panic(err)
}
case false:
config := filestore.S3FSConfig{
S3Id: os.Getenv("AWS_ACCESS_KEY_ID"),
S3Key: os.Getenv("AWS_SECRET_ACCESS_KEY"),
S3Region: os.Getenv("AWS_DEFAULT_REGION"),
S3Bucket: os.Getenv("S3_BUCKET"),
}
fs, err = filestore.NewFileStore(config)
if err != nil {
panic(err)
}
}
return &fs
}
|
[
"\"AWS_ACCESS_KEY_ID\"",
"\"AWS_SECRET_ACCESS_KEY\"",
"\"AWS_DEFAULT_REGION\"",
"\"S3_BUCKET\""
] |
[] |
[
"S3_BUCKET",
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
"AWS_DEFAULT_REGION"
] |
[]
|
["S3_BUCKET", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_DEFAULT_REGION"]
|
go
| 4 | 0 | |
src/z3c/celery/loader.py
|
from __future__ import absolute_import
import celery.concurrency.asynpool
import celery.loaders.app
import celery.signals
import celery.utils.collections
import imp
import logging.config
import os.path
import zope.app.wsgi
class ZopeLoader(celery.loaders.app.AppLoader):
"""Sets up the Zope environment in the Worker processes."""
def on_worker_init(self):
logging_ini = self.app.conf.get('LOGGING_INI')
if not logging_ini:
return
@celery.signals.setup_logging.connect(weak=False)
def setup_logging(*args, **kw):
"""Make the loglevel finely configurable via a config file."""
config_file = os.path.abspath(logging_ini)
logging.config.fileConfig(config_file, dict(
__file__=config_file, here=os.path.dirname(config_file)))
if self.app.conf.get('DEBUG_WORKER'):
assert self.app.conf.get('worker_pool') == 'solo'
self.on_worker_process_init()
# Work around <https://github.com/celery/celery/issues/4323>.
if self.app.conf.get('worker_boot_timeout'):
celery.concurrency.asynpool.PROC_ALIVE_TIMEOUT = float(
self.app.conf['worker_boot_timeout'])
def on_worker_process_init(self):
conf = self.app.conf
configfile = conf.get('ZOPE_CONF')
if not configfile:
raise ValueError(
'Celery setting ZOPE_CONF not set, '
'check celery worker config.')
db = zope.app.wsgi.config(configfile)
conf['ZODB'] = db
def on_worker_shutdown(self):
if 'ZODB' in self.app.conf:
self.app.conf['ZODB'].close()
def read_configuration(self):
"""Read configuration from either
* an importable python module, given by its dotted name in
CELERY_CONFIG_MODULE. Note that this can also be set via
`$ bin/celery worker --config=<modulename>`. (Also note that "celery
worker" includes the cwd on the pythonpath.)
* or a plain python file (given by an absolute path in
CELERY_CONFIG_FILE)
If neither env variable is present, no configuration is read, and some
defaults are used instead that most probably don't work (they assume
amqp on localhost as broker, for example).
"""
module = os.environ.get('CELERY_CONFIG_MODULE')
if module:
return super(ZopeLoader, self).read_configuration()
pyfile = os.environ.get('CELERY_CONFIG_FILE')
if pyfile:
module = self._import_pyfile(pyfile)
return celery.utils.collections.DictAttribute(module)
def _import_pyfile(self, filename):
"""Applies Celery configuration by reading the given python file
(absolute filename), which unfortunately Celery does not support.
(Code inspired by flask.config.Config.from_pyfile)
"""
module = imp.new_module('config')
module.__file__ = filename
try:
with open(filename) as config_file:
exec(compile(
config_file.read(), filename, 'exec'), module.__dict__)
except IOError as e:
e.strerror = 'Unable to load configuration file (%s)' % e.strerror
raise e
else:
return module
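# A hedged configuration sketch (file names and settings below are hypothetical, not part
# of z3c.celery): export
#   CELERY_CONFIG_FILE=/etc/myapp/celery_settings.py
# before starting the worker, where celery_settings.py is a plain Python file containing
# ordinary Celery settings plus the extras read above, e.g.
#   broker_url = 'amqp://localhost//'
#   ZOPE_CONF = '/etc/myapp/zope.conf'
#   LOGGING_INI = '/etc/myapp/logging.ini'
# read_configuration() then loads that file via _import_pyfile() and wraps it in
# celery.utils.collections.DictAttribute; CELERY_CONFIG_MODULE takes precedence when both
# environment variables are set.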
|
[] |
[] |
[
"CELERY_CONFIG_MODULE",
"CELERY_CONFIG_FILE"
] |
[]
|
["CELERY_CONFIG_MODULE", "CELERY_CONFIG_FILE"]
|
python
| 2 | 0 | |
device/iot-device-client/src/main/java/com/microsoft/azure/sdk/iot/device/ModuleClient.java
|
/*
* Copyright (c) Microsoft. All rights reserved.
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
package com.microsoft.azure.sdk.iot.device;
import com.microsoft.azure.sdk.iot.device.DeviceTwin.DeviceMethodCallback;
import com.microsoft.azure.sdk.iot.device.DeviceTwin.PropertyCallBack;
import com.microsoft.azure.sdk.iot.device.DeviceTwin.TwinPropertiesCallback;
import com.microsoft.azure.sdk.iot.device.DeviceTwin.TwinPropertyCallBack;
import com.microsoft.azure.sdk.iot.device.auth.IotHubAuthenticationProvider;
import com.microsoft.azure.sdk.iot.device.auth.SignatureProvider;
import com.microsoft.azure.sdk.iot.device.edge.HttpsHsmTrustBundleProvider;
import com.microsoft.azure.sdk.iot.device.edge.MethodRequest;
import com.microsoft.azure.sdk.iot.device.edge.MethodResult;
import com.microsoft.azure.sdk.iot.device.edge.TrustBundleProvider;
import com.microsoft.azure.sdk.iot.device.exceptions.ModuleClientException;
import com.microsoft.azure.sdk.iot.device.exceptions.TransportException;
import com.microsoft.azure.sdk.iot.device.hsm.HsmException;
import com.microsoft.azure.sdk.iot.device.hsm.HttpHsmSignatureProvider;
import com.microsoft.azure.sdk.iot.device.hsm.IotHubSasTokenHsmAuthenticationProvider;
import com.microsoft.azure.sdk.iot.device.transport.https.HttpsTransportManager;
import lombok.extern.slf4j.Slf4j;
import javax.net.ssl.SSLContext;
import java.io.IOException;
import java.net.URISyntaxException;
import java.security.NoSuchAlgorithmException;
import java.util.Map;
/**
* Public API for communicating from Edge Modules. A ModuleClient can be used to send messages from an Edge module to an EdgeHub or an IotHub.
* It can also send twin updates and listen for method calls from an EdgeHub or IotHub as well
*/
@Slf4j
public class ModuleClient extends InternalClient
{
private static final String DEFAULT_API_VERSION = "2018-06-28";
private static final long SEND_PERIOD_MILLIS = 10;
private static final long RECEIVE_PERIOD_MILLIS_AMQPS = 10;
private static final long RECEIVE_PERIOD_MILLIS_MQTT = 10;
private static final long RECEIVE_PERIOD_MILLIS_HTTPS = 25 * 60 * 1000; /*25 minutes*/
private static final int DEFAULT_SAS_TOKEN_TIME_TO_LIVE_SECONDS = 60 * 60; //1 hour
private static final int DEFAULT_SAS_TOKEN_BUFFER_PERCENTAGE = 85; //Token will go 85% of its life before renewing
private static final String IotEdgedUriVariableName = "IOTEDGE_WORKLOADURI";
private static final String IotHubHostnameVariableName = "IOTEDGE_IOTHUBHOSTNAME";
private static final String GatewayHostnameVariableName = "IOTEDGE_GATEWAYHOSTNAME";
private static final String DeviceIdVariableName = "IOTEDGE_DEVICEID";
private static final String ModuleIdVariableName = "IOTEDGE_MODULEID";
private static final String ModuleGenerationIdVariableName = "IOTEDGE_MODULEGENERATIONID";
private static final String AuthSchemeVariableName = "IOTEDGE_AUTHSCHEME";
private static final String SasTokenAuthScheme = "sasToken";
private static final String EdgehubConnectionstringVariableName = "EdgeHubConnectionString";
private static final String IothubConnectionstringVariableName = "IotHubConnectionString";
private static final String EdgeCaCertificateFileVariableName = "EdgeModuleCACertificateFile";
/**
* Constructor for a ModuleClient instance.
* @param connectionString The connection string for the edge module to connect to. Must be in format
* HostName=xxxx;deviceId=xxxx;SharedAccessKey=
* xxxx;moduleId=xxxx;
*
* or
*
* HostName=xxxx;DeviceId=xxxx;SharedAccessKey=
* xxxx;moduleId=xxxx;HostNameGateway=xxxx
* @param protocol The protocol to use when communicating with the module
* @throws ModuleClientException if an exception is encountered when parsing the connection string
* @throws UnsupportedOperationException if using any protocol besides MQTT, if the connection string is missing
* the "moduleId" field, or if the connection string uses x509
* @throws IllegalArgumentException if the provided connection string is null or empty, or if the provided protocol is null
* @throws URISyntaxException if the connection string cannot be parsed for a valid hostname
*/
public ModuleClient(String connectionString, IotHubClientProtocol protocol) throws ModuleClientException, IllegalArgumentException, UnsupportedOperationException, URISyntaxException
{
//Codes_SRS_MODULECLIENT_34_006: [This function shall invoke the super constructor.]
super(new IotHubConnectionString(connectionString), protocol, SEND_PERIOD_MILLIS, getReceivePeriod(protocol), null);
//Codes_SRS_MODULECLIENT_34_007: [If the provided protocol is not MQTT, AMQPS, MQTT_WS, or AMQPS_WS, this function shall throw an UnsupportedOperationException.]
//Codes_SRS_MODULECLIENT_34_004: [If the provided connection string does not contain a module id, this function shall throw an IllegalArgumentException.]
commonConstructorVerifications(protocol, this.config);
}
/**
* Constructor for a ModuleClient instance.
* @param connectionString The connection string for the edge module to connect to. Must be in format
* HostName=xxxx;deviceId=xxxx;SharedAccessKey=
* xxxx;moduleId=xxxx;
*
* or
*
* HostName=xxxx;DeviceId=xxxx;SharedAccessKey=
* xxxx;moduleId=xxxx;HostNameGateway=xxxx
* @param protocol The protocol to use when communicating with the module
* @param clientOptions The options that allow configuration of the module client instance during initialization
* @throws ModuleClientException if an exception is encountered when parsing the connection string
* @throws UnsupportedOperationException if using any protocol besides MQTT, if the connection string is missing
* the "moduleId" field, or if the connection string uses x509
* @throws IllegalArgumentException if the provided connection string is null or empty, or if the provided protocol is null
* @throws URISyntaxException if the connection string cannot be parsed for a valid hostname
*/
public ModuleClient(String connectionString, IotHubClientProtocol protocol, ClientOptions clientOptions) throws ModuleClientException, IllegalArgumentException, UnsupportedOperationException, URISyntaxException
{
super(new IotHubConnectionString(connectionString), protocol, SEND_PERIOD_MILLIS, getReceivePeriod(protocol), clientOptions);
commonConstructorVerifications(protocol, this.config);
}
/**
* Create a module client instance that uses x509 authentication.
*
* <p>Note! Communication from a module to another EdgeHub using x509 authentication is not currently supported and
* the service will always return "UNAUTHORIZED"</p>
*
* <p>Communication from a module directly to the IotHub does support x509 authentication, though.</p>
* @param connectionString The connection string for the edge module to connect to. Must be in format
* HostName=xxxx;deviceId=xxxx;SharedAccessKey=
* xxxx;moduleId=xxxx;
*
* or
*
* HostName=xxxx;DeviceId=xxxx;SharedAccessKey=
* xxxx;moduleId=xxxx;HostNameGateway=xxxx
* @param protocol The protocol to communicate with
* @param publicKeyCertificate The PEM formatted string for the public key certificate or the system path to the file containing the PEM.
* @param isCertificatePath 'false' if the publicKeyCertificate argument is a path to the PEM, and 'true' if it is the PEM string itself,
* @param privateKey The PEM formatted string for the private key or the system path to the file containing the PEM.
* @param isPrivateKeyPath 'false' if the privateKey argument is a path to the PEM, and 'true' if it is the PEM string itself,
* @throws URISyntaxException If the connString cannot be parsed
* @throws ModuleClientException if any other exception occurs while building the module client
* @throws URISyntaxException if the hostname in the connection string is not a valid URI
* @deprecated For x509 authentication, use {@link #ModuleClient(String, IotHubClientProtocol, ClientOptions)} and provide
* an SSLContext instance in the {@link ClientOptions} instance. For a sample on how to build this SSLContext,
* see <a href="https://github.com/Azure/azure-iot-sdk-java/blob/master/device/iot-device-samples/send-event-x509/src/main/java/samples/com/microsoft/azure/sdk/iot/SendEventX509.java">this code</a> which references
* a helper class for building SSLContext objects for x509 authentication as well as for SAS based authentication.
* When not using this deprecated constructor, you can safely exclude the Bouncycastle dependencies that this library declares.
* See <a href="https://github.com/Azure/azure-iot-sdk-java/blob/master/device/iot-device-samples/send-event-x509/pom.xml">this pom.xml</a> for an example of how to do this.
*/
@Deprecated
public ModuleClient(String connectionString, IotHubClientProtocol protocol, String publicKeyCertificate, boolean isCertificatePath, String privateKey, boolean isPrivateKeyPath) throws ModuleClientException, URISyntaxException
{
super(new IotHubConnectionString(connectionString), protocol, publicKeyCertificate, isCertificatePath, privateKey, isPrivateKeyPath, SEND_PERIOD_MILLIS, getReceivePeriod(protocol));
//Codes_SRS_MODULECLIENT_34_008: [If the provided protocol is not MQTT, AMQPS, MQTT_WS, or AMQPS_WS, this function shall throw an UnsupportedOperationException.]
//Codes_SRS_MODULECLIENT_34_009: [If the provided connection string does not contain a module id, this function shall throw an IllegalArgumentException.]
commonConstructorVerifications(protocol, this.getConfig());
}
/**
* Create a module client instance that uses the provided SSLContext for SSL negotiation.
*
* @param connectionString The connection string for the edge module to connect to. May be an x509 connection string
* or a SAS connection string. If it is an x509 connection string, the provided SSLContext will be
* used for x509 authentication
* @param protocol The protocol to communicate with
* @param sslContext the ssl context that will be used during authentication. If the provided connection string does not contain
* SAS based credentials, then the sslContext will be used for x509 authentication. If the provided connection string
* does contain SAS based credentials, the sslContext will still be used during SSL negotiation.
* @throws URISyntaxException if the hostname in the connection string is not a valid URI
* @deprecated For x509 authentication, use {@link #ModuleClient(String, IotHubClientProtocol, ClientOptions)} and provide
* an SSLContext instance in the {@link ClientOptions} instance. For a sample on how to build this SSLContext,
* see <a href="https://github.com/Azure/azure-iot-sdk-java/blob/master/device/iot-device-samples/send-event-x509/src/main/java/samples/com/microsoft/azure/sdk/iot/SendEventX509.java">this code</a> which references
* a helper class for building SSLContext objects for x509 authentication as well as for SAS based authentication.
* When not using this deprecated constructor, you can safely exclude the Bouncycastle dependencies that this library declares.
* See <a href="https://github.com/Azure/azure-iot-sdk-java/blob/master/device/iot-device-samples/send-event-x509/pom.xml">this pom.xml</a> for an example of how to do this.
*/
@Deprecated
public ModuleClient(String connectionString, IotHubClientProtocol protocol, SSLContext sslContext) throws ModuleClientException, URISyntaxException
{
super(new IotHubConnectionString(connectionString), protocol, sslContext, SEND_PERIOD_MILLIS, getReceivePeriod(protocol));
commonConstructorVerifications(protocol, this.getConfig());
}
/**
* Constructor that allows for the client's SAS token generation to be controlled by the user. Note that options in
* this client such as setting the SAS token expiry time will throw {@link UnsupportedOperationException} since
* the SDK no longer controls that when this constructor is used.
*
* @param hostName The host name of the IoT Hub that this client will connect to.
* @param deviceId The Id of the device containing the module that the connection will identify as.
* @param moduleId The Id of the module that the connection will identify as.
* @param sasTokenProvider The provider of all SAS tokens that are used during authentication.
* @param protocol The protocol that the client will connect over.
*/
public ModuleClient(String hostName, String deviceId, String moduleId, SasTokenProvider sasTokenProvider, IotHubClientProtocol protocol)
{
this(hostName, deviceId, moduleId, sasTokenProvider, protocol, null);
}
/**
* Constructor that allows for the client's SAS token generation to be controlled by the user. Note that options in
* this client such as setting the SAS token expiry time will throw {@link UnsupportedOperationException} since
* the SDK no longer controls that when this constructor is used.
*
* @param hostName The host name of the IoT Hub that this client will connect to.
* @param deviceId The Id of the device containing the module that the connection will identify as.
* @param moduleId The Id of the module that the connection will identify as.
* @param sasTokenProvider The provider of all SAS tokens that are used during authentication.
* @param protocol The protocol that the client will connect over.
* @param clientOptions The options that allow configuration of the module client instance during initialization.
*/
public ModuleClient(String hostName, String deviceId, String moduleId, SasTokenProvider sasTokenProvider, IotHubClientProtocol protocol, ClientOptions clientOptions)
{
super(hostName, deviceId, moduleId, sasTokenProvider, protocol, clientOptions, SEND_PERIOD_MILLIS, getReceivePeriod(protocol));
commonConstructorVerifications(protocol, this.getConfig());
}
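// Illustrative usage (not part of the SDK source): a minimal sketch of supplying SAS tokens from user
// code instead of letting the SDK generate them, assuming SasTokenProvider exposes a single
// token-producing method and that fetchTokenFromMySecretStore() is a hypothetical helper.
//
//     SasTokenProvider tokenProvider = () -> fetchTokenFromMySecretStore();
//     ModuleClient client = new ModuleClient("myhub.azure-devices.net", "myDevice", "myModule",
//             tokenProvider, IotHubClientProtocol.MQTT);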
/**
* Create a module client instance from your environment variables
* @return the created module client instance
* @throws ModuleClientException if the module client cannot be created
*/
public static ModuleClient createFromEnvironment() throws ModuleClientException
{
return createFromEnvironment(IotHubClientProtocol.AMQPS);
}
/**
* Create a module client instance from your environment variables
* @param protocol the protocol the module client instance will use
* @return the created module client instance
* @throws ModuleClientException if the module client cannot be created
*/
public static ModuleClient createFromEnvironment(IotHubClientProtocol protocol) throws ModuleClientException
{
return createFromEnvironment(protocol, null);
}
/**
* Create a module client instance from your environment variables
* @param protocol the protocol the module client instance will use
* @param clientOptions The options that allow configuration of the module client instance during initialization
* @return the created module client instance
* @throws ModuleClientException if the module client cannot be created
*/
public static ModuleClient createFromEnvironment(IotHubClientProtocol protocol, ClientOptions clientOptions) throws ModuleClientException
{
log.info("Creating module client from environment with protocol {}...", protocol);
Map<String, String> envVariables = System.getenv();
//Codes_SRS_MODULECLIENT_34_013: [This function shall check for a saved edgehub connection string.]
log.debug("Checking for an edgehub connection string...");
String connectionString = envVariables.get(EdgehubConnectionstringVariableName);
if (connectionString == null)
{
log.debug("No edgehub connection string was configured, checking for an IoThub connection string...");
//Codes_SRS_MODULECLIENT_34_019: [If no edgehub connection string is present, this function shall check for a saved iothub connection string.]
connectionString = envVariables.get(IothubConnectionstringVariableName);
}
// First try to create from connection string and if env variable for connection string is not found try to create from edgedUri
if (connectionString != null)
{
log.debug("Creating module client with the provided connection string");
//Codes_SRS_MODULECLIENT_34_020: [If an edgehub or iothub connection string is present, this function shall create a module client instance using that connection string and the provided protocol.]
ModuleClient moduleClient;
try
{
moduleClient = new ModuleClient(connectionString, protocol, clientOptions);
}
catch (URISyntaxException e)
{
throw new ModuleClientException("Could not create module client", e);
}
//Check for a different default cert to be used
String alternativeDefaultTrustedCert = envVariables.get(EdgeCaCertificateFileVariableName);
if (alternativeDefaultTrustedCert != null && !alternativeDefaultTrustedCert.isEmpty())
{
log.debug("Configuring module client to use the configured alternative trusted certificate");
//Codes_SRS_MODULECLIENT_34_031: [If an alternative default trusted cert is saved in the environment
// variables, this function shall set that trusted cert in the created module client.]
moduleClient.setOption_SetCertificatePath(alternativeDefaultTrustedCert);
}
return moduleClient;
}
else
{
log.info("No connection string was configured for this module, so it will get its credentials from the edgelet");
//Codes_SRS_MODULECLIENT_34_014: [This function shall check for environment variables for edgedUri, deviceId, moduleId,
// hostname, authScheme, gatewayHostname, and generationId. If any of these other than gatewayHostname is missing,
// this function shall throw a ModuleClientException.]
String edgedUri = envVariables.get(IotEdgedUriVariableName);
String deviceId = envVariables.get(DeviceIdVariableName);
String moduleId = envVariables.get(ModuleIdVariableName);
String hostname = envVariables.get(IotHubHostnameVariableName);
String authScheme = envVariables.get(AuthSchemeVariableName);
String gatewayHostname = envVariables.get(GatewayHostnameVariableName);
String generationId = envVariables.get(ModuleGenerationIdVariableName);
if (edgedUri == null)
{
throw new ModuleClientException("Environment variable " + IotEdgedUriVariableName + " is required.");
}
if (deviceId == null)
{
throw new ModuleClientException("Environment variable " + DeviceIdVariableName + " is required.");
}
if (moduleId == null)
{
throw new ModuleClientException("Environment variable " + ModuleIdVariableName + " is required.");
}
if (hostname == null)
{
throw new ModuleClientException("Environment variable " + IotHubHostnameVariableName + " is required.");
}
if (authScheme == null)
{
throw new ModuleClientException("Environment variable " + AuthSchemeVariableName + " is required.");
}
if (generationId == null)
{
throw new ModuleClientException("Environment variable " + ModuleGenerationIdVariableName + " is required");
}
if (!authScheme.equalsIgnoreCase(SasTokenAuthScheme))
{
//Codes_SRS_MODULECLIENT_34_030: [If the auth scheme environment variable is not "SasToken", this function shall throw a moduleClientException.]
throw new ModuleClientException("Unsupported authentication scheme. Supported scheme is " + SasTokenAuthScheme + ".");
}
SignatureProvider signatureProvider;
try
{
signatureProvider = new HttpHsmSignatureProvider(edgedUri, DEFAULT_API_VERSION);
}
catch (NoSuchAlgorithmException | URISyntaxException e)
{
throw new ModuleClientException("Could not use Hsm Signature Provider", e);
}
try
{
//Codes_SRS_MODULECLIENT_34_017: [This function shall create an authentication provider using the created
// signature provider, and the environment variables for deviceid, moduleid, hostname, gatewayhostname,
// and the default time for tokens to live and the default sas token buffer time.]
IotHubAuthenticationProvider iotHubAuthenticationProvider = IotHubSasTokenHsmAuthenticationProvider.create(signatureProvider, deviceId, moduleId, hostname, gatewayHostname, generationId, DEFAULT_SAS_TOKEN_TIME_TO_LIVE_SECONDS, DEFAULT_SAS_TOKEN_BUFFER_PERCENTAGE);
//Codes_SRS_MODULECLIENT_34_018: [This function shall return a new ModuleClient instance built from the created authentication provider and the provided protocol.]
ModuleClient moduleClient = new ModuleClient(iotHubAuthenticationProvider, protocol, SEND_PERIOD_MILLIS, getReceivePeriod(protocol));
if (gatewayHostname != null && !gatewayHostname.isEmpty())
{
//Codes_SRS_MODULECLIENT_34_032: [This function shall retrieve the trust bundle from the hsm and set them in the module client.]
TrustBundleProvider trustBundleProvider = new HttpsHsmTrustBundleProvider();
String trustCertificates = trustBundleProvider.getTrustBundleCerts(edgedUri, DEFAULT_API_VERSION);
moduleClient.setTrustedCertificates(trustCertificates);
}
return moduleClient;
}
catch (IOException | TransportException | HsmException | URISyntaxException e)
{
throw new ModuleClientException(e);
}
}
}
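// Illustrative usage (not part of the SDK source): a minimal sketch of how module code running inside
// an IoT Edge container might obtain a client from its environment, assuming the edge runtime has
// injected either a connection string or the edgelet variables checked above. Exception handling omitted.
//
//     ModuleClient client = ModuleClient.createFromEnvironment(IotHubClientProtocol.AMQPS);
//     client.open();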
@SuppressWarnings("SameParameterValue") // The SEND_PERIOD is currently 10ms for all protocols, but can be made configurable in the future.
private ModuleClient(IotHubAuthenticationProvider iotHubAuthenticationProvider, IotHubClientProtocol protocol, long sendPeriodMillis, long receivePeriodMillis) throws IOException, TransportException
{
super(iotHubAuthenticationProvider, protocol, sendPeriodMillis, receivePeriodMillis);
}
/**
* Sends a message to a particular outputName asynchronously
*
* @param outputName the outputName to route the message to
* @param message the message to send
* @param callback the callback to be fired when the message is acknowledged by the service
* @param callbackContext the context to be included in the callback when fired
* @throws IllegalArgumentException if the provided outputName is null or empty
*/
public void sendEventAsync(Message message, IotHubEventCallback callback, Object callbackContext, String outputName) throws IllegalArgumentException
{
if (outputName == null || outputName.isEmpty())
{
//Codes_SRS_MODULECLIENT_34_001: [If the provided outputName is null or empty, this function shall throw an IllegalArgumentException.]
throw new IllegalArgumentException("outputName cannot be null or empty");
}
//Codes_SRS_MODULECLIENT_34_002: [This function shall set the provided message with the provided outputName.]
message.setOutputName(outputName);
//Codes_SRS_MODULECLIENT_34_003: [This function shall invoke super.sendEventAsync(message, callback, callbackContext).]
this.sendEventAsync(message, callback, callbackContext);
}
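// Illustrative usage (not part of the SDK source): routing a telemetry message to a named output,
// assuming an edge deployment route reads FROM this module's "output1". The payload and output name
// are placeholders; exception handling omitted.
//
//     Message telemetry = new Message("{\"temperature\": 21.5}");
//     client.sendEventAsync(telemetry, (statusCode, context) -> { /* inspect statusCode */ }, null, "output1");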
@Override
public void sendEventAsync(Message message, IotHubEventCallback callback, Object callbackContext) throws IllegalArgumentException
{
//Codes_SRS_MODULECLIENT_34_040: [This function shall set the message's connection moduleId to the config's saved module id.]
message.setConnectionModuleId(this.config.getModuleId());
//Codes_SRS_MODULECLIENT_34_041: [This function shall invoke super.sendEventAsync(message, callback, callbackContext).]
super.sendEventAsync(message, callback, callbackContext);
}
/**
* Invoke a method on a device
* @param deviceId the device to invoke a method on
* @param methodRequest the request containing the method to invoke on the device
* @return the result of the method call
* @throws ModuleClientException if the method cannot be invoked
* @throws IllegalArgumentException if deviceid is null or empty
*/
public MethodResult invokeMethod(String deviceId, MethodRequest methodRequest) throws ModuleClientException, IllegalArgumentException
{
if (deviceId == null || deviceId.isEmpty())
{
//Codes_SRS_MODULECLIENT_34_039: [If the provided deviceId is null or empty, this function shall throw an IllegalArgumentException.]
throw new IllegalArgumentException("DeviceId cannot be null or empty");
}
try
{
//Codes_SRS_MODULECLIENT_34_033: [This function shall create an HttpsTransportManager and use it to invoke the method on the device.]
HttpsTransportManager httpsTransportManager = new HttpsTransportManager(this.config);
httpsTransportManager.open();
return httpsTransportManager.invokeMethod(methodRequest, deviceId, "");
}
catch (URISyntaxException | IOException | TransportException e)
{
//Codes_SRS_MODULECLIENT_34_034: [If this function encounters an exception, it shall throw a moduleClientException with that exception nested.]
throw new ModuleClientException("Could not invoke method", e);
}
}
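// Illustrative usage (not part of the SDK source): invoking a direct method on a downstream device from
// this module, assuming MethodRequest accepts a method name and a JSON payload string. "myLeafDevice"
// and the payload are placeholders.
//
//     MethodRequest request = new MethodRequest("reboot", "{\"delayInSeconds\": 5}");
//     MethodResult result = client.invokeMethod("myLeafDevice", request);
//     int status = result.getStatus();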
/**
* Invoke a method on a module
* @param deviceId the device the module belongs to
* @param moduleId the module to invoke the method on
* @param methodRequest the request containing the method to invoke on the device
* @return the result of the method call
* @throws ModuleClientException if the method cannot be invoked
* @throws IllegalArgumentException if deviceid is null or empty, or if moduleid is null or empty
*/
public MethodResult invokeMethod(String deviceId, String moduleId, MethodRequest methodRequest) throws ModuleClientException, IllegalArgumentException
{
if (deviceId == null || deviceId.isEmpty())
{
//Codes_SRS_MODULECLIENT_34_037: [If the provided deviceId is null or empty, this function shall throw an IllegalArgumentException.]
throw new IllegalArgumentException("DeviceId cannot be null or empty");
}
if (moduleId == null || moduleId.isEmpty())
{
//Codes_SRS_MODULECLIENT_34_038: [If the provided moduleId is null or empty, this function shall throw an IllegalArgumentException.]
throw new IllegalArgumentException("ModuleId cannot be null or empty");
}
try
{
//Codes_SRS_MODULECLIENT_34_035: [This function shall create an HttpsTransportManager and use it to invoke the method on the module.]
HttpsTransportManager httpsTransportManager = new HttpsTransportManager(this.config);
httpsTransportManager.open();
return httpsTransportManager.invokeMethod(methodRequest, deviceId, moduleId);
}
catch (URISyntaxException | IOException | TransportException e)
{
//Codes_SRS_MODULECLIENT_34_036: [If this function encounters an exception, it shall throw a moduleClientException with that exception nested.]
throw new ModuleClientException("Could not invoke method", e);
}
}
/**
* Retrieves the twin's latest desired properties
* @throws IOException if the iothub cannot be reached
*/
public void getTwin() throws IOException
{
this.getTwinInternal();
}
/**
* Starts the module twin. This module client will receive a callback with the current state of the full twin, including
* reported properties and desired properties. After that callback is received, this module client will receive a callback
* each time a desired property is updated. That callback will either contain the full desired properties set, or
* only the updated desired property depending on how the desired property was changed. IoT Hub supports a PUT and a PATCH
* on the twin. The PUT will cause this module client to receive the full desired properties set, and the PATCH
* will cause this module client to only receive the updated desired properties. Similarly, the version
* of each desired property will be incremented from a PUT call, and only the actually updated desired property will
* have its version incremented from a PATCH call. The Java service client library uses the PATCH call when updating desired properties,
* but it builds the patch such that all properties are included in the patch. As a result, the device side will receive full twin
* updates, not partial updates.
*
* See <a href="https://docs.microsoft.com/en-us/rest/api/iothub/service/twin/replacemoduletwin">PUT</a> and
* <a href="https://docs.microsoft.com/en-us/rest/api/iothub/service/twin/updatemoduletwin">PATCH</a>
*
* @param deviceTwinStatusCallback the IotHubEventCallback callback for providing the status of Device Twin operations. Cannot be {@code null}.
* @param deviceTwinStatusCallbackContext the context to be passed to the status callback. Can be {@code null}.
* @param genericPropertyCallBack the PropertyCallBack callback for providing any changes in desired properties. Cannot be {@code null}.
* @param genericPropertyCallBackContext the context to be passed to the property callback. Can be {@code null}.
* @param <Type1> The type of the desired property key. Since the twin is a json object, the key will always be a String.
* @param <Type2> The type of the desired property value.
*
* @throws IllegalArgumentException if the callback is {@code null}
* @throws UnsupportedOperationException if called more than once on the same device
* @throws IOException if called when client is not opened
*/
public <Type1, Type2> void startTwin(IotHubEventCallback deviceTwinStatusCallback, Object deviceTwinStatusCallbackContext,
PropertyCallBack<Type1, Type2> genericPropertyCallBack, Object genericPropertyCallBackContext)
throws IOException, IllegalArgumentException, UnsupportedOperationException
{
this.startTwinInternal(deviceTwinStatusCallback, deviceTwinStatusCallbackContext, genericPropertyCallBack, genericPropertyCallBackContext);
}
/**
* Starts the module twin. This module client will receive a callback with the current state of the full twin, including
* reported properties and desired properties. After that callback is received, this module client will receive a callback
* each time a desired property is updated. That callback will either contain the full desired properties set, or
* only the updated desired property depending on how the desired property was changed. IoT Hub supports a PUT and a PATCH
* on the twin. The PUT will cause this module client to receive the full desired properties set, and the PATCH
* will cause this module client to only receive the updated desired properties. Similarly, the version
* of each desired property will be incremented from a PUT call, and only the actually updated desired property will
* have its version incremented from a PATCH call. The Java service client library uses the PATCH call when updating desired properties,
* but it builds the patch such that all properties are included in the patch. As a result, the device side will receive full twin
* updates, not partial updates.
*
* See <a href="https://docs.microsoft.com/en-us/rest/api/iothub/service/twin/replacemoduletwin">PUT</a> and
* <a href="https://docs.microsoft.com/en-us/rest/api/iothub/service/twin/updatemoduletwin">PATCH</a>
*
* @param deviceTwinStatusCallback the IotHubEventCallback callback for providing the status of Device Twin operations. Cannot be {@code null}.
* @param deviceTwinStatusCallbackContext the context to be passed to the status callback. Can be {@code null}.
* @param genericPropertyCallBack the TwinPropertyCallBack callback for providing any changes in desired properties. Cannot be {@code null}.
* @param genericPropertyCallBackContext the context to be passed to the property callback. Can be {@code null}.
*
* @throws IllegalArgumentException if the callback is {@code null}
* @throws UnsupportedOperationException if called more than once on the same device
* @throws IOException if called when client is not opened
*/
public void startTwin(IotHubEventCallback deviceTwinStatusCallback, Object deviceTwinStatusCallbackContext,
TwinPropertyCallBack genericPropertyCallBack, Object genericPropertyCallBackContext)
throws IOException, IllegalArgumentException, UnsupportedOperationException
{
this.startTwinInternal(deviceTwinStatusCallback, deviceTwinStatusCallbackContext, genericPropertyCallBack, genericPropertyCallBackContext);
}
/**
* Starts the module twin. This module client will receive a callback with the current state of the full twin, including
* reported properties and desired properties. After that callback is received, this module client will receive a callback
* each time a desired property is updated. That callback will either contain the full desired properties set, or
* only the updated desired property depending on how the desired property was changed. IoT Hub supports a PUT and a PATCH
* on the twin. The PUT will cause this module client to receive the full desired properties set, and the PATCH
* will cause this module client to only receive the updated desired properties. Similarly, the version
* of each desired property will be incremented from a PUT call, and only the actually updated desired property will
* have its version incremented from a PATCH call. The Java service client library uses the PATCH call when updating desired properties,
* but it builds the patch such that all properties are included in the patch. As a result, the device side will receive full twin
* updates, not partial updates.
*
* See <a href="https://docs.microsoft.com/en-us/rest/api/iothub/service/twin/replacemoduletwin">PUT</a> and
* <a href="https://docs.microsoft.com/en-us/rest/api/iothub/service/twin/updatemoduletwin">PATCH</a>
*
* @param deviceTwinStatusCallback the IotHubEventCallback callback for providing the status of Device Twin operations. Cannot be {@code null}.
* @param deviceTwinStatusCallbackContext the context to be passed to the status callback. Can be {@code null}.
* @param genericPropertiesCallBack the TwinPropertyCallBack callback for providing any changes in desired properties. Cannot be {@code null}.
* @param genericPropertyCallBackContext the context to be passed to the property callback. Can be {@code null}.
*
* @throws IllegalArgumentException if the callback is {@code null}
* @throws UnsupportedOperationException if called more than once on the same device
* @throws IOException if called when client is not opened
*/
public void startTwin(IotHubEventCallback deviceTwinStatusCallback, Object deviceTwinStatusCallbackContext,
TwinPropertiesCallback genericPropertiesCallBack, Object genericPropertyCallBackContext)
throws IOException, IllegalArgumentException, UnsupportedOperationException
{
this.startTwinInternal(deviceTwinStatusCallback, deviceTwinStatusCallbackContext, genericPropertiesCallBack, genericPropertyCallBackContext);
}
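// Illustrative usage (not part of the SDK source): subscribing to desired property updates with the
// TwinPropertyCallBack overload and then requesting the current twin. The property handling shown here
// is only a placeholder; exception handling omitted.
//
//     client.startTwin(
//             (statusCode, context) -> { /* twin operation status */ }, null,
//             (property, context) -> System.out.println(property.getKey() + " = " + property.getValue()), null);
//     client.getTwin();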
/**
* Subscribes to method invocations on this module. This does not include method invocations on the device the module belongs to
*
* @param methodCallback Callback on which device methods shall be invoked. Cannot be {@code null}.
* @param methodCallbackContext Context for device method callback. Can be {@code null}.
* @param methodStatusCallback Callback for providing IotHub status for device methods. Cannot be {@code null}.
* @param methodStatusCallbackContext Context for device method status callback. Can be {@code null}.
*
* @throws IOException if called when client is not opened.
* @throws IllegalArgumentException if either callback is null.
*/
public void subscribeToMethod(DeviceMethodCallback methodCallback, Object methodCallbackContext,
IotHubEventCallback methodStatusCallback, Object methodStatusCallbackContext)
throws IOException, IllegalArgumentException
{
this.subscribeToMethodsInternal(methodCallback, methodCallbackContext, methodStatusCallback, methodStatusCallbackContext);
}
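// Illustrative usage (not part of the SDK source): handling direct method calls made to this module,
// assuming DeviceMethodData carries the response status and payload. Placeholder logic only.
//
//     client.subscribeToMethod(
//             (methodName, methodData, context) -> new DeviceMethodData(200, "{\"result\": \"ok\"}"),
//             null,
//             (statusCode, context) -> { /* subscription status */ }, null);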
/**
* Sets the message callback.
*
* @param callback the message callback. Can be {@code null}.
* @param context the context to be passed to the callback. Can be {@code null}.
*
* @return itself, for fluent setting.
*
* @throws IllegalArgumentException if the callback is {@code null} but a context is
* passed in.
* @throws IllegalStateException if the callback is set after the client is
* closed.
*/
public ModuleClient setMessageCallback(MessageCallback callback, Object context)
{
this.setMessageCallbackInternal(callback, context);
return this;
}
/**
* Sets the message callback to be fired when a telemetry message arrives on the specified input channel. All other
* messages will trigger the default message callback in setMessageCallback(MessageCallback callback, Object context).
* Any message that triggers this callback will not also trigger the default callback.
*
* @param inputName the input name channel to listen for.
* @param callback the message callback. Can be {@code null}.
* @param context the context to be passed to the callback. Can be {@code null}.
*
* @return this object, for fluent setting
*/
public ModuleClient setMessageCallback(String inputName, MessageCallback callback, Object context)
{
if (inputName == null || inputName.isEmpty())
{
//Codes_SRS_MODULECLIENT_34_011: [If the provided inputName is null or empty, this function shall throw an IllegalArgumentException.]
throw new IllegalArgumentException("InputName must not be null or empty");
}
if (callback == null && context != null)
{
//Codes_SRS_MODULECLIENT_34_010: [If the provided callback is null and the provided context is not null, this function shall throw an IllegalArgumentException.]
throw new IllegalArgumentException("Cannot give non-null context for a null callback.");
}
//Codes_SRS_MODULECLIENT_34_012: [This function shall save the provided callback with context in config tied to the provided inputName.]
this.config.setMessageCallback(inputName, callback, context);
return this;
}
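// Illustrative usage (not part of the SDK source): listening on a specific input channel, assuming an
// edge route delivers messages into this module's "input1". Placeholder logic only.
//
//     client.setMessageCallback("input1", (message, context) -> {
//         System.out.println(new String(message.getBytes(), java.nio.charset.StandardCharsets.UTF_8));
//         return IotHubMessageResult.COMPLETE;
//     }, null);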
private static long getReceivePeriod(IotHubClientProtocol protocol)
{
switch (protocol)
{
case HTTPS:
return RECEIVE_PERIOD_MILLIS_HTTPS;
case AMQPS:
case AMQPS_WS:
return RECEIVE_PERIOD_MILLIS_AMQPS;
case MQTT:
case MQTT_WS:
return RECEIVE_PERIOD_MILLIS_MQTT;
default:
// should never happen.
throw new IllegalStateException(
"Invalid client protocol specified.");
}
}
private static void commonConstructorVerifications(IotHubClientProtocol protocol, DeviceClientConfig config)
{
if (protocol == IotHubClientProtocol.HTTPS)
{
throw new UnsupportedOperationException("Only MQTT, MQTT_WS, AMQPS and AMQPS_WS are supported for ModuleClient.");
}
if (config.getModuleId() == null || config.getModuleId().isEmpty())
{
throw new IllegalArgumentException("Connection string must contain field for ModuleId");
}
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
run_fetcher.py
|
# encoding: utf-8
"""
Run the registered fetchers on a schedule.
"""
import time
import os
import django
import importlib
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ProxyPool.settings")
django.setup()
from proxy_api.models import Fetcher, StatusRecode
from fetchers.BaseFetcher import BaseFetcher
def main():
while True:
for fetcher_file in os.listdir("fetchers"):
# Skip files that are not fetcher modules, as well as the base class
if not fetcher_file.endswith("Fetcher.py") or fetcher_file == "BaseFetcher.py":
continue
# Derive the class name from the file name
fetcher_class_name = fetcher_file.split(".")[0]
# Import the fetcher class by its class name
FetcherClass = getattr(importlib.import_module(f"fetchers.{fetcher_class_name}"), fetcher_class_name)
# Check whether this fetcher has already been synced to the database
fetcher_objs = Fetcher.objects.filter(name=fetcher_class_name)
if fetcher_objs.count() == 0:
# Not synced yet, so create it
fetcher_obj = Fetcher()
fetcher_obj.name = fetcher_class_name
fetcher_obj.save()
else:
# Already exists, so reuse the stored record
fetcher_obj = fetcher_objs[0]
if time.time() - fetcher_obj.last_fetch_time > FetcherClass.fetch_gap and fetcher_obj.enable:
fetcher = FetcherClass(fetcher_obj)
fetcher.run()
time.sleep(BaseFetcher.fetch_gap/3)
# Record system status
print("Recording system status")
StatusRecode.make_recode()
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
cmd/abapAddonAssemblyKitReleasePackages_generated.go
|
// Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"path/filepath"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/piperenv"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/spf13/cobra"
)
type abapAddonAssemblyKitReleasePackagesOptions struct {
AbapAddonAssemblyKitEndpoint string `json:"abapAddonAssemblyKitEndpoint,omitempty"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
AddonDescriptor string `json:"addonDescriptor,omitempty"`
}
type abapAddonAssemblyKitReleasePackagesCommonPipelineEnvironment struct {
abap struct {
addonDescriptor string
}
}
func (p *abapAddonAssemblyKitReleasePackagesCommonPipelineEnvironment) persist(path, resourceName string) {
content := []struct {
category string
name string
value string
}{
{category: "abap", name: "addonDescriptor", value: p.abap.addonDescriptor},
}
errCount := 0
for _, param := range content {
err := piperenv.SetResourceParameter(path, resourceName, filepath.Join(param.category, param.name), param.value)
if err != nil {
log.Entry().WithError(err).Error("Error persisting piper environment.")
errCount++
}
}
if errCount > 0 {
log.Entry().Fatal("failed to persist Piper environment")
}
}
// AbapAddonAssemblyKitReleasePackagesCommand This step releases the physical Delivery Packages
func AbapAddonAssemblyKitReleasePackagesCommand() *cobra.Command {
const STEP_NAME = "abapAddonAssemblyKitReleasePackages"
metadata := abapAddonAssemblyKitReleasePackagesMetadata()
var stepConfig abapAddonAssemblyKitReleasePackagesOptions
var startTime time.Time
var commonPipelineEnvironment abapAddonAssemblyKitReleasePackagesCommonPipelineEnvironment
var createAbapAddonAssemblyKitReleasePackagesCmd = &cobra.Command{
Use: STEP_NAME,
Short: "This step releases the physical Delivery Packages",
Long: `This step takes the list of Software Component Versions from the addonDescriptor in the commonPipelineEnvironment.
The physical Delivery Packages in status “L” are released and uploaded to the "ABAP CP" section in the SAP artifactory object
store. The new status "R"eleased is written back to the addonDescriptor in the commonPipelineEnvironment.`,
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
telemetryData := telemetry.CustomData{}
telemetryData.ErrorCode = "1"
handler := func() {
commonPipelineEnvironment.persist(GeneralConfig.EnvRootPath, "commonPipelineEnvironment")
telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
telemetryData.ErrorCategory = log.GetErrorCategory().String()
telemetry.Send(&telemetryData)
}
log.DeferExitHandler(handler)
defer handler()
telemetry.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
abapAddonAssemblyKitReleasePackages(stepConfig, &telemetryData, &commonPipelineEnvironment)
telemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addAbapAddonAssemblyKitReleasePackagesFlags(createAbapAddonAssemblyKitReleasePackagesCmd, &stepConfig)
return createAbapAddonAssemblyKitReleasePackagesCmd
}
func addAbapAddonAssemblyKitReleasePackagesFlags(cmd *cobra.Command, stepConfig *abapAddonAssemblyKitReleasePackagesOptions) {
cmd.Flags().StringVar(&stepConfig.AbapAddonAssemblyKitEndpoint, "abapAddonAssemblyKitEndpoint", os.Getenv("PIPER_abapAddonAssemblyKitEndpoint"), "Base URL to the Addon Assembly Kit as a Service (AAKaaS) system")
cmd.Flags().StringVar(&stepConfig.Username, "username", os.Getenv("PIPER_username"), "User for the Addon Assembly Kit as a Service (AAKaaS) system")
cmd.Flags().StringVar(&stepConfig.Password, "password", os.Getenv("PIPER_password"), "Password for the Addon Assembly Kit as a Service (AAKaaS) system")
cmd.Flags().StringVar(&stepConfig.AddonDescriptor, "addonDescriptor", os.Getenv("PIPER_addonDescriptor"), "Structure in the commonPipelineEnvironment containing information about the Product Version and corresponding Software Component Versions")
cmd.MarkFlagRequired("abapAddonAssemblyKitEndpoint")
cmd.MarkFlagRequired("username")
cmd.MarkFlagRequired("password")
cmd.MarkFlagRequired("addonDescriptor")
}
// retrieve step metadata
func abapAddonAssemblyKitReleasePackagesMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "abapAddonAssemblyKitReleasePackages",
Aliases: []config.Alias{},
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Parameters: []config.StepParameters{
{
Name: "abapAddonAssemblyKitEndpoint",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "username",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "password",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "addonDescriptor",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "abap/addonDescriptor",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
},
},
},
}
return theMetaData
}
|
[
"\"PIPER_abapAddonAssemblyKitEndpoint\"",
"\"PIPER_username\"",
"\"PIPER_password\"",
"\"PIPER_addonDescriptor\""
] |
[] |
[
"PIPER_addonDescriptor",
"PIPER_password",
"PIPER_username",
"PIPER_abapAddonAssemblyKitEndpoint"
] |
[]
|
["PIPER_addonDescriptor", "PIPER_password", "PIPER_username", "PIPER_abapAddonAssemblyKitEndpoint"]
|
go
| 4 | 0 | |
map_service/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'map_service.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
ingest/api/ingestapi.py
|
#!/usr/bin/env python
"""
desc goes here
"""
import json
import logging
import os
import time
import uuid
from urllib.parse import urljoin, quote
import requests
from requests import HTTPError
from ingest.api.requests_utils import optimistic_session
class IngestApi:
def __init__(self, url=None, ingest_api_root=None):
format = '[%(filename)s:%(lineno)s - %(funcName)20s() ] %(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(format=format)
logging.getLogger("requests").setLevel(logging.WARNING)
self.logger = logging.getLogger(__name__)
if not url and 'INGEST_API' in os.environ:
url = os.environ['INGEST_API']
# expand interpolated env vars
url = os.path.expandvars(url)
self.logger.info("using " + url + " for ingest API")
self.url = url if url else "http://localhost:8080"
self.headers = {'Content-type': 'application/json'}
self.submission_links = {}
self.token = None
self.ingest_api_root = ingest_api_root if ingest_api_root is not None else self.get_root_url()
def set_token(self, token):
self.token = token
def get_root_url(self):
reply = requests.get(self.url, headers=self.headers)
return reply.json()["_links"]
def get_link_from_resource_url(self, resource_url, link_name):
r = requests.get(resource_url, headers=self.headers)
r.raise_for_status()
links = r.json().get('_links', {})
return links.get(link_name, {}).get('href')
def get_link_from_resource(self, resource, link_name):
links = resource.get('_links', {})
return links.get(link_name, {}).get('href')
def get_schemas(self, latest_only=True, high_level_entity=None, domain_entity=None, concrete_entity=None):
schema_url = self.get_schemas_url()
all_schemas = []
filtered_schemas = {}
if latest_only:
search_url = self.get_link_from_resource_url(schema_url, "search")
r = requests.get(search_url, headers=self.headers)
if r.status_code == requests.codes.ok:
response_j = json.loads(r.text)
all_schemas = list(self.getRelatedEntities("latestSchemas", response_j, "schemas"))
else:
all_schemas = list(self.getEntities(schema_url, "schemas"))
if high_level_entity:
all_schemas = list(filter(lambda schema: schema.get('highLevelEntity') == high_level_entity, all_schemas))
if domain_entity:
all_schemas = list(filter(lambda schema: schema.get('domainEntity') == domain_entity, all_schemas))
if concrete_entity:
all_schemas = list(filter(lambda schema: schema.get('concreteEntity') == concrete_entity, all_schemas))
return all_schemas
def get_schemas_url(self):
if "schemas" in self.ingest_api_root:
return self.ingest_api_root["schemas"]["href"].rsplit("{")[0]
return None
def getSubmissions(self):
params = {'sort': 'submissionDate,desc'}
r = requests.get(self.ingest_api_root["submissionEnvelopes"]["href"].rsplit("{")[0], params=params,
headers=self.headers)
if r.status_code == requests.codes.ok:
return json.loads(r.text)["_embedded"]["submissionEnvelopes"]
def getSubmissionIfModifiedSince(self, submissionId, datetimeUTC):
submissionUrl = self.getSubmissionUri(submissionId)
headers = self.headers
if datetimeUTC:
headers = {'If-Modified-Since': datetimeUTC}
self.logger.info('headers:' + str(headers))
r = requests.get(submissionUrl, headers=headers)
if r.status_code == requests.codes.ok:
submission = json.loads(r.text)
return submission
else:
self.logger.error(str(r))
def getProjects(self, id):
submissionUrl = self.url + '/submissionEnvelopes/' + id + '/projects'
r = requests.get(submissionUrl, headers=self.headers)
projects = []
if r.status_code == requests.codes.ok:
projects = json.loads(r.text)
return projects
def getProjectById(self, id):
submissionUrl = self.url + '/projects/' + id
r = requests.get(submissionUrl, headers=self.headers)
if r.status_code == requests.codes.ok:
project = json.loads(r.text)
return project
else:
raise ValueError("Project " + id + " could not be retrieved")
def getProjectByUuid(self, uuid):
return self.getEntityByUuid('projects', uuid)
def getEntityByUuid(self, entity_type, uuid):
url = self.url + f'/{entity_type}/search/findByUuid?uuid=' + uuid
# TODO make the endpoint consistent
if entity_type == 'submissionEnvelopes':
url = self.url + f'/{entity_type}/search/findByUuidUuid?uuid=' + uuid
r = requests.get(url, headers=self.headers)
r.raise_for_status()
return r.json()
def getFileBySubmissionUrlAndFileName(self, submissionUrl, fileName):
searchUrl = self._get_url_for_link(self.url + '/files/search', 'findBySubmissionEnvelopesInAndFileName')
searchUrl = searchUrl.replace('{?submissionEnvelope,fileName}', '')
r = requests.get(searchUrl, params={'submissionEnvelope': submissionUrl, 'fileName': fileName})
if r.status_code == requests.codes.ok:
return r.json()
return None
def getSubmissionEnvelope(self, submissionUrl):
r = requests.get(submissionUrl, headers=self.headers)
if r.status_code == requests.codes.ok:
submissionEnvelope = json.loads(r.text)
return submissionEnvelope
else:
raise ValueError("Submission Envelope " + submissionUrl + " could not be retrieved")
def getSubmissionByUuid(self, submissionUuid):
searchByUuidLink = self.get_link_from_resource_url(self.url + '/submissionEnvelopes/search', 'findByUuid')
searchByUuidLink = searchByUuidLink.replace('{?uuid}', '') # TODO: use a REST traverser instead of requests?
r = requests.get(searchByUuidLink, params={'uuid': submissionUuid})
if 200 <= r.status_code < 300:
return r.json()
else:
r.raise_for_status()
def getFiles(self, id):
submissionUrl = self.url + '/submissionEnvelopes/' + id + '/files'
r = requests.get(submissionUrl, headers=self.headers)
files = []
if r.status_code == requests.codes.ok:
files = json.loads(r.text)
return files
def getBundleManifests(self, id):
submissionUrl = self.url + '/submissionEnvelopes/' + id + '/bundleManifests'
r = requests.get(submissionUrl, headers=self.headers)
bundleManifests = []
if r.status_code == requests.codes.ok:
bundleManifests = json.loads(r.text)
return bundleManifests
def createSubmission(self, token):
auth_headers = {
'Content-type': 'application/json',
'Authorization': token
}
try:
r = requests.post(self.ingest_api_root["submissionEnvelopes"]["href"].rsplit("{")[0], data="{}",
headers=auth_headers)
r.raise_for_status()
submission = r.json()
submission_url = submission["_links"]["self"]["href"].rsplit("{")[0]
self.submission_links[submission_url] = submission["_links"]
return submission_url
except requests.exceptions.RequestException as err:
self.logger.error("Request failed: " + str(err))
raise
def get_submission_links(self, submission_url):
if not self.submission_links.get(submission_url):
r = requests.get(submission_url, headers=self.headers)
r.raise_for_status()
self.submission_links[submission_url] = r.json()["_links"]
return self.submission_links.get(submission_url)
def get_link_in_submisssion(self, submission_url, link_name):
links = self.get_submission_links(submission_url)
link_obj = links.get(link_name) # TODO what if link doesn't exist
link = link_obj['href'].rsplit("{")[0]
return link
def finishSubmission(self, submissionUrl):
r = requests.put(submissionUrl, headers=self.headers)
if r.status_code == requests.codes.update:
self.logger.info("Submission complete!")
return r.text
def updateSubmissionState(self, submissionId, state):
state_url = self.getSubmissionStateUrl(submissionId, state)
if state_url:
r = requests.put(state_url, headers=self.headers)
return self.handleResponse(r)
def getSubmissionStateUrl(self, submissionId, state):
submissionUrl = self.getSubmissionUri(submissionId)
response = requests.get(submissionUrl, headers=self.headers)
submission = self.handleResponse(response)
if submission and state in submission['_links']:
return submission['_links'][state]["href"].rsplit("{")[0]
return None
def handleResponse(self, response):
if response.ok:
return json.loads(response.text)
else:
self.logger.error('Response:' + response.text)
return None
def getSubmissionUri(self, submissionId):
return self.ingest_api_root["submissionEnvelopes"]["href"].rsplit("{")[0] + "/" + submissionId
def get_full_url(self, callback_link):
return urljoin(self.url, callback_link)
def get_process(self, process_url):
r = requests.get(process_url, headers=self.headers)
r.raise_for_status()
return r.json()
def getAnalyses(self, submissionUrl):
return self.getEntities(submissionUrl, "analyses")
def getEntities(self, submissionUrl, entityType, pageSize=None):
r = requests.get(submissionUrl, headers=self.headers)
if r.status_code == requests.codes.ok:
if entityType in json.loads(r.text)["_links"]:
if not pageSize:
yield from self._getAllObjectsFromSet(json.loads(r.text)["_links"][entityType]["href"], entityType)
else:
yield from self._getAllObjectsFromSet(json.loads(r.text)["_links"][entityType]["href"], entityType, pageSize)
def _getAllObjectsFromSet(self, url, entityType, pageSize=None):
params = dict()
if pageSize:
params = {"size": pageSize}
r = requests.get(url, headers=self.headers, params=params)
r.raise_for_status()
if r.status_code == requests.codes.ok:
if "_embedded" in json.loads(r.text):
for entity in json.loads(r.text)["_embedded"][entityType]:
yield entity
if "next" in json.loads(r.text)["_links"]:
for entity2 in self._getAllObjectsFromSet(json.loads(r.text)["_links"]["next"]["href"], entityType):
yield entity2
def getRelatedEntities(self, relation, entity, entityType):
# get the self link from entity
if relation in entity["_links"]:
entityUri = entity["_links"][relation]["href"]
for entity in self._getAllObjectsFromSet(entityUri, entityType):
yield entity
def _updateStatusToPending(self, submissionUrl):
r = requests.patch(submissionUrl, data="{\"submissionStatus\" : \"Pending\"}", headers=self.headers)
def createProject(self, submissionUrl, jsonObject):
return self.createEntity(submissionUrl, jsonObject, "projects", self.token)
def createBiomaterial(self, submissionUrl, jsonObject):
return self.createEntity(submissionUrl, jsonObject, "biomaterials")
def createProcess(self, submissionUrl, jsonObject):
return self.createEntity(submissionUrl, jsonObject, "processes")
def createSubmissionManifest(self, submissionUrl, jsonObject):
return self.createEntity(submissionUrl, jsonObject, 'submissionManifest')
def patch(self, url, patch):
r = requests.patch(url, json=patch)
r.raise_for_status()
return r
def createSubmissionError(self, submissionUrl, jsonObject):
return self.createEntity(submissionUrl, jsonObject, 'submissionErrors')
def createProtocol(self, submissionUrl, jsonObject):
return self.createEntity(submissionUrl, jsonObject, "protocols")
def createFile(self, submissionUrl, file_name, jsonObject):
# TODO: why do we need the submission's links before we can create a file on it?
# TODO: submission_links should be a cache;
# TODO: getting a submission's links should look in the cache before retrieving it from the API
fileSubmissionsUrl = self.get_link_in_submisssion(submissionUrl, 'files')
fileSubmissionsUrl = fileSubmissionsUrl + "/" + quote(file_name)
fileToCreateObject = {
"fileName": file_name,
"content": json.loads(jsonObject) # TODO jsonObject should be a dict()
}
time.sleep(0.001)
with optimistic_session(fileSubmissionsUrl) as session:
r = session.post(fileSubmissionsUrl, data=json.dumps(fileToCreateObject),
headers=self.headers)
# TODO Investigate why core is returning internal server error
if r.status_code == requests.codes.conflict or r.status_code == requests.codes.internal_server_error:
searchFiles = self.getFileBySubmissionUrlAndFileName(submissionUrl, file_name)
if searchFiles and searchFiles.get('_embedded') and searchFiles['_embedded'].get('files'):
fileInIngest = searchFiles['_embedded'].get('files')[0]
content = fileInIngest.get('content')
newContent = json.loads(jsonObject)
if content:
content.update(newContent)
else:
content = newContent
fileUrl = fileInIngest['_links']['self']['href']
time.sleep(0.001)
r = requests.patch(fileUrl, data=json.dumps({'content': content}), headers=self.headers)
self.logger.debug(f'Updating existing content of file {fileUrl}.')
r.raise_for_status()
return r.json()
def createEntity(self, submissionUrl, jsonObject, entityType, token=None):
auth_headers = {'Content-type': 'application/json',
'Authorization': token
}
submissionUrl = self.get_link_in_submisssion(submissionUrl, entityType)
self.logger.debug("posting " + submissionUrl)
with optimistic_session(submissionUrl) as session:
r = session.post(submissionUrl, data=jsonObject, headers=auth_headers)
r.raise_for_status()
return r.json()
# given a HCA object return the URI for the object from ingest
def getObjectId(self, entity):
if "_links" in entity:
entityUrl = entity["_links"]["self"]["href"].rsplit("{")[0]
return entityUrl
raise ValueError('Can\'t get id for ' + json.dumps(entity) + ' is it a HCA entity?')
def getObjectUuid(self, entityUri):
r = requests.get(entityUri,
headers=self.headers)
if r.status_code == requests.codes.ok:
return json.loads(r.text)["uuid"]["uuid"]
def linkEntity(self, fromEntity, toEntity, relationship):
if not fromEntity:
raise ValueError("Error: fromEntity is None")
if not toEntity:
raise ValueError("Error: toEntity is None")
if not relationship:
raise ValueError("Error: relationship is None")
# check each dict in turn for non-None-ness
fromEntityLinks = fromEntity["_links"] if "_links" in fromEntity else None
if not fromEntityLinks:
raise ValueError("Error: fromEntity has no _links")
fromEntityLinksRelationship = fromEntityLinks[relationship] if relationship in fromEntityLinks else None
if not fromEntityLinksRelationship:
raise ValueError("Error: fromEntityLinks has no {0} relationship".format(relationship))
fromEntityLinksRelationshipHref = fromEntityLinksRelationship["href"] if "href" in fromEntityLinksRelationship else None
if not fromEntityLinksRelationshipHref:
raise ValueError("Error: fromEntityLinksRelationship for relationship {0} has no href".format(relationship))
fromUri = fromEntity["_links"][relationship]["href"]
toUri = self.getObjectId(toEntity)
self._retry_when_http_error(0, self._post_link_entity, fromUri, toUri)
def _post_link_entity(self, fromUri, toUri):
self.logger.debug('fromUri ' + fromUri + ' toUri:' + toUri)
headers = {'Content-type': 'text/uri-list'}
r = requests.post(fromUri.rsplit("{")[0],
data=toUri.rsplit("{")[0], headers=headers)
return r
def _retry_when_http_error(self, tries, func, *args):
max_retries = 5
if tries < max_retries:
if tries > 1:
self.logger.info("no of tries: " + str(tries + 1))
r = None
try:
time.sleep(0.001)
r = func(*args)
r.raise_for_status()
except HTTPError:
self.logger.error("\nResponse was: " + str(r.status_code) + " (" + r.text + ")")
tries += 1
time.sleep(1)
r = self._retry_when_http_error(tries, func, *args)
except requests.ConnectionError as e:
self.logger.exception(str(e))
tries += 1
time.sleep(1)
r = self._retry_when_http_error(tries, func, *args)
except Exception as e:
self.logger.exception(str(e))
tries += 1
time.sleep(1)
r = self._retry_when_http_error(tries, func, *args)
return r
else:
error_message = "Maximum no of tries reached: " + str(max_retries)
self.logger.error(error_message)
return None
def _request_post(self, url, data, params, headers):
if params:
return requests.post(url, data=data, params=params, headers=headers)
return requests.post(url, data=data, headers=headers)
def _request_put(self, url, data, params, headers):
if params:
return requests.put(url, data=data, params=params, headers=headers)
return requests.put(url, data=data, headers=headers)
def createBundleManifest(self, bundleManifest):
r = self._retry_when_http_error(0, self._post_bundle_manifest, bundleManifest, self.ingest_api_root["bundleManifests"]["href"].rsplit("{")[0])
if not (200 <= r.status_code < 300):
error_message = "Failed to create bundle manifest at URL {0} with request payload: {1}".format(self.ingest_api_root["bundleManifests"]["href"].rsplit("{")[0],
json.dumps(bundleManifest.__dict__))
self.logger.error(error_message)
raise ValueError(error_message)
else:
self.logger.info("successfully created bundle manifest")
def _post_bundle_manifest(self, bundleManifest, url):
return requests.post(url, data=json.dumps(bundleManifest.__dict__), headers=self.headers)
def updateSubmissionWithStagingCredentials(self, subUrl, uuid, submissionCredentials):
stagingDetails = \
{
"stagingDetails": {
"stagingAreaUuid": {
"uuid": uuid
},
"stagingAreaLocation": {
"value": submissionCredentials
}
}
}
if self.retrySubmissionUpdateWithStagingDetails(subUrl, stagingDetails, 0):
self.logger.debug("envelope updated with staging details " + json.dumps(stagingDetails))
else:
self.logger.error("Failed to update envelope with staging details: " + json.dumps(stagingDetails))
def retrySubmissionUpdateWithStagingDetails(self, subUrl, stagingDetails, tries):
if tries < 5:
# do a GET request to get latest submission envelope
entity_response = requests.get(subUrl)
etag = entity_response.headers['ETag']
if etag:
# set the etag header so we get 412 if someone beats us to set validating
self.headers['If-Match'] = etag
r = requests.patch(subUrl, data=json.dumps(stagingDetails))
try:
r.raise_for_status()
return True
except HTTPError:
self.logger.error("PATCHing submission envelope with creds failed, retrying")
tries += 1
return self.retrySubmissionUpdateWithStagingDetails(subUrl, stagingDetails, tries)
else:
return False
class BundleManifest:
def __init__(self):
self.bundleUuid = str(uuid.uuid4())
self.envelopeUuid = {}
self.dataFiles = []
self.fileBiomaterialMap = {}
self.fileProcessMap = {}
self.fileFilesMap = {}
self.fileProjectMap = {}
self.fileProtocolMap = {}
|
[] |
[] |
[
"INGEST_API"
] |
[]
|
["INGEST_API"]
|
python
| 1 | 0 | |
main.go
|
package main
import (
"context"
"fmt"
"log"
"net/http"
"os"
"github.com/chrislgardner/spellapi/db"
"github.com/gorilla/mux"
"google.golang.org/grpc/credentials"
"go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/sdk/resource"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
)
var (
dbUrl string
)
func main() {
ctx, tp := initHoneycomb()
// Handle this error in a sensible manner where possible
defer func() { _ = tp.Shutdown(ctx) }()
dbUrl = os.Getenv("COSMOSDB_URI")
db, err := db.ConnectDb(dbUrl)
if err != nil {
panic(err)
}
var spellService SpellService
if ldApiKey := os.Getenv("LAUNCHDARKLY_KEY"); ldApiKey != "" {
ldclient, err := NewLaunchDarklyClient(ldApiKey, 5)
if err != nil {
panic(err)
}
spellService = SpellService{
store: db,
flags: ldclient,
}
} else {
spellService = SpellService{
store: db,
}
}
r := mux.NewRouter()
r.Use(otelmux.Middleware("SpellApi"))
// Routes consist of a path and a handler function.
r.HandleFunc("/spells/{name}", spellService.GetSpellHandler).Methods("GET")
r.HandleFunc("/spells/{name}", spellService.DeleteSpellHandler).Methods("DELETE")
r.HandleFunc("/spells", spellService.PostSpellHandler).Methods("POST")
r.HandleFunc("/spells", spellService.GetAllSpellHandler).Methods("GET")
r.HandleFunc("/spellmetadata/{name}", spellService.GetSpellMetadataHandler).Methods("GET")
r.HandleFunc("/spellmetadata", spellService.GetAllSpellMetadataHandler).Methods("GET")
// Bind to a port and pass our router in
port := os.Getenv("PORT")
if port == "" {
port = "80"
}
log.Fatal(http.ListenAndServe(":"+port, r))
}
func initHoneycomb() (context.Context, *sdktrace.TracerProvider) {
ctx := context.Background()
// Create an OTLP exporter, passing in Honeycomb credentials as environment variables.
exp, err := otlptracegrpc.New(ctx,
otlptracegrpc.WithEndpoint("api.honeycomb.io:443"),
otlptracegrpc.WithHeaders(map[string]string{
"x-honeycomb-team": os.Getenv("HONEYCOMB_KEY"),
"x-honeycomb-dataset": os.Getenv("HONEYCOMB_DATASET"),
}),
otlptracegrpc.WithTLSCredentials(credentials.NewClientTLSFromCert(nil, "")),
)
if err != nil {
fmt.Printf("failed to initialize exporter: %v", err)
}
res, err := resource.New(ctx,
resource.WithAttributes(
// the service name used to display traces in backends
semconv.ServiceNameKey.String("Encantus"),
),
)
if err != nil {
fmt.Printf("failed to initialize respource: %v", err)
}
// Create a new tracer provider with a batch span processor and the otlp exporter.
// Add a resource attribute service.name that identifies the service in the Honeycomb UI.
tp := sdktrace.NewTracerProvider(
sdktrace.WithBatcher(exp),
sdktrace.WithResource(res),
)
// Set the Tracer Provider and the W3C Trace Context propagator as globals
otel.SetTracerProvider(tp)
otel.SetTextMapPropagator(
propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}),
)
return ctx, tp
}
|
[
"\"COSMOSDB_URI\"",
"\"LAUNCHDARKLY_KEY\"",
"\"PORT\"",
"\"HONEYCOMB_KEY\"",
"\"HONEYCOMB_DATASET\""
] |
[] |
[
"PORT",
"LAUNCHDARKLY_KEY",
"HONEYCOMB_KEY",
"COSMOSDB_URI",
"HONEYCOMB_DATASET"
] |
[]
|
["PORT", "LAUNCHDARKLY_KEY", "HONEYCOMB_KEY", "COSMOSDB_URI", "HONEYCOMB_DATASET"]
|
go
| 5 | 0 | |
plugins/client/http/http.go
|
// Package http provides a http client
package http
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"net"
"net/http"
"net/url"
"os"
"strings"
"sync"
"time"
"go-micro.dev/v4/broker"
"go-micro.dev/v4/client"
"go-micro.dev/v4/cmd"
"go-micro.dev/v4/codec"
raw "go-micro.dev/v4/codec/bytes"
errors "go-micro.dev/v4/errors"
"go-micro.dev/v4/metadata"
"go-micro.dev/v4/registry"
"go-micro.dev/v4/selector"
"go-micro.dev/v4/transport"
)
type httpClient struct {
once sync.Once
opts client.Options
}
func init() {
cmd.DefaultClients["http"] = NewClient
}
func (h *httpClient) next(request client.Request, opts client.CallOptions) (selector.Next, error) {
service := request.Service()
// get proxy
if prx := os.Getenv("MICRO_PROXY"); len(prx) > 0 {
service = prx
}
// get proxy address
if prx := os.Getenv("MICRO_PROXY_ADDRESS"); len(prx) > 0 {
opts.Address = []string{prx}
}
// return remote address
if len(opts.Address) > 0 {
return func() (*registry.Node, error) {
return ®istry.Node{
Address: opts.Address[0],
Metadata: map[string]string{
"protocol": "http",
},
}, nil
}, nil
}
// only get the things that are of mucp protocol
selectOptions := append(opts.SelectOptions, selector.WithFilter(
selector.FilterLabel("protocol", "http"),
))
// get next nodes from the selector
next, err := h.opts.Selector.Select(service, selectOptions...)
if err != nil && err == selector.ErrNotFound {
return nil, errors.NotFound("go.micro.client", err.Error())
} else if err != nil {
return nil, errors.InternalServerError("go.micro.client", err.Error())
}
return next, nil
}
func (h *httpClient) call(ctx context.Context, node *registry.Node, req client.Request, rsp interface{}, opts client.CallOptions) error {
// set the address
address := node.Address
header := make(http.Header)
if md, ok := metadata.FromContext(ctx); ok {
for k, v := range md {
header.Set(k, v)
}
}
// set timeout in nanoseconds
header.Set("Timeout", fmt.Sprintf("%d", opts.RequestTimeout))
// set the content type for the request
header.Set("Content-Type", req.ContentType())
// get codec
cf, err := h.newHTTPCodec(req.ContentType())
if err != nil {
return errors.InternalServerError("go.micro.client", err.Error())
}
// marshal request
b, err := cf.Marshal(req.Body())
if err != nil {
return errors.InternalServerError("go.micro.client", err.Error())
}
buf := &buffer{bytes.NewBuffer(b)}
defer buf.Close()
// start with / or not
endpoint := req.Endpoint()
if !strings.HasPrefix(endpoint, "/") {
endpoint = "/" + endpoint
}
rawurl := "http://" + address + endpoint
// parse rawurl
URL, err := url.Parse(rawurl)
if err != nil {
return errors.InternalServerError("go.micro.client", err.Error())
}
hreq := &http.Request{
Method: "POST",
URL: URL,
Header: header,
Body: buf,
ContentLength: int64(len(b)),
Host: address,
}
// make the request
hrsp, err := http.DefaultClient.Do(hreq.WithContext(ctx))
if err != nil {
return errors.InternalServerError("go.micro.client", err.Error())
}
defer hrsp.Body.Close()
// parse response
b, err = io.ReadAll(hrsp.Body)
if err != nil {
return errors.InternalServerError("go.micro.client", err.Error())
}
// unmarshal
if err := cf.Unmarshal(b, rsp); err != nil {
return errors.InternalServerError("go.micro.client", err.Error())
}
return nil
}
func (h *httpClient) stream(ctx context.Context, node *registry.Node, req client.Request, opts client.CallOptions) (client.Stream, error) {
// set the address
address := node.Address
header := make(http.Header)
if md, ok := metadata.FromContext(ctx); ok {
for k, v := range md {
header.Set(k, v)
}
}
// set timeout in nanoseconds
header.Set("Timeout", fmt.Sprintf("%d", opts.RequestTimeout))
// set the content type for the request
header.Set("Content-Type", req.ContentType())
// get codec
cf, err := h.newHTTPCodec(req.ContentType())
if err != nil {
return nil, errors.InternalServerError("go.micro.client", err.Error())
}
cc, err := net.Dial("tcp", address)
if err != nil {
return nil, errors.InternalServerError("go.micro.client", fmt.Sprintf("Error dialing: %v", err))
}
return &httpStream{
address: address,
context: ctx,
closed: make(chan bool),
conn: cc,
codec: cf,
header: header,
reader: bufio.NewReader(cc),
request: req,
}, nil
}
func (h *httpClient) newHTTPCodec(contentType string) (Codec, error) {
if c, ok := defaultHTTPCodecs[contentType]; ok {
return c, nil
}
return nil, fmt.Errorf("Unsupported Content-Type: %s", contentType)
}
func (h *httpClient) newCodec(contentType string) (codec.NewCodec, error) {
if c, ok := h.opts.Codecs[contentType]; ok {
return c, nil
}
if cf, ok := defaultRPCCodecs[contentType]; ok {
return cf, nil
}
return nil, fmt.Errorf("Unsupported Content-Type: %s", contentType)
}
func (h *httpClient) Init(opts ...client.Option) error {
for _, o := range opts {
o(&h.opts)
}
return nil
}
func (h *httpClient) Options() client.Options {
return h.opts
}
func (h *httpClient) NewMessage(topic string, msg interface{}, opts ...client.MessageOption) client.Message {
return newHTTPMessage(topic, msg, "application/proto", opts...)
}
func (h *httpClient) NewRequest(service, method string, req interface{}, reqOpts ...client.RequestOption) client.Request {
return newHTTPRequest(service, method, req, h.opts.ContentType, reqOpts...)
}
func (h *httpClient) Call(ctx context.Context, req client.Request, rsp interface{}, opts ...client.CallOption) error {
// make a copy of call opts
callOpts := h.opts.CallOptions
for _, opt := range opts {
opt(&callOpts)
}
// get next nodes from the selector
next, err := h.next(req, callOpts)
if err != nil {
return err
}
// check if we already have a deadline
d, ok := ctx.Deadline()
if !ok {
// no deadline so we create a new one
ctx, _ = context.WithTimeout(ctx, callOpts.RequestTimeout)
} else {
// got a deadline so no need to setup context
// but we need to set the timeout we pass along
opt := client.WithRequestTimeout(d.Sub(time.Now()))
opt(&callOpts)
}
// should we noop right here?
select {
case <-ctx.Done():
return errors.New("go.micro.client", fmt.Sprintf("%v", ctx.Err()), 408)
default:
}
// make copy of call method
hcall := h.call
// wrap the call in reverse
for i := len(callOpts.CallWrappers); i > 0; i-- {
hcall = callOpts.CallWrappers[i-1](hcall)
}
// return errors.New("go.micro.client", "request timeout", 408)
call := func(i int) error {
// call backoff first. Someone may want an initial start delay
t, err := callOpts.Backoff(ctx, req, i)
if err != nil {
return errors.InternalServerError("go.micro.client", err.Error())
}
// only sleep if greater than 0
if t.Seconds() > 0 {
time.Sleep(t)
}
// select next node
node, err := next()
if err != nil && err == selector.ErrNotFound {
return errors.NotFound("go.micro.client", err.Error())
} else if err != nil {
return errors.InternalServerError("go.micro.client", err.Error())
}
// make the call
err = hcall(ctx, node, req, rsp, callOpts)
h.opts.Selector.Mark(req.Service(), node, err)
return err
}
ch := make(chan error, callOpts.Retries)
var gerr error
for i := 0; i < callOpts.Retries; i++ {
go func() {
ch <- call(i)
}()
select {
case <-ctx.Done():
return errors.New("go.micro.client", fmt.Sprintf("%v", ctx.Err()), 408)
case err := <-ch:
// if the call succeeded let's bail early
if err == nil {
return nil
}
retry, rerr := callOpts.Retry(ctx, req, i, err)
if rerr != nil {
return rerr
}
if !retry {
return err
}
gerr = err
}
}
return gerr
}
func (h *httpClient) Stream(ctx context.Context, req client.Request, opts ...client.CallOption) (client.Stream, error) {
// make a copy of call opts
callOpts := h.opts.CallOptions
for _, opt := range opts {
opt(&callOpts)
}
// get next nodes from the selector
next, err := h.next(req, callOpts)
if err != nil {
return nil, err
}
// check if we already have a deadline
d, ok := ctx.Deadline()
if !ok {
// no deadline so we create a new one
ctx, _ = context.WithTimeout(ctx, callOpts.RequestTimeout)
} else {
// got a deadline so no need to setup context
// but we need to set the timeout we pass along
opt := client.WithRequestTimeout(d.Sub(time.Now()))
opt(&callOpts)
}
// should we noop right here?
select {
case <-ctx.Done():
return nil, errors.New("go.micro.client", fmt.Sprintf("%v", ctx.Err()), 408)
default:
}
call := func(i int) (client.Stream, error) {
// call backoff first. Someone may want an initial start delay
t, err := callOpts.Backoff(ctx, req, i)
if err != nil {
return nil, errors.InternalServerError("go.micro.client", err.Error())
}
// only sleep if greater than 0
if t.Seconds() > 0 {
time.Sleep(t)
}
node, err := next()
if err != nil && err == selector.ErrNotFound {
return nil, errors.NotFound("go.micro.client", err.Error())
} else if err != nil {
return nil, errors.InternalServerError("go.micro.client", err.Error())
}
stream, err := h.stream(ctx, node, req, callOpts)
h.opts.Selector.Mark(req.Service(), node, err)
return stream, err
}
type response struct {
stream client.Stream
err error
}
ch := make(chan response, callOpts.Retries)
var grr error
for i := 0; i < callOpts.Retries; i++ {
go func() {
s, err := call(i)
ch <- response{s, err}
}()
select {
case <-ctx.Done():
return nil, errors.New("go.micro.client", fmt.Sprintf("%v", ctx.Err()), 408)
case rsp := <-ch:
// if the call succeeded let's bail early
if rsp.err == nil {
return rsp.stream, nil
}
retry, rerr := callOpts.Retry(ctx, req, i, rsp.err)
if rerr != nil {
return nil, rerr
}
if !retry {
return nil, rsp.err
}
grr = rsp.err
}
}
return nil, grr
}
func (h *httpClient) Publish(ctx context.Context, p client.Message, opts ...client.PublishOption) error {
options := client.PublishOptions{
Context: context.Background(),
}
for _, o := range opts {
o(&options)
}
md, ok := metadata.FromContext(ctx)
if !ok {
md = make(map[string]string)
}
md["Content-Type"] = p.ContentType()
md["Micro-Topic"] = p.Topic()
cf, err := h.newCodec(p.ContentType())
if err != nil {
return errors.InternalServerError("go.micro.client", err.Error())
}
var body []byte
// passed in raw data
if d, ok := p.Payload().(*raw.Frame); ok {
body = d.Data
} else {
b := &buffer{bytes.NewBuffer(nil)}
if err := cf(b).Write(&codec.Message{Type: codec.Event}, p.Payload()); err != nil {
return errors.InternalServerError("go.micro.client", err.Error())
}
body = b.Bytes()
}
h.once.Do(func() {
h.opts.Broker.Connect()
})
topic := p.Topic()
// get proxy
if prx := os.Getenv("MICRO_PROXY"); len(prx) > 0 {
options.Exchange = prx
}
// get the exchange
if len(options.Exchange) > 0 {
topic = options.Exchange
}
return h.opts.Broker.Publish(topic, &broker.Message{
Header: md,
Body: body,
})
}
func (h *httpClient) String() string {
return "http"
}
func newClient(opts ...client.Option) client.Client {
options := client.Options{
CallOptions: client.CallOptions{
Backoff: client.DefaultBackoff,
Retry: client.DefaultRetry,
Retries: client.DefaultRetries,
RequestTimeout: client.DefaultRequestTimeout,
DialTimeout: transport.DefaultDialTimeout,
},
}
for _, o := range opts {
o(&options)
}
if len(options.ContentType) == 0 {
options.ContentType = "application/proto"
}
if options.Broker == nil {
options.Broker = broker.DefaultBroker
}
if options.Registry == nil {
options.Registry = registry.DefaultRegistry
}
if options.Selector == nil {
options.Selector = selector.NewSelector(
selector.Registry(options.Registry),
)
}
rc := &httpClient{
once: sync.Once{},
opts: options,
}
c := client.Client(rc)
// wrap in reverse
for i := len(options.Wrappers); i > 0; i-- {
c = options.Wrappers[i-1](c)
}
return c
}
func NewClient(opts ...client.Option) client.Client {
return newClient(opts...)
}
|
[
"\"MICRO_PROXY\"",
"\"MICRO_PROXY_ADDRESS\"",
"\"MICRO_PROXY\""
] |
[] |
[
"MICRO_PROXY",
"MICRO_PROXY_ADDRESS"
] |
[]
|
["MICRO_PROXY", "MICRO_PROXY_ADDRESS"]
|
go
| 2 | 0 | |
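As a rough usage sketch of the plugin above: it registers itself as the "http" client and POSTs the marshalled request body to a registry node labelled protocol=http. The import path, the "greeter" service, the "Say/Hello" endpoint, and the JSON content type below are illustrative assumptions, not taken from the plugin itself.

package main

import (
	"context"
	"fmt"

	"go-micro.dev/v4/client"

	// assumed import path for the plugin source shown above
	httpclient "github.com/go-micro/plugins/v4/client/http"
)

func main() {
	// Build the HTTP client with its default broker, registry and selector.
	c := httpclient.NewClient()

	// Hypothetical service and endpoint: the endpoint becomes the request
	// path ("/Say/Hello") and the body is POSTed to the selected node.
	req := c.NewRequest(
		"greeter", "Say/Hello",
		map[string]string{"name": "world"},
		client.WithContentType("application/json"), // assumes a JSON codec is registered for this type
	)

	var rsp map[string]interface{}
	if err := c.Call(context.Background(), req, &rsp); err != nil {
		fmt.Println("call failed:", err)
		return
	}
	fmt.Println(rsp)
}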
backend/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'little_bonus_29102.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
zaws.go
|
package main
import (
"encoding/json"
"flag"
"fmt"
"github.com/AlekSi/zabbix-sender"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudwatch"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/elb"
"net"
"os"
"strconv"
"time"
)
type Zaws struct {
Region string
AccessKeyId string
SecretKeyId string
TargetId string
MetricName string
ZabbixHost string
ZabbixPort string
AwsSession *session.Session
}
func NewZaws() *Zaws {
zaws := new(Zaws)
zaws.SetOption()
zaws.AwsSession = session.New(&aws.Config{
Region: aws.String(zaws.Region),
Credentials: credentials.NewStaticCredentials(zaws.AccessKeyId, zaws.SecretKeyId, ""),
})
return zaws
}
func (z *Zaws) SetOption() {
flag.StringVar(&z.Region, "region", "ap-northeast-1", "Set AWS region")
flag.StringVar(&z.Region, "r", "ap-northeast-1", "Set AWS region")
flag.StringVar(&z.AccessKeyId, "key", os.Getenv("AWS_ACCESS_KEY_ID"), "Set AWS API Access key id")
flag.StringVar(&z.AccessKeyId, "k", os.Getenv("AWS_ACCESS_KEY_ID"), "Set AWS API Access key id")
flag.StringVar(&z.SecretKeyId, "secret", os.Getenv("AWS_SECRET_ACCESS_KEY"), "Set AWS API Secret key id")
flag.StringVar(&z.SecretKeyId, "s", os.Getenv("AWS_SECRET_ACCESS_KEY"), "Set AWS API Secret key id")
flag.StringVar(&z.TargetId, "id", "", "Set target object id")
flag.StringVar(&z.TargetId, "i", "", "Set target object id")
flag.StringVar(&z.MetricName, "metric", "", "Set metric name")
flag.StringVar(&z.MetricName, "m", "", "Set metric name")
flag.StringVar(&z.ZabbixHost, "host", "localhost", "Set zabbix host name")
flag.StringVar(&z.ZabbixHost, "h", "localhost", "Set zabbix host name")
flag.StringVar(&z.ZabbixPort, "port", "10051", "Set zabbix host port")
flag.StringVar(&z.ZabbixPort, "p", "10051", "Set zabbix host port")
flag.Parse()
if z.AccessKeyId == "" || z.SecretKeyId == "" {
fmt.Println("[ERROR]: Please set key information")
usage()
}
}
// Declare Struct
type LldJson struct {
Data []Data `json:"data"`
}
type Data struct {
MetricName string `json:"{#METRIC.NAME},omitempty"`
MetricUnit string `json:"{#METRIC.UNIT},omitempty"`
MetricNamespace string `json:"{#METRIC.NAMESPACE},omitempty"`
InstanceName string `json:"{#INSTANCE.NAME},omitempty"`
InstanceType string `json:"{#INSTANCE.TYPE},omitempty"`
InstanceId string `json:"{#INSTANCE.ID},omitempty"`
InstancePrivateAddr string `json:"{#INSTANCE.PRIVATE.ADDR},omitempty"`
ElbName string `json:"{#ELB.NAME},omitempty"`
ElbDnsName string `json:"{#ELB.DNS.NAME},omitempty"`
}
// Common util
func usage() {
fmt.Println("Usage: zaws service method [target] [-region|-r] [-key|-k] [-secret|-s] [-id|-i] [-metric|-m] [-host|h] [-port|p]")
os.Exit(1)
}
func convert_to_lldjson_string(data []Data) string {
lld_json := LldJson{data}
convert_json, _ := json.Marshal(lld_json)
return string(convert_json)
}
// Access AWS API
func get_metric_list(sess *session.Session, identity_name, target_id string) []*cloudwatch.Metric {
svc := cloudwatch.New(sess)
params := &cloudwatch.ListMetricsInput{
Dimensions: []*cloudwatch.DimensionFilter{
{
Name: aws.String(identity_name),
Value: aws.String(target_id),
},
},
}
resp, err := svc.ListMetrics(params)
if err != nil {
fmt.Printf("[ERROR] Fail ListMetrics API call: %s \n", err.Error())
return nil
}
return resp.Metrics
}
func get_metric_statistics(metric_name, metric_namespace string) *string {
sum_metric_list := []string{
"RequestCount",
"HTTPCode_Backend_2XX",
"HTTPCode_Backend_3XX",
"HTTPCode_Backend_4XX",
"HTTPCode_Backend_5XX",
"HTTPCode_ELB_4XX",
"HTTPCode_ELB_5XX",
"HTTPCode_ELB_5XX",
}
if metric_namespace == "AWS/ELB" {
for _, value := range sum_metric_list {
if value == metric_name {
return aws.String("Sum")
}
}
}
return aws.String("Average")
}
func get_metric_stats(sess *session.Session, identity_name, target_id, metric_name, metric_namespace string) []*cloudwatch.Datapoint {
svc := cloudwatch.New(sess)
t := time.Now()
input := &cloudwatch.GetMetricStatisticsInput{
Namespace: aws.String(metric_namespace),
Statistics: []*string{get_metric_statistics(metric_name, metric_namespace)},
EndTime: aws.Time(t),
Period: aws.Int64(300),
StartTime: aws.Time(t.Add(time.Duration(-10) * time.Minute)),
MetricName: aws.String(metric_name),
Dimensions: []*cloudwatch.Dimension{
{
Name: aws.String(identity_name),
Value: aws.String(target_id),
},
},
}
value, err := svc.GetMetricStatistics(input)
if err != nil {
fmt.Printf("[ERROR] Fail GetMetricStatistics API call: %s \n", err.Error())
return nil
}
return value.Datapoints
}
func get_ec2_list(sess *session.Session) []*ec2.Instance {
var instances []*ec2.Instance
svc := ec2.New(sess)
resp, err := svc.DescribeInstances(nil)
if err != nil {
fmt.Printf("[ERROR] Fail DescribeInstances API call: %s \n", err.Error())
os.Exit(1)
}
for _, reservation := range resp.Reservations {
instances = append(instances, reservation.Instances...)
}
return instances
}
func get_elb_list(sess *session.Session) []*elb.LoadBalancerDescription {
svc := elb.New(sess)
params := &elb.DescribeLoadBalancersInput{
LoadBalancerNames: []*string{},
}
resp, err := svc.DescribeLoadBalancers(params)
if err != nil {
fmt.Printf("[ERROR] Fail DescribeLoadBalancers API call: %s \n", err.Error())
return nil
}
return resp.LoadBalancerDescriptions
}
// zaws method
func (z *Zaws) ShowEc2List() {
list := make([]Data, 0)
instances := get_ec2_list(z.AwsSession)
for _, instance := range instances {
data := Data{InstanceType: *instance.InstanceType, InstanceId: *instance.InstanceId}
if instance.PrivateIpAddress != nil {
data.InstancePrivateAddr = *instance.PrivateIpAddress
}
for _, tag := range instance.Tags {
if *tag.Key == "Name" {
data.InstanceName = *tag.Value
}
}
if data.InstanceName == "" {
data.InstanceName = *instance.InstanceId
}
list = append(list, data)
}
fmt.Print(convert_to_lldjson_string(list))
}
func (z *Zaws) ShowElbList() {
list := make([]Data, 0)
elbs := get_elb_list(z.AwsSession)
for _, elb := range elbs {
data := Data{ElbName: *elb.LoadBalancerName, ElbDnsName: *elb.DNSName}
list = append(list, data)
}
fmt.Print(convert_to_lldjson_string(list))
}
func (z *Zaws) ShowEC2CloudwatchMetricsList() {
list := make([]Data, 0)
metrics := get_metric_list(z.AwsSession, "InstanceId", z.TargetId)
for _, metric := range metrics {
datapoints := get_metric_stats(z.AwsSession, "InstanceId", z.TargetId, *metric.MetricName, *metric.Namespace)
data := Data{MetricName: *metric.MetricName, MetricNamespace: *metric.Namespace}
if len(datapoints) > 0 {
data.MetricUnit = *datapoints[0].Unit
}
list = append(list, data)
}
fmt.Print(convert_to_lldjson_string(list))
}
func (z *Zaws) ShowELBCloudwatchMetricsList() {
list := make([]Data, 0)
metrics := get_metric_list(z.AwsSession, "LoadBalancerName", z.TargetId)
for _, metric := range metrics {
datapoints := get_metric_stats(z.AwsSession, "LoadBalancerName", z.TargetId, *metric.MetricName, *metric.Namespace)
metric_name := *metric.MetricName
for _, dimension := range metric.Dimensions {
if *dimension.Name == "AvailabilityZone" {
metric_name = *metric.MetricName + "." + *dimension.Value
break
}
}
data := Data{MetricName: metric_name, MetricNamespace: *metric.Namespace}
if len(datapoints) > 0 {
data.MetricUnit = *datapoints[0].Unit
}
list = append(list, data)
}
fmt.Print(convert_to_lldjson_string(list))
}
func (z *Zaws) SendEc2MetricStats() {
z.SendMetricStats("InstanceId")
}
func (z *Zaws) SendElbMetricStats() {
z.SendMetricStats("LoadBalancerName")
}
func (z *Zaws) SendMetricStats(identity_name string) {
var send_data []zabbix_sender.DataItem
metrics := get_metric_list(z.AwsSession, identity_name, z.TargetId)
for _, metric := range metrics {
datapoints := get_metric_stats(z.AwsSession, identity_name, z.TargetId, *metric.MetricName, *metric.Namespace)
metric_name := *metric.MetricName
for _, dimension := range metric.Dimensions {
if *dimension.Name == "AvailabilityZone" {
metric_name = *metric.MetricName + "." + *dimension.Value
break
}
}
if len(datapoints) > 0 {
data_time := *datapoints[0].Timestamp
var val float64
if datapoints[0].Average == (*float64)(nil) {
val = *datapoints[0].Sum
} else {
val = *datapoints[0].Average
}
send_data = append(send_data, zabbix_sender.DataItem{Hostname: z.TargetId, Key: "cloudwatch.metric[" + metric_name + "]", Value: strconv.FormatFloat(val, 'f', 4, 64), Timestamp: data_time.Unix()})
}
}
addr, _ := net.ResolveTCPAddr("tcp", z.ZabbixHost+":"+z.ZabbixPort)
res, err := zabbix_sender.Send(addr, send_data)
if err != nil {
fmt.Printf("[ERROR]: zabbix sender error!: %s", err)
os.Exit(1)
}
fmt.Printf("[INFO]: Successful sending data to Zabbix: resp", res)
}
func main() {
if len(os.Args) < 3 {
usage()
}
switch os.Args[1] {
case "ec2":
switch os.Args[2] {
case "list":
os.Args = os.Args[2:]
zaws := NewZaws()
zaws.ShowEc2List()
default:
usage()
}
case "elb":
switch os.Args[2] {
case "list":
os.Args = os.Args[2:]
zaws := NewZaws()
zaws.ShowElbList()
default:
usage()
}
case "cloudwatch":
switch os.Args[2] {
case "list":
if len(os.Args) < 4 {
usage()
}
switch os.Args[3] {
case "ec2":
os.Args = os.Args[3:]
zaws := NewZaws()
zaws.ShowEC2CloudwatchMetricsList()
case "rds":
case "elb":
os.Args = os.Args[3:]
zaws := NewZaws()
zaws.ShowELBCloudwatchMetricsList()
default:
usage()
}
case "stats":
if len(os.Args) < 4 {
usage()
}
switch os.Args[3] {
case "ec2":
os.Args = os.Args[3:]
zaws := NewZaws()
zaws.SendEc2MetricStats()
case "elb":
os.Args = os.Args[3:]
zaws := NewZaws()
zaws.SendElbMetricStats()
default:
usage()
}
default:
usage()
}
default:
usage()
}
os.Exit(0)
}
|
[
"\"AWS_ACCESS_KEY_ID\"",
"\"AWS_ACCESS_KEY_ID\"",
"\"AWS_SECRET_ACCESS_KEY\"",
"\"AWS_SECRET_ACCESS_KEY\""
] |
[] |
[
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY"
] |
[]
|
["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"]
|
go
| 2 | 0 | |
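For context, the {#...} struct tags above drive Zabbix low-level discovery; a minimal standalone sketch (with made-up instance values) of the JSON shape that something like `zaws ec2 list` would emit:

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copy of the LLD structures above, kept only to show the payload shape.
type data struct {
	InstanceName        string `json:"{#INSTANCE.NAME},omitempty"`
	InstanceID          string `json:"{#INSTANCE.ID},omitempty"`
	InstancePrivateAddr string `json:"{#INSTANCE.PRIVATE.ADDR},omitempty"`
}

type lldJSON struct {
	Data []data `json:"data"`
}

func main() {
	out, _ := json.Marshal(lldJSON{Data: []data{
		{InstanceName: "web-1", InstanceID: "i-0123456789abcdef0", InstancePrivateAddr: "10.0.0.12"},
	}})
	fmt.Println(string(out))
	// {"data":[{"{#INSTANCE.NAME}":"web-1","{#INSTANCE.ID}":"i-0123456789abcdef0","{#INSTANCE.PRIVATE.ADDR}":"10.0.0.12"}]}
}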
test/distributed/elastic/multiprocessing/errors/api_test.py
|
#!/usr/bin/env python3
import json
import os
import shutil
import signal
import tempfile
import unittest
from unittest import mock
from torch.distributed.elastic.multiprocessing.errors import (
ChildFailedError,
ProcessFailure,
record,
)
from torch.distributed.elastic.multiprocessing.errors.error_handler import _write_error
from torch.testing._internal.common_utils import TEST_WITH_TSAN
class SentinelError(Exception):
# exists so that we can validate that
# the correct error is raised and propagated
pass
@record
def raise_exception_fn():
raise SentinelError("foobar")
@record
def good_fn():
print("hello world")
@record
def raise_child_failure_error_fn(name, child_error_file=""):
if child_error_file:
_write_error(SentinelError("foobar"), child_error_file)
pf = ProcessFailure(local_rank=0, pid=997, exitcode=1, error_file=child_error_file)
raise ChildFailedError(name, {0: pf})
def read_resource_file(resource_file: str) -> str:
with open(os.path.join(os.path.dirname(__file__), resource_file), "r") as fp:
return "".join(fp.readlines())
@unittest.skipIf(TEST_WITH_TSAN, "test incompatible with tsan")
class ApiTest(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp(prefix=self.__class__.__name__)
self.test_error_file = os.path.join(self.test_dir, "error.json")
def tearDown(self):
shutil.rmtree(self.test_dir)
def test_failure_incorrect_reply_file(self):
content = {"unknown_key": "unknown_value"}
with open(self.test_error_file, "w") as fp:
json.dump(content, fp)
with self.assertRaises(Exception):
ProcessFailure(
local_rank=0, pid=997, exitcode=1, error_file=self.test_error_file
)
def failure_with_error_file(self, exception):
_write_error(exception, self.test_error_file)
return ProcessFailure(
local_rank=0, pid=997, exitcode=1, error_file=self.test_error_file
)
def failure_without_error_file(self, exitcode):
return ProcessFailure(
local_rank=0, pid=997, exitcode=exitcode, error_file="ignored.json"
)
def test_process_failure_new_format(self):
error_data = {"message": "test error message", "timestamp": 10}
with open(self.test_error_file, "w") as fp:
json.dump(error_data, fp)
pf = ProcessFailure(
local_rank=0, pid=997, exitcode=1, error_file=self.test_error_file
)
self.assertEqual("test error message", pf.message)
self.assertEqual(10, pf.timestamp)
def test_process_mast_error_format(self):
error_data = {"message": "test error message", "timestamp": "10"}
with open(self.test_error_file, "w") as fp:
json.dump(error_data, fp)
pf = ProcessFailure(
local_rank=0, pid=997, exitcode=1, error_file=self.test_error_file
)
self.assertEqual("test error message", pf.message)
self.assertEqual(10, pf.timestamp)
def test_process_failure(self):
pf = self.failure_with_error_file(exception=SentinelError("foobar"))
self.assertEqual(0, pf.local_rank)
self.assertEqual(997, pf.pid)
self.assertEqual(1, pf.exitcode)
self.assertEqual(self.test_error_file, pf.error_file)
self.assertEqual(
pf.error_file_data["message"]["extraInfo"]["timestamp"], str(pf.timestamp)
)
self.assertTrue(pf.message) # check not None and not "" (empty string)
self.assertEqual("<N/A>", pf.signal_name())
def test_process_failure_signal(self):
pf = self.failure_without_error_file(exitcode=-signal.SIGSEGV)
self.assertEqual("SIGSEGV", pf.signal_name())
self.assertEqual(
f"Signal {signal.SIGSEGV} (SIGSEGV) received by PID {pf.pid}", pf.message
)
def test_process_failure_no_error_file(self):
pf = self.failure_without_error_file(exitcode=138)
self.assertEqual("<N/A>", pf.signal_name())
self.assertEqual("<N/A>", pf.error_file)
self.assertEqual("Process failed with exitcode 138", pf.message)
def test_child_failed_error(self):
pf0 = self.failure_with_error_file(exception=SentinelError("rank 0"))
pf1 = self.failure_with_error_file(exception=SentinelError("rank 1"))
pf2 = self.failure_without_error_file(exitcode=138)
ex = ChildFailedError("trainer.par", {0: pf0, 1: pf1, 2: pf2})
self.assertEqual(pf0, ex.get_first_failure()[1])
# print is intentional and should print something like this:
"""
*********************************************
trainer.par FAILED
=============================================
Root Cause:
[0]:
time: 2020-11-25_21:22:31
rank: 0 (local_rank: 0)
exitcode: 1 (pid: 997)
error_file: /tmp/ApiTesttbb37ier/error.json
msg: "SentinelError: rank 0"
=============================================
Other Failures:
[1]:
time: 2020-11-25_21:22:31
rank: 1 (local_rank: 0)
exitcode: 1 (pid: 997)
error_file: /tmp/ApiTesttbb37ier/error.json
msg: "SentinelError: rank 1"
[2]:
time: 2020-11-25_21:22:31
rank: 2 (local_rank: 0)
exitcode: 138 (pid: 997)
error_file: <N/A>
msg: "Process failed with exitcode 138"
*********************************************
"""
print(ex)
def test_record(self):
with mock.patch.dict(
os.environ, {"TORCHELASTIC_ERROR_FILE": self.test_error_file}
):
with self.assertRaises(SentinelError):
raise_exception_fn()
with open(self.test_error_file, "r") as fp:
err = json.load(fp)
self.assertIsNotNone(err["message"]["message"])
self.assertIsNotNone(err["message"]["extraInfo"]["py_callstack"])
self.assertIsNotNone(err["message"]["extraInfo"]["timestamp"])
def test_record_no_error_file(self):
with mock.patch.dict(os.environ, {}):
with self.assertRaises(SentinelError):
raise_exception_fn()
# no error file should have been generated
self.assertFalse(os.path.isfile(self.test_error_file))
def test_record_good_fn(self):
with mock.patch.dict(
os.environ, {"TORCHELASTIC_ERROR_FILE": self.test_error_file}
):
good_fn()
# function did not error; no error file should be produced
self.assertFalse(os.path.isfile(self.test_error_file))
def test_record_child_failure(self):
trainer_log_dir = os.path.join(self.test_dir, "trainer", "0")
os.makedirs(trainer_log_dir)
trainer_error_file = os.path.join(trainer_log_dir, "error.json")
with mock.patch.dict(
os.environ, {"TORCHELASTIC_ERROR_FILE": self.test_error_file}
):
with self.assertRaises(ChildFailedError) as cm:
raise_child_failure_error_fn("trainer", trainer_error_file)
pf = cm.exception.get_first_failure()[1]
# compare worker error file with reply file and overridden error code
expect = json.load(open(pf.error_file, "r"))
expect["message"]["errorCode"] = pf.exitcode
actual = json.load(open(self.test_error_file, "r"))
self.assertEqual(
json.dumps(expect, sort_keys=True),
json.dumps(actual, sort_keys=True),
)
def test_record_child_failure_no_child_error_file(self):
with mock.patch.dict(
os.environ, {"TORCHELASTIC_ERROR_FILE": self.test_error_file}
):
with self.assertRaises(ChildFailedError):
raise_child_failure_error_fn("trainer")
# @record should only copy child error file when ChildFailedError
# is raised - it should NOT record ChildFailedError itself
# it SHOULD re-raise ChildFailedError for any upstream system
# to handle it.
self.assertFalse(os.path.isfile(self.test_error_file))
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
cmd/topicctl/subcmd/check.go
|
package subcmd
import (
"context"
"fmt"
"os"
"path/filepath"
"github.com/segmentio/topicctl/pkg/admin"
"github.com/segmentio/topicctl/pkg/check"
"github.com/segmentio/topicctl/pkg/cli"
"github.com/segmentio/topicctl/pkg/config"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
var checkCmd = &cobra.Command{
Use: "check [topic configs]",
Short: "check that configs are valid and (optionally) match cluster state",
RunE: checkRun,
}
type checkCmdConfig struct {
checkLeaders bool
pathPrefix string
validateOnly bool
shared sharedOptions
}
var checkConfig checkCmdConfig
func init() {
checkCmd.Flags().StringVar(
&checkConfig.pathPrefix,
"path-prefix",
os.Getenv("TOPICCTL_APPLY_PATH_PREFIX"),
"Prefix for topic config paths",
)
checkCmd.Flags().BoolVar(
&checkConfig.checkLeaders,
"check-leaders",
false,
"Check leaders",
)
checkCmd.Flags().BoolVar(
&checkConfig.validateOnly,
"validate-only",
false,
"Validate configs only, without connecting to cluster",
)
addSharedConfigOnlyFlags(checkCmd, &checkConfig.shared)
RootCmd.AddCommand(checkCmd)
}
func checkRun(cmd *cobra.Command, args []string) error {
ctx := context.Background()
// Keep a cache of the admin clients with the cluster config path as the key
adminClients := map[string]admin.Client{}
defer func() {
for _, adminClient := range adminClients {
adminClient.Close()
}
}()
matchCount := 0
okCount := 0
for _, arg := range args {
if checkConfig.pathPrefix != "" && !filepath.IsAbs(arg) {
arg = filepath.Join(checkConfig.pathPrefix, arg)
}
matches, err := filepath.Glob(arg)
if err != nil {
return err
}
for _, match := range matches {
matchCount++
ok, err := checkTopicFile(ctx, match, adminClients)
if err != nil {
return err
}
if ok {
okCount++
}
}
}
if matchCount == 0 {
return fmt.Errorf("No topic configs match the provided args (%+v)", args)
} else if matchCount > okCount {
return fmt.Errorf(
"Check failed for %d/%d topic configs",
matchCount-okCount,
matchCount,
)
}
return nil
}
func checkTopicFile(
ctx context.Context,
topicConfigPath string,
adminClients map[string]admin.Client,
) (bool, error) {
clusterConfigPath, err := clusterConfigForTopicCheck(topicConfigPath)
if err != nil {
return false, err
}
clusterConfig, err := config.LoadClusterFile(clusterConfigPath)
if err != nil {
return false, err
}
topicConfigs, err := config.LoadTopicsFile(topicConfigPath)
if err != nil {
return false, err
}
var adminClient admin.Client
if !checkConfig.validateOnly {
var ok bool
adminClient, ok = adminClients[clusterConfigPath]
if !ok {
adminClient, err = clusterConfig.NewAdminClient(
ctx,
nil,
true,
checkConfig.shared.saslUsername,
checkConfig.shared.saslPassword,
)
if err != nil {
return false, err
}
adminClients[clusterConfigPath] = adminClient
}
}
cliRunner := cli.NewCLIRunner(adminClient, log.Infof, false)
for _, topicConfig := range topicConfigs {
topicConfig.SetDefaults()
log.Debugf(
"Processing topic %s in config %s with cluster config %s",
topicConfig.Meta.Name,
topicConfigPath,
clusterConfigPath,
)
topicCheckConfig := check.CheckConfig{
AdminClient: adminClient,
CheckLeaders: checkConfig.checkLeaders,
ClusterConfig: clusterConfig,
// TODO: Add support for broker rack verification.
NumRacks: -1,
TopicConfig: topicConfig,
ValidateOnly: checkConfig.validateOnly,
}
result, err := cliRunner.CheckTopic(
ctx,
topicCheckConfig,
)
if !result || err != nil {
return result, err
}
}
return true, nil
}
func clusterConfigForTopicCheck(topicConfigPath string) (string, error) {
if checkConfig.shared.clusterConfig != "" {
return checkConfig.shared.clusterConfig, nil
}
return filepath.Abs(
filepath.Join(
filepath.Dir(topicConfigPath),
"..",
"cluster.yaml",
),
)
}
|
[
"\"TOPICCTL_APPLY_PATH_PREFIX\""
] |
[] |
[
"TOPICCTL_APPLY_PATH_PREFIX"
] |
[]
|
["TOPICCTL_APPLY_PATH_PREFIX"]
|
go
| 1 | 0 | |
truststore_nss.go
|
// Copyright 2018 The mkcert Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"log"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
)
var (
hasNSS bool
hasCertutil bool
certutilPath string
nssDBs = []string{
filepath.Join(os.Getenv("HOME"), ".pki/nssdb"),
filepath.Join(os.Getenv("HOME"), "snap/chromium/current/.pki/nssdb"), // Snapcraft
"/etc/pki/nssdb", // CentOS 7
}
firefoxPaths = []string{
"/usr/bin/firefox", "/Applications/Firefox.app",
"/Applications/Firefox Developer Edition.app",
"/Applications/Firefox Nightly.app",
"C:\\Program Files\\Mozilla Firefox",
}
)
func init() {
allPaths := append(append([]string{}, nssDBs...), firefoxPaths...)
for _, path := range allPaths {
if pathExists(path) {
hasNSS = true
break
}
}
switch runtime.GOOS {
case "darwin":
switch {
case binaryExists("certutil"):
certutilPath, _ = exec.LookPath("certutil")
hasCertutil = true
case binaryExists("/usr/local/opt/nss/bin/certutil"):
// Check the default Homebrew path, to save executing Ruby. #135
certutilPath = "/usr/local/opt/nss/bin/certutil"
hasCertutil = true
default:
out, err := exec.Command("brew", "--prefix", "nss").Output()
if err == nil {
certutilPath = filepath.Join(strings.TrimSpace(string(out)), "bin", "certutil")
hasCertutil = pathExists(certutilPath)
}
}
case "linux":
if hasCertutil = binaryExists("certutil"); hasCertutil {
certutilPath, _ = exec.LookPath("certutil")
}
}
}
func (m *mkcert) checkNSS() bool {
if !hasCertutil {
return false
}
success := true
if m.forEachNSSProfile(func(profile string) {
err := exec.Command(certutilPath, "-V", "-d", profile, "-u", "L", "-n", m.caUniqueName()).Run()
if err != nil {
success = false
}
}) == 0 {
success = false
}
return success
}
func (m *mkcert) installNSS() bool {
if m.forEachNSSProfile(func(profile string) {
cmd := exec.Command(certutilPath, "-A", "-d", profile, "-t", "C,,", "-n", m.caUniqueName(), "-i", filepath.Join(m.CAROOT, rootName))
out, err := cmd.CombinedOutput()
fatalIfCmdErr(err, "certutil -A", out)
}) == 0 {
log.Printf("ERROR: no %s security databases found", NSSBrowsers)
return false
}
if !m.checkNSS() {
log.Printf("Installing in %s failed. Please report the issue with details about your environment at https://github.com/FiloSottile/mkcert/issues/new 👎", NSSBrowsers)
log.Printf("Note that if you never started %s, you need to do that at least once.", NSSBrowsers)
return false
}
return true
}
func (m *mkcert) uninstallNSS() {
m.forEachNSSProfile(func(profile string) {
err := exec.Command(certutilPath, "-V", "-d", profile, "-u", "L", "-n", m.caUniqueName()).Run()
if err != nil {
return
}
cmd := exec.Command(certutilPath, "-D", "-d", profile, "-n", m.caUniqueName())
out, err := cmd.CombinedOutput()
fatalIfCmdErr(err, "certutil -D", out)
})
}
func (m *mkcert) forEachNSSProfile(f func(profile string)) (found int) {
profiles, _ := filepath.Glob(FirefoxProfile)
profiles = append(profiles, nssDBs...)
for _, profile := range profiles {
if stat, err := os.Stat(profile); err != nil || !stat.IsDir() {
continue
}
if pathExists(filepath.Join(profile, "cert9.db")) {
f("sql:" + profile)
found++
} else if pathExists(filepath.Join(profile, "cert8.db")) {
f("dbm:" + profile)
found++
}
}
return
}
|
[
"\"HOME\"",
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
std/digiwallet.go
|
package digiwallet
import (
"encoding/json"
"encoding/xml"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
)
const baseURL = "https://transaction.digiwallet.nl"
// GetRTLO is a helper function meant for test functions.
// Returns the RTLO ENV value or the default test RTLO.
func GetRTLO() string {
rtlo := os.Getenv("RTLO")
if rtlo == "" {
rtlo = "143835"
}
return rtlo
}
// URL checks and corrects the provided URL, so that it complies
// with how the API wants URLs to be formed.
func URL(s string) (string, error) {
l := len(s)
if l == 0 {
return "", fmt.Errorf("url is empty")
}
if s[l-1] != '/' {
s = s + "/"
}
u, err := url.Parse(s)
if err != nil {
return "", nil
}
return u.String(), nil
}
// API contains data and functions to interact with the Digiwallet API
type API struct {
// RTLO is the digiwallet layout code
RTLO string
// URL is the URL used to communicate with the API
URL string
// Test enables global test mode
Test bool
// Logger is used for logging debug output
Logger *log.Logger
// Verbose enables verbose debugging output
Verbose bool
}
// New initializes and returns an API.
// Takes the RTLO outlet code and whether the global test mode is to be enabled.
func New(rtlo string, test bool) *API {
return &API{
RTLO: rtlo,
URL: baseURL,
Test: test,
Logger: log.New(os.Stdout, "Digiwallet SDK:", log.Ldate|log.Ltime|log.Lmicroseconds|log.Llongfile),
}
}
// Debug prints debug output when verbose mode is on and a logger had been provided.
func (a *API) Debug(v interface{}) {
if a.Logger != nil && a.Verbose {
a.Logger.Print(v)
}
}
// Debugf prints formatted debug output when verbose mode is on and a logger had been provided.
func (a *API) Debugf(format string, v interface{}) {
if a.Logger != nil && a.Verbose {
a.Logger.Printf(format, v)
}
}
// Request executes a request to the Digiwallet API via the provided endpoint URL,
// returns the raw response.
func (a *API) Request(uri *url.URL) (*http.Response, error) {
q := uri.Query()
// Enable test mode when global test mode is on.
if a.Test {
q.Set("test", "1")
}
// Make sure that returnurl is valid and API compatible, if provided
if url := q.Get("returnurl"); url != "" {
u, err := URL(url)
if err != nil {
return nil, err
}
q.Set("returnurl", u)
}
// Make sure that cancelurl is valid and API compatible, if provided
if url := q.Get("cancelurl"); url != "" {
u, err := URL(url)
if err != nil {
return nil, err
}
q.Set("cancelurl", u)
}
// Make sure that reporturl is valid and API compatible, if provided
if url := q.Get("reporturl"); url != "" {
u, err := URL(url)
if err != nil {
return nil, err
}
q.Set("reporturl", u)
}
uri.RawQuery = q.Encode()
// Temporary check for mandatory test mode as long as this SDK is being developed.
// TODO: Remove this check before production
if uri.Query().Get("test") == "" {
return nil, fmt.Errorf("SDK only supports test environment for now")
}
a.Debugf("making call to: %v\n", uri.String())
// Prepare the request
req, err := http.NewRequest(http.MethodGet, uri.String(), nil)
if err != nil {
return nil, err
}
// Create a client and execute the request.
client := http.Client{}
return client.Do(req)
}
// GetJSON executes an API request and json decodes the result to v.
// Takes the short endpoint path, i.e. "/ideal/start".
func (a *API) GetJSON(path string, v interface{}) error {
// Ensure a valid endpoint URL
uri, err := url.Parse(a.URL + path)
if err != nil {
return err
}
// Execute the request
res, err := a.Request(uri)
if err != nil {
return err
}
defer res.Body.Close()
// Return the json decoded response
return json.NewDecoder(res.Body).Decode(v)
}
// GetXML executes an API request and xml decodes the result to v.
// Takes the short endpoint path, i.e. "/ideal/getissuers".
func (a *API) GetXML(path string, v interface{}) error {
// Ensure a valid endpoint URL
uri, err := url.Parse(a.URL + path)
if err != nil {
return err
}
// Make the request
res, err := a.Request(uri)
if err != nil {
return err
}
defer res.Body.Close()
// Return the xml decoded response
return xml.NewDecoder(res.Body).Decode(v)
}
// Do executes a request to the Digiwallet API via the provided endpoint URL
// and processes the API response codes in a Go compatible way.
func (a *API) Do(url *url.URL) (string, error) {
// Execute the request
res, err := a.Request(url)
if err != nil {
return "", err
}
defer res.Body.Close()
// Read the response
b, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", err
}
// Process the response
return ProcessResponse(string(b))
}
// Issuer is an IDEAL issuer
type Issuer struct {
Name string `xml:",innerxml"`
ID string `xml:"id,attr"`
}
// Issuers is the XML representation of a list of issuers
type issuers struct {
XMLName xml.Name `xml:"issuers"`
Issuers []Issuer `xml:"issuer"`
}
// GetIdealIssuers returns a list of banks accepting IDEAL payments.
func (a *API) GetIdealIssuers() ([]Issuer, error) {
var i issuers
if err := a.GetXML("/ideal/getissuers?ver=4&format=xml", &i); err != nil {
return nil, err
}
return i.Issuers, nil
}
// TransactionStarter is a type with functionality to start a payment transaction.
type TransactionStarter interface {
Start(*API) error
}
// StartTransaction starts a payment transaction at the Digiwallet API.
// Takes any type implementing the TransactionStarter interface.
func (a *API) StartTransaction(t TransactionStarter) error {
return t.Start(a)
}
// TransactionChecker is a type with functionality to check the status of a
// payment transaction.
type TransactionChecker interface {
Check(*API, bool) error
}
// CheckTransaction checks a payment transaction at the Digiwallet API.
// Takes any type implementing the TransactionChecker interface.
func (a *API) CheckTransaction(t TransactionChecker, once bool) error {
return t.Check(a, once)
}
|
[
"\"RTLO\""
] |
[] |
[
"RTLO"
] |
[]
|
["RTLO"]
|
go
| 1 | 0 | |
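A minimal sketch of how the SDK above might be exercised, assuming a placeholder import path (example.com/digiwallet) and using only helpers defined in the file itself; global test mode is kept on because Request currently rejects non-test calls:

package main

import (
	"fmt"
	"log"

	// placeholder import path for the package shown above
	digiwallet "example.com/digiwallet"
)

func main() {
	// Use the test RTLO fallback and keep global test mode on.
	api := digiwallet.New(digiwallet.GetRTLO(), true)

	issuers, err := api.GetIdealIssuers()
	if err != nil {
		log.Fatal(err)
	}
	for _, issuer := range issuers {
		fmt.Printf("%s: %s\n", issuer.ID, issuer.Name)
	}
}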
drivers/vmwarefusion/vmrun_darwin.go
|
/*
* Copyright 2014 VMware, Inc. All rights reserved. Licensed under the Apache v2 License.
*/
package vmwarefusion
import (
"bytes"
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"syscall"
"github.com/hsartoris-bard/machine/libmachine/log"
)
var (
vmrunbin = setVmwareCmd("vmrun")
vdiskmanbin = setVmwareCmd("vmware-vdiskmanager")
)
var (
ErrMachineExist = errors.New("machine already exists")
ErrMachineNotExist = errors.New("machine does not exist")
ErrVMRUNNotFound = errors.New("VMRUN not found")
)
// detect the vmrun and vmware-vdiskmanager cmds' path if needed
func setVmwareCmd(cmd string) string {
if path, err := exec.LookPath(cmd); err == nil {
return path
}
return filepath.Join("/Applications/VMware Fusion.app/Contents/Library/", cmd)
}
func vmrun(args ...string) (string, string, error) {
// vmrun with nogui on VMware Fusion through at least 8.0.1 doesn't work right
// if the umask is set to not allow world-readable permissions
_ = syscall.Umask(022)
cmd := exec.Command(vmrunbin, args...)
if os.Getenv("MACHINE_DEBUG") != "" {
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
}
var stdout bytes.Buffer
var stderr bytes.Buffer
cmd.Stdout, cmd.Stderr = &stdout, &stderr
log.Debugf("executing: %v %v", vmrunbin, strings.Join(args, " "))
err := cmd.Run()
if err != nil {
if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
err = ErrVMRUNNotFound
}
}
return stdout.String(), stderr.String(), err
}
// Make a vmdk disk image with the given size (in MB).
func vdiskmanager(dest string, size int) error {
cmd := exec.Command(vdiskmanbin, "-c", "-t", "0", "-s", fmt.Sprintf("%dMB", size), "-a", "lsilogic", dest)
if os.Getenv("MACHINE_DEBUG") != "" {
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
}
if err := cmd.Run(); err != nil {
if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
return ErrVMRUNNotFound
}
}
return nil
}
|
[
"\"MACHINE_DEBUG\"",
"\"MACHINE_DEBUG\""
] |
[] |
[
"MACHINE_DEBUG"
] |
[]
|
["MACHINE_DEBUG"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
"fmt"
"log"
"net/http"
"os"
"strconv"
"time"
)
const DEFAULT_DELAY = "1"
const DEFAULT_PORT = "8080"
func main() {
var delayStr = os.Getenv("DELAY")
if delayStr == "" {
delayStr = DEFAULT_DELAY
}
delayInt, err := strconv.Atoi(delayStr)
if err != nil {
log.Fatal(err)
}
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
fmt.Println(r)
time.Sleep(time.Duration(delayInt) * time.Second)
fmt.Fprintln(w, "Final response after ", delayInt, " seconds")
})
port := os.Getenv("PORT")
if port == "" {
port = DEFAULT_PORT
}
fmt.Println("Started, serving at ", port)
err = http.ListenAndServe(":"+port, nil)
if err != nil {
panic("ListenAndServe: " + err.Error())
}
}
|
[
"\"DELAY\"",
"\"PORT\""
] |
[] |
[
"DELAY",
"PORT"
] |
[]
|
["DELAY", "PORT"]
|
go
| 2 | 0 | |
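A small probe for the server above, assuming it is running locally with its defaults (PORT=8080, DELAY=1); it simply measures how long the delayed response takes:

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{Timeout: 5 * time.Second}
	start := time.Now()
	resp, err := client.Get("http://localhost:8080/") // default PORT from the server above
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Printf("got %s after %v\n", resp.Status, time.Since(start))
}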
integration/hsm/hsm_test.go
|
// Copyright 2021 Gravitational, Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"context"
"fmt"
"net"
"os"
"path/filepath"
"testing"
"time"
"github.com/google/uuid"
"github.com/gravitational/teleport"
"github.com/gravitational/teleport/api/client"
"github.com/gravitational/teleport/api/types"
"github.com/gravitational/teleport/lib/auth"
"github.com/gravitational/teleport/lib/auth/keystore"
"github.com/gravitational/teleport/lib/backend"
"github.com/gravitational/teleport/lib/backend/lite"
"github.com/gravitational/teleport/lib/modules"
"github.com/gravitational/teleport/lib/service"
"github.com/gravitational/teleport/lib/services"
"github.com/gravitational/teleport/lib/utils"
"github.com/jonboulle/clockwork"
"github.com/gravitational/trace"
"github.com/stretchr/testify/require"
)
// ports contains tcp ports allocated for all integration tests.
var ports utils.PortList
func init() {
// Allocate tcp ports for all HSM integration tests. Don't overlap with
// ports used by integration_test.go.
var err error
ports, err = utils.GetFreeTCPPorts(100, utils.PortStartingNumber+5000)
if err != nil {
panic(fmt.Sprintf("failed to allocate tcp ports for tests: %v", err))
}
}
type teleportService struct {
name string
log utils.Logger
config *service.Config
process *service.TeleportProcess
serviceChannel chan *service.TeleportProcess
errorChannel chan error
}
func newTeleportService(config *service.Config, name string) *teleportService {
return &teleportService{
config: config,
name: name,
log: config.Log,
serviceChannel: make(chan *service.TeleportProcess, 1),
errorChannel: make(chan error, 1),
}
}
func (t *teleportService) start(ctx context.Context) {
go func() {
t.errorChannel <- service.Run(ctx, *t.config, func(cfg *service.Config) (service.Process, error) {
t.log.Debugf("(Re)starting %s", t.name)
svc, err := service.NewTeleport(cfg)
if err == nil {
t.log.Debugf("started %s, writing to serviceChannel", t.name)
t.serviceChannel <- svc
}
return svc, trace.Wrap(err)
})
}()
}
func (t *teleportService) waitForStart(ctx context.Context) error {
t.log.Debugf("Waiting for %s to start", t.name)
t.start(ctx)
select {
case t.process = <-t.serviceChannel:
case err := <-t.errorChannel:
return trace.Wrap(err)
case <-ctx.Done():
return trace.Wrap(ctx.Err(), "timed out waiting for %s to start", t.name)
}
t.log.Debugf("read %s from serviceChannel", t.name)
return t.waitForReady(ctx)
}
func (t *teleportService) waitForReady(ctx context.Context) error {
t.log.Debugf("Waiting for %s to be ready", t.name)
eventChannel := make(chan service.Event)
t.process.WaitForEvent(ctx, service.TeleportReadyEvent, eventChannel)
select {
case <-eventChannel:
case <-ctx.Done():
return trace.Wrap(ctx.Err(), "timed out waiting for %s to be ready", t.name)
}
// also wait for AuthIdentityEvent so that we can read the admin credentials
// and create a test client
if t.process.GetAuthServer() != nil {
t.process.WaitForEvent(ctx, service.AuthIdentityEvent, eventChannel)
select {
case <-eventChannel:
case <-ctx.Done():
return trace.Wrap(ctx.Err(), "timed out waiting for %s auth identity event", t.name)
}
t.log.Debugf("%s is ready", t.name)
}
return nil
}
func (t *teleportService) waitForRestart(ctx context.Context) error {
t.log.Debugf("Waiting for %s to restart", t.name)
// get the new process
select {
case t.process = <-t.serviceChannel:
case err := <-t.errorChannel:
return trace.Wrap(err)
case <-ctx.Done():
return trace.Wrap(ctx.Err(), "timed out waiting for %s to restart", t.name)
}
// wait for the new process to be ready
err := t.waitForReady(ctx)
if err != nil {
return trace.Wrap(err)
}
t.log.Debugf("%s successfully restarted", t.name)
return nil
}
func (t *teleportService) waitForShutdown(ctx context.Context) error {
t.log.Debugf("Waiting for %s to shut down", t.name)
select {
case err := <-t.errorChannel:
return trace.Wrap(err)
case <-ctx.Done():
return trace.Wrap(ctx.Err(), "timed out waiting for %s to shut down", t.name)
}
}
func (t *teleportService) waitForLocalAdditionalKeys(ctx context.Context) error {
t.log.Debugf("Waiting for %s to have local additional keys", t.name)
clusterName, err := t.process.GetAuthServer().GetClusterName()
if err != nil {
return trace.Wrap(err)
}
hostCAID := types.CertAuthID{DomainName: clusterName.GetClusterName(), Type: types.HostCA}
for {
select {
case <-ctx.Done():
return trace.Wrap(ctx.Err(), "timed out waiting for %s to have local additional keys", t.name)
case <-time.After(250 * time.Millisecond):
}
ca, err := t.process.GetAuthServer().GetCertAuthority(hostCAID, true)
if err != nil {
return trace.Wrap(err)
}
if t.process.GetAuthServer().GetKeyStore().HasLocalAdditionalKeys(ca) {
break
}
}
t.log.Debugf("%s has local additional keys", t.name)
return nil
}
func (t *teleportService) waitForPhaseChange(ctx context.Context) error {
t.log.Debugf("Waiting for %s to change phase", t.name)
eventC := make(chan service.Event, 1)
t.process.WaitForEvent(ctx, service.TeleportPhaseChangeEvent, eventC)
select {
case <-ctx.Done():
return trace.Wrap(ctx.Err(), "timed out waiting for %s to change phase", t.name)
case <-eventC:
}
t.log.Debugf("%s changed phase", t.name)
return nil
}
type TeleportServices []*teleportService
func (s TeleportServices) forEach(f func(t *teleportService) error) error {
for i := range s {
if err := f(s[i]); err != nil {
return trace.Wrap(err)
}
}
return nil
}
func (s TeleportServices) waitForStart(ctx context.Context) error {
return s.forEach(func(t *teleportService) error { return t.waitForStart(ctx) })
}
func (s TeleportServices) waitForRestart(ctx context.Context) error {
return s.forEach(func(t *teleportService) error { return t.waitForRestart(ctx) })
}
func (s TeleportServices) waitForLocalAdditionalKeys(ctx context.Context) error {
return s.forEach(func(t *teleportService) error { return t.waitForLocalAdditionalKeys(ctx) })
}
func (s TeleportServices) waitForPhaseChange(ctx context.Context) error {
return s.forEach(func(t *teleportService) error { return t.waitForPhaseChange(ctx) })
}
func newHSMAuthConfig(ctx context.Context, t *testing.T, storageConfig backend.Config, log utils.Logger) *service.Config {
hostName, err := os.Hostname()
require.NoError(t, err)
config := service.MakeDefaultConfig()
config.PollingPeriod = 1 * time.Second
config.SSH.Enabled = false
config.Proxy.Enabled = false
config.CachePolicy.Enabled = true
config.ClientTimeout = time.Second
config.ShutdownTimeout = time.Minute
config.DataDir = t.TempDir()
config.Auth.SSHAddr.Addr = net.JoinHostPort(hostName, ports.Pop())
config.Auth.PublicAddrs = []utils.NetAddr{
{
AddrNetwork: "tcp",
Addr: hostName,
},
}
config.Auth.ClusterName, err = services.NewClusterNameWithRandomID(types.ClusterNameSpecV2{
ClusterName: "testcluster",
})
require.NoError(t, err)
config.AuthServers = append(config.AuthServers, config.Auth.SSHAddr)
config.Auth.StorageConfig = storageConfig
fakeClock := clockwork.NewFakeClock()
config.Clock = fakeClock
config.Auth.StaticTokens, err = types.NewStaticTokens(types.StaticTokensSpecV2{
StaticTokens: []types.ProvisionTokenV1{
{
Roles: []types.SystemRole{"Proxy", "Node"},
Token: "foo",
},
},
})
require.NoError(t, err)
go func() {
for {
select {
case <-time.After(10 * time.Millisecond):
fakeClock.Advance(100 * time.Millisecond)
case <-ctx.Done():
return
}
}
}()
config.Auth.KeyStore = keystore.SetupSoftHSMTest(t)
config.Log = log
return config
}
func newProxyConfig(ctx context.Context, t *testing.T, authAddr utils.NetAddr, log utils.Logger) *service.Config {
hostName, err := os.Hostname()
require.NoError(t, err)
config := service.MakeDefaultConfig()
config.PollingPeriod = 1 * time.Second
config.Token = "foo"
config.SSH.Enabled = true
config.SSH.Addr.Addr = net.JoinHostPort(hostName, ports.Pop())
config.Auth.Enabled = false
config.Proxy.Enabled = true
config.Proxy.DisableWebInterface = true
config.Proxy.DisableWebService = true
config.Proxy.DisableReverseTunnel = true
config.Proxy.SSHAddr.Addr = net.JoinHostPort(hostName, ports.Pop())
config.Proxy.WebAddr.Addr = net.JoinHostPort(hostName, ports.Pop())
config.CachePolicy.Enabled = true
config.PollingPeriod = 500 * time.Millisecond
config.ClientTimeout = time.Second
config.ShutdownTimeout = time.Minute
config.DataDir = t.TempDir()
require.NoError(t, err)
config.AuthServers = append(config.AuthServers, authAddr)
fakeClock := clockwork.NewFakeClock()
config.Clock = fakeClock
go func() {
for {
select {
case <-time.After(10 * time.Millisecond):
fakeClock.Advance(100 * time.Millisecond)
case <-ctx.Done():
return
}
}
}()
config.Log = log
return config
}
// Tests a single CA rotation with a single HSM auth server
func TestHSMRotation(t *testing.T) {
if os.Getenv("SOFTHSM2_PATH") == "" {
t.Skip("Skipping test as SOFTHSM2_PATH is not set")
}
modules.SetTestModules(t, &modules.TestModules{
TestBuildType: modules.BuildEnterprise,
TestFeatures: modules.Features{
HSM: true,
},
})
// pick a conservative timeout
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
t.Cleanup(cancel)
log := utils.NewLoggerForTests()
storageConfig := backend.Config{
Type: lite.GetName(),
Params: backend.Params{
"path": t.TempDir(),
"poll_stream_period": 50 * time.Millisecond,
},
}
var err error
log.Debug("TestHSMRotation: starting auth server")
authConfig := newHSMAuthConfig(ctx, t, storageConfig, log)
auth1 := newTeleportService(authConfig, "auth1")
t.Cleanup(func() {
require.NoError(t, auth1.process.GetAuthServer().GetKeyStore().DeleteUnusedKeys(nil))
require.NoError(t, auth1.process.Close())
})
teleportServices := TeleportServices{auth1}
log.Debug("TestHSMRotation: waiting for auth server to start")
require.NoError(t, auth1.waitForStart(ctx))
// start a proxy to make sure it can get creds at each stage of rotation
log.Debug("TestHSMRotation: starting proxy")
proxy := newTeleportService(newProxyConfig(ctx, t, authConfig.Auth.SSHAddr, log), "proxy")
require.NoError(t, proxy.waitForStart(ctx))
teleportServices = append(teleportServices, proxy)
log.Debug("TestHSMRotation: sending rotation request init")
err = auth1.process.GetAuthServer().RotateCertAuthority(auth.RotateRequest{
Type: types.HostCA,
TargetPhase: types.RotationPhaseInit,
Mode: types.RotationModeManual,
})
require.NoError(t, err)
require.NoError(t, teleportServices.waitForPhaseChange(ctx))
log.Debug("TestHSMRotation: sending rotation request update_clients")
err = auth1.process.GetAuthServer().RotateCertAuthority(auth.RotateRequest{
Type: types.HostCA,
TargetPhase: types.RotationPhaseUpdateClients,
Mode: types.RotationModeManual,
})
require.NoError(t, err)
require.NoError(t, teleportServices.waitForRestart(ctx))
log.Debug("TestHSMRotation: sending rotation request update_servers")
err = auth1.process.GetAuthServer().RotateCertAuthority(auth.RotateRequest{
Type: types.HostCA,
TargetPhase: types.RotationPhaseUpdateServers,
Mode: types.RotationModeManual,
})
require.NoError(t, err)
require.NoError(t, teleportServices.waitForRestart(ctx))
log.Debug("TestHSMRotation: sending rotation request standby")
err = auth1.process.GetAuthServer().RotateCertAuthority(auth.RotateRequest{
Type: types.HostCA,
TargetPhase: types.RotationPhaseStandby,
Mode: types.RotationModeManual,
})
require.NoError(t, err)
require.NoError(t, teleportServices.waitForRestart(ctx))
}
// Tests multiple CA rotations and rollbacks with 2 HSM auth servers in an HA configuration
func TestHSMDualAuthRotation(t *testing.T) {
if os.Getenv("TELEPORT_ETCD_TEST") == "" || os.Getenv("SOFTHSM2_PATH") == "" {
t.Skip("Skipping test as either etcd or SoftHSM2 is not enabled")
}
modules.SetTestModules(t, &modules.TestModules{
TestBuildType: modules.BuildEnterprise,
TestFeatures: modules.Features{
HSM: true,
},
})
// pick a conservative timeout
ctx, cancel := context.WithTimeout(context.Background(), 8*time.Minute)
t.Cleanup(cancel)
log := utils.NewLoggerForTests()
backendPrefix := uuid.NewString()
storageConfig := backend.Config{
Type: "etcd",
Params: backend.Params{
"peers": []string{"https://127.0.0.1:2379"},
"prefix": backendPrefix,
"tls_key_file": "../../examples/etcd/certs/client-key.pem",
"tls_cert_file": "../../examples/etcd/certs/client-cert.pem",
"tls_ca_file": "../../examples/etcd/certs/ca-cert.pem",
},
}
var err error
// start a cluster with 1 auth server and a proxy
log.Debug("TestHSMDualAuthRotation: Starting auth server 1")
auth1Config := newHSMAuthConfig(ctx, t, storageConfig, log)
auth1 := newTeleportService(auth1Config, "auth1")
t.Cleanup(func() {
require.NoError(t, auth1.process.GetAuthServer().GetKeyStore().DeleteUnusedKeys(nil))
require.NoError(t, auth1.process.Close())
})
authServices := TeleportServices{auth1}
teleportServices := append(TeleportServices{}, authServices...)
require.NoError(t, authServices.waitForStart(ctx))
t.Cleanup(func() {
// clean up the etcd backend
bk := auth1.process.GetBackend()
err := bk.DeleteRange(context.Background(), []byte(backendPrefix),
backend.RangeEnd([]byte(backendPrefix)))
require.NoError(t, err)
})
log.Debug("TestHSMDualAuthRotation: Starting load balancer")
hostName, err := os.Hostname()
require.NoError(t, err)
authAddr := utils.MustParseAddr(net.JoinHostPort(hostName, ports.Pop()))
lb, err := utils.NewLoadBalancer(ctx, *authAddr, auth1Config.Auth.SSHAddr)
require.NoError(t, err)
require.NoError(t, lb.Listen())
go lb.Serve()
t.Cleanup(func() { require.NoError(t, lb.Close()) })
// start a proxy to make sure it can get creds at each stage of rotation
log.Debug("TestHSMDualAuthRotation: Starting proxy")
proxyConfig := newProxyConfig(ctx, t, *authAddr, log)
proxy := newTeleportService(proxyConfig, "proxy")
require.NoError(t, proxy.waitForStart(ctx))
teleportServices = append(teleportServices, proxy)
// add a new auth server
log.Debug("TestHSMDualAuthRotation: Starting auth server 2")
auth2Config := newHSMAuthConfig(ctx, t, storageConfig, log)
auth2 := newTeleportService(auth2Config, "auth2")
require.NoError(t, auth2.waitForStart(ctx))
t.Cleanup(func() {
require.NoError(t, auth2.process.GetAuthServer().GetKeyStore().DeleteUnusedKeys(nil))
require.NoError(t, auth2.process.Close())
})
authServices = append(authServices, auth2)
teleportServices = append(teleportServices, auth2)
// make sure the admin identity used by tctl works
getAdminClient := func() *auth.Client {
identity, err := auth.ReadLocalIdentity(
filepath.Join(auth2Config.DataDir, teleport.ComponentProcess),
auth.IdentityID{Role: types.RoleAdmin, HostUUID: auth2Config.HostUUID})
require.NoError(t, err)
tlsConfig, err := identity.TLSConfig(nil)
require.NoError(t, err)
authAddrs := []utils.NetAddr{auth2Config.Auth.SSHAddr}
clt, err := auth.NewClient(client.Config{
Addrs: utils.NetAddrsToStrings(authAddrs),
Credentials: []client.Credentials{
client.LoadTLS(tlsConfig),
},
})
require.NoError(t, err)
return clt
}
testClient := func(clt *auth.Client) error {
_, err = clt.GetClusterName()
return err
}
clt := getAdminClient()
require.NoError(t, testClient(clt))
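	// Each stage below advances the manual HostCA rotation by one phase and then
	// verifies that the auth servers, the proxy, and a freshly created admin client
	// all keep working at that phase.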
stages := []struct {
targetPhase string
verify func(t *testing.T)
}{
{
targetPhase: types.RotationPhaseInit,
verify: func(t *testing.T) {
require.NoError(t, teleportServices.waitForPhaseChange(ctx))
require.NoError(t, authServices.waitForLocalAdditionalKeys(ctx))
clt = getAdminClient()
require.NoError(t, testClient(clt))
},
},
{
targetPhase: types.RotationPhaseUpdateClients,
verify: func(t *testing.T) {
require.NoError(t, teleportServices.waitForRestart(ctx))
clt = getAdminClient()
require.NoError(t, testClient(clt))
},
},
{
targetPhase: types.RotationPhaseUpdateServers,
verify: func(t *testing.T) {
require.NoError(t, teleportServices.waitForRestart(ctx))
clt = getAdminClient()
require.NoError(t, testClient(clt))
},
},
{
targetPhase: types.RotationPhaseStandby,
verify: func(t *testing.T) {
require.NoError(t, teleportServices.waitForRestart(ctx))
clt = getAdminClient()
require.NoError(t, testClient(clt))
},
},
}
// do a full rotation
for _, stage := range stages {
log.Debugf("TestHSMDualAuthRotation: Sending rotate request %s", stage.targetPhase)
require.NoError(t, auth1.process.GetAuthServer().RotateCertAuthority(auth.RotateRequest{
Type: types.HostCA,
TargetPhase: stage.targetPhase,
Mode: types.RotationModeManual,
}))
stage.verify(t)
}
// Safe to send traffic to new auth server now that a full rotation has been completed.
lb.AddBackend(auth2Config.Auth.SSHAddr)
	// the load-balanced client should work with either backend
getAdminClient = func() *auth.Client {
identity, err := auth.ReadLocalIdentity(
filepath.Join(auth2Config.DataDir, teleport.ComponentProcess),
auth.IdentityID{Role: types.RoleAdmin, HostUUID: auth2Config.HostUUID})
require.NoError(t, err)
tlsConfig, err := identity.TLSConfig(nil)
require.NoError(t, err)
authAddrs := []string{lb.Addr().String()}
clt, err := auth.NewClient(client.Config{
Addrs: authAddrs,
Credentials: []client.Credentials{
client.LoadTLS(tlsConfig),
},
})
require.NoError(t, err)
return clt
}
testClient = func(clt *auth.Client) error {
_, err1 := clt.GetClusterName()
_, err2 := clt.GetClusterName()
return trace.NewAggregate(err1, err2)
}
clt = getAdminClient()
require.NoError(t, testClient(clt))
// Do another full rotation from the new auth server
for _, stage := range stages {
log.Debugf("TestHSMDualAuthRotation: Sending rotate request %s", stage.targetPhase)
require.NoError(t, auth2.process.GetAuthServer().RotateCertAuthority(auth.RotateRequest{
Type: types.HostCA,
TargetPhase: stage.targetPhase,
Mode: types.RotationModeManual,
}))
stage.verify(t)
}
// test rollbacks
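	// Each group below starts a new rotation and aborts it at a progressively later
	// phase (init, update_clients, update_servers) before returning to standby,
	// confirming the cluster recovers from a rollback at any point.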
stages = []struct {
targetPhase string
verify func(t *testing.T)
}{
{
targetPhase: types.RotationPhaseInit,
verify: func(t *testing.T) {
require.NoError(t, teleportServices.waitForPhaseChange(ctx))
require.NoError(t, authServices.waitForLocalAdditionalKeys(ctx))
clt := getAdminClient()
require.NoError(t, testClient(clt))
},
},
{
targetPhase: types.RotationPhaseRollback,
verify: func(t *testing.T) {
require.NoError(t, teleportServices.waitForRestart(ctx))
clt := getAdminClient()
require.NoError(t, testClient(clt))
},
},
{
targetPhase: types.RotationPhaseStandby,
verify: func(t *testing.T) {
require.NoError(t, teleportServices.waitForRestart(ctx))
clt := getAdminClient()
require.NoError(t, testClient(clt))
},
},
{
targetPhase: types.RotationPhaseInit,
verify: func(t *testing.T) {
require.NoError(t, teleportServices.waitForPhaseChange(ctx))
require.NoError(t, authServices.waitForLocalAdditionalKeys(ctx))
clt := getAdminClient()
require.NoError(t, testClient(clt))
},
},
{
targetPhase: types.RotationPhaseUpdateClients,
verify: func(t *testing.T) {
require.NoError(t, teleportServices.waitForRestart(ctx))
clt := getAdminClient()
require.NoError(t, testClient(clt))
},
},
{
targetPhase: types.RotationPhaseRollback,
verify: func(t *testing.T) {
require.NoError(t, teleportServices.waitForRestart(ctx))
clt := getAdminClient()
require.NoError(t, testClient(clt))
},
},
{
targetPhase: types.RotationPhaseStandby,
verify: func(t *testing.T) {
require.NoError(t, teleportServices.waitForRestart(ctx))
clt := getAdminClient()
require.NoError(t, testClient(clt))
},
},
{
targetPhase: types.RotationPhaseInit,
verify: func(t *testing.T) {
require.NoError(t, teleportServices.waitForPhaseChange(ctx))
require.NoError(t, authServices.waitForLocalAdditionalKeys(ctx))
clt := getAdminClient()
require.NoError(t, testClient(clt))
},
},
{
targetPhase: types.RotationPhaseUpdateClients,
verify: func(t *testing.T) {
require.NoError(t, teleportServices.waitForRestart(ctx))
clt := getAdminClient()
require.NoError(t, testClient(clt))
},
},
{
targetPhase: types.RotationPhaseUpdateServers,
verify: func(t *testing.T) {
require.NoError(t, teleportServices.waitForRestart(ctx))
clt := getAdminClient()
require.NoError(t, testClient(clt))
},
},
{
targetPhase: types.RotationPhaseRollback,
verify: func(t *testing.T) {
require.NoError(t, teleportServices.waitForRestart(ctx))
clt := getAdminClient()
require.NoError(t, testClient(clt))
},
},
{
targetPhase: types.RotationPhaseStandby,
verify: func(t *testing.T) {
require.NoError(t, teleportServices.waitForRestart(ctx))
clt := getAdminClient()
require.NoError(t, testClient(clt))
},
},
}
for _, stage := range stages {
log.Debugf("TestHSMDualAuthRotation: Sending rotate request %s", stage.targetPhase)
require.NoError(t, auth1.process.GetAuthServer().RotateCertAuthority(auth.RotateRequest{
Type: types.HostCA,
TargetPhase: stage.targetPhase,
Mode: types.RotationModeManual,
}))
stage.verify(t)
}
}
// Tests a dual-auth server migration from raw keys to HSM keys
func TestHSMMigrate(t *testing.T) {
if os.Getenv("TELEPORT_ETCD_TEST") == "" || os.Getenv("SOFTHSM2_PATH") == "" {
t.Skip("Skipping test as either etcd or SoftHSM2 is not enabled")
}
modules.SetTestModules(t, &modules.TestModules{
TestBuildType: modules.BuildEnterprise,
TestFeatures: modules.Features{
HSM: true,
},
})
// pick a conservative timeout
ctx, cancel := context.WithTimeout(context.Background(), 8*time.Minute)
t.Cleanup(cancel)
log := utils.NewLoggerForTests()
backendPrefix := uuid.NewString()
storageConfig := backend.Config{
Type: "etcd",
Params: backend.Params{
"peers": []string{"https://127.0.0.1:2379"},
"prefix": backendPrefix,
"tls_key_file": "../../examples/etcd/certs/client-key.pem",
"tls_cert_file": "../../examples/etcd/certs/client-cert.pem",
"tls_ca_file": "../../examples/etcd/certs/ca-cert.pem",
},
}
var err error
// start a dual auth non-hsm cluster
log.Debug("TestHSMMigrate: Starting auth server 1")
auth1Config := newHSMAuthConfig(ctx, t, storageConfig, log)
auth1Config.Auth.KeyStore = keystore.Config{}
auth1 := newTeleportService(auth1Config, "auth1")
t.Cleanup(func() {
require.NoError(t, auth1.process.Close())
})
auth2Config := newHSMAuthConfig(ctx, t, storageConfig, log)
auth2Config.Auth.KeyStore = keystore.Config{}
auth2 := newTeleportService(auth2Config, "auth2")
t.Cleanup(func() {
require.NoError(t, auth2.process.Close())
})
require.NoError(t, auth1.waitForStart(ctx))
require.NoError(t, auth2.waitForStart(ctx))
t.Cleanup(func() {
// clean up the etcd backend
bk := auth1.process.GetBackend()
err := bk.DeleteRange(context.Background(), []byte(backendPrefix),
backend.RangeEnd([]byte(backendPrefix)))
require.NoError(t, err)
})
log.Debug("TestHSMMigrate: Starting load balancer")
hostName, err := os.Hostname()
require.NoError(t, err)
authAddr := utils.MustParseAddr(net.JoinHostPort(hostName, ports.Pop()))
lb, err := utils.NewLoadBalancer(ctx, *authAddr, auth1Config.Auth.SSHAddr, auth2Config.Auth.SSHAddr)
require.NoError(t, err)
require.NoError(t, lb.Listen())
go lb.Serve()
t.Cleanup(func() { require.NoError(t, lb.Close()) })
// start a proxy to make sure it can get creds at each stage of migration
log.Debug("TestHSMMigrate: Starting proxy")
proxyConfig := newProxyConfig(ctx, t, *authAddr, log)
proxy := newTeleportService(proxyConfig, "proxy")
require.NoError(t, proxy.waitForStart(ctx))
t.Cleanup(func() {
require.NoError(t, proxy.process.Close())
})
// make sure the admin identity used by tctl works
getAdminClient := func() *auth.Client {
identity, err := auth.ReadLocalIdentity(
filepath.Join(auth2Config.DataDir, teleport.ComponentProcess),
auth.IdentityID{Role: types.RoleAdmin, HostUUID: auth2Config.HostUUID})
require.NoError(t, err)
tlsConfig, err := identity.TLSConfig(nil)
require.NoError(t, err)
authAddrs := []utils.NetAddr{auth2Config.Auth.SSHAddr}
clt, err := auth.NewClient(client.Config{
Addrs: utils.NetAddrsToStrings(authAddrs),
Credentials: []client.Credentials{
client.LoadTLS(tlsConfig),
},
})
require.NoError(t, err)
return clt
}
testClient := func(clt *auth.Client) error {
_, err1 := clt.GetClusterName()
_, err2 := clt.GetClusterName()
return trace.NewAggregate(err1, err2)
}
clt := getAdminClient()
require.NoError(t, testClient(clt))
// Phase 1: migrate auth1 to HSM
lb.RemoveBackend(auth1Config.Auth.SSHAddr)
auth1.process.Close()
require.NoError(t, auth1.waitForShutdown(ctx))
auth1Config.Auth.KeyStore = keystore.SetupSoftHSMTest(t)
auth1 = newTeleportService(auth1Config, "auth1")
require.NoError(t, auth1.waitForStart(ctx))
clt = getAdminClient()
require.NoError(t, testClient(clt))
authServices := TeleportServices{auth1, auth2}
teleportServices := TeleportServices{auth1, auth2, proxy}
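	// After swapping an auth server over to SoftHSM-backed keys, run a full manual
	// HostCA rotation so the cluster CAs end up using the new keystore.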
stages := []struct {
targetPhase string
verify func(t *testing.T)
}{
{
targetPhase: types.RotationPhaseInit,
verify: func(t *testing.T) {
require.NoError(t, teleportServices.waitForPhaseChange(ctx))
require.NoError(t, authServices.waitForLocalAdditionalKeys(ctx))
clt := getAdminClient()
require.NoError(t, testClient(clt))
},
},
{
targetPhase: types.RotationPhaseUpdateClients,
verify: func(t *testing.T) {
require.NoError(t, teleportServices.waitForRestart(ctx))
clt = getAdminClient()
require.NoError(t, testClient(clt))
},
},
{
targetPhase: types.RotationPhaseUpdateServers,
verify: func(t *testing.T) {
require.NoError(t, teleportServices.waitForRestart(ctx))
clt = getAdminClient()
require.NoError(t, testClient(clt))
},
},
{
targetPhase: types.RotationPhaseStandby,
verify: func(t *testing.T) {
require.NoError(t, teleportServices.waitForRestart(ctx))
clt = getAdminClient()
require.NoError(t, testClient(clt))
},
},
}
// do a full rotation
for _, stage := range stages {
log.Debugf("TestHSMMigrate: Sending rotate request %s", stage.targetPhase)
require.NoError(t, auth1.process.GetAuthServer().RotateCertAuthority(auth.RotateRequest{
Type: types.HostCA,
TargetPhase: stage.targetPhase,
Mode: types.RotationModeManual,
}))
stage.verify(t)
}
// Safe to send traffic to new auth1 again
lb.AddBackend(auth1Config.Auth.SSHAddr)
// Phase 2: migrate auth2 to HSM
lb.RemoveBackend(auth2Config.Auth.SSHAddr)
auth2.process.Close()
require.NoError(t, auth2.waitForShutdown(ctx))
auth2Config.Auth.KeyStore = keystore.SetupSoftHSMTest(t)
auth2 = newTeleportService(auth2Config, "auth2")
require.NoError(t, auth2.waitForStart(ctx))
authServices = TeleportServices{auth1, auth2}
teleportServices = TeleportServices{auth1, auth2, proxy}
clt = getAdminClient()
require.NoError(t, testClient(clt))
// do a full rotation
for _, stage := range stages {
log.Debugf("TestHSMMigrate: Sending rotate request %s", stage.targetPhase)
require.NoError(t, auth1.process.GetAuthServer().RotateCertAuthority(auth.RotateRequest{
Type: types.HostCA,
TargetPhase: stage.targetPhase,
Mode: types.RotationModeManual,
}))
stage.verify(t)
}
// Safe to send traffic to new auth2 again
lb.AddBackend(auth2Config.Auth.SSHAddr)
require.NoError(t, testClient(clt))
}
|
[
"\"SOFTHSM2_PATH\"",
"\"TELEPORT_ETCD_TEST\"",
"\"SOFTHSM2_PATH\"",
"\"TELEPORT_ETCD_TEST\"",
"\"SOFTHSM2_PATH\""
] |
[] |
[
"SOFTHSM2_PATH",
"TELEPORT_ETCD_TEST"
] |
[]
|
["SOFTHSM2_PATH", "TELEPORT_ETCD_TEST"]
|
go
| 2 | 0 | |
.gitlab/syncer/sync.go
|
package main
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"github.com/google/go-github/v32/github"
"github.com/sirupsen/logrus"
"github.com/xanzy/go-gitlab"
"net/http"
"net/url"
"os"
"time"
)
var log = logrus.New()
const (
LinbitCA = `-----BEGIN CERTIFICATE-----
MIIDkjCCAnqgAwIBAgIBGjANBgkqhkiG9w0BAQsFADAeMQswCQYDVQQGEwJhdDEPMA0GA1UEChMGTElOQklUMB4XDTIwMDMyNDEwNDIwMFoXDTIxMDMyNDEwNDIwMFowWTELMAkGA1UEBhMCQVQxDzANBgNVBAoTBkxJTkJJVDEWMBQGA1UEAxMNZ2l0bGFiLmxpbmJpdDEhMB8GCSqGSIb3DQEJARYSdGVjaG5pa0BsaW5iaXQuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAkbvKHtaM526w1/+TrYYxDvPjK0Kfd82+iXk2hFjIH8qyOGkPwGJBlWM+l5GgXI63mEqSkoGpcUCdxWHd6KM4+foN5e18/B60uuMMbFKagy5obnVWWNhtERSS3Ni3MpX1ZlpJGf7tWVUVQPE2Cw2nA5KdTuJAGwXckt4lIqs4zJz0rbJyvjF+OS0spIvryYm0c0DW/00NLQMrAs2BaFeBUTEVx1oP0LrdoeUZZfgy6KW28l3q9mt3WpTkFIATl/KLbH8exnTA8ML80AjLt/GXuNZjH6RbjnWNLqnu51/tDfUUBba7i98nK6RSIu09TcEHl0NzvEnCTvHimp3vcbATkQIDAQABo4GfMIGcMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFERTVySH0KENKQ2njrSBPXUjuVSEMAsGA1UdDwQEAwIF4DATBgNVHSUEDDAKBggrBgEFBQcDATAYBgNVHREEETAPgg1naXRsYWIubGluYml0MBEGCWCGSAGG+EIBAQQEAwIGQDAeBglghkgBhvhCAQ0EERYPeGNhIGNlcnRpZmljYXRlMA0GCSqGSIb3DQEBCwUAA4IBAQAQgMs/uqWvDOHmFXonXSX0PZbZ+ktmhWdIrXqahdLH2aCS0ndmmijUyHSRG9ivgCgOwdorIy4QIpU7HR/ND8BJDj9TL8x7xfq4WCCtdp54zU18kdoqPJ2/YqhI8cAEiW68X+B83oZw/UpWXymf6Z4oSxPZWBauSGhcvTH++mBC7g0pJQGpl58flRJNVu+E6x2b4SW+8oh6bIFRKOThj/wNAFs2iz/tgHrDvDpEjYNxOdI3OubMB1wv53lhKLW+/VI/qu8OLX5fN3Q2g1uJA3QOWqoTmnV72LI7EeMi9/iq+mEWiK27Bq68+km+rJk02vq97e+PZN6hQznY6HiIf2aK
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIDKTCCAhGgAwIBAgIBATANBgkqhkiG9w0BAQ0FADAeMQswCQYDVQQGEwJhdDEPMA0GA1UEChMGTElOQklUMB4XDTE3MDYyMzExNDQwMFoXDTM3MDYyMzExNDQwMFowHjELMAkGA1UEBhMCYXQxDzANBgNVBAoTBkxJTkJJVDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALifmUvrAWFkYMvtNJdZ3woWeqmcL+gdeVBiBdbdM8oPzaJHFWqJenXWgrUgWk9NBrjQMryjfnv+OUQ3DscxeOiEkgNfnadxOGjmb//HPCiQSgCzzqro/uhjuKTtSfVc6MmFl1ud0wUaMwcXqMFKa+x8/9AgajEgzMImy77QskbPFX7gii3cxUY7s3PmgKenbSXNmw04bHnHUrT/J9UR67wJd9XQs1rK5EcwXXDEXceq6h56S1d17bDBIHh7snnSyuq1yBYecTH8SG+bKGMr/kHKtJdwyaeBNimajj7Hx5nyliS6d2GeprPOhehIVV1PQWh8CCWi8fKtul76fCEaoy0CAwEAAaNyMHAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUC2+4fdA6I5A5ftH9pAaqrj1L4MkwCwYDVR0PBAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzAeBglghkgBhvhCAQ0EERYPeGNhIGNlcnRpZmljYXRlMA0GCSqGSIb3DQEBDQUAA4IBAQBM36FkhyNqCnocwAf/hWEQbKv+y1WprXV6uAgrmKGCTCOw5j312IhOs11omuDXqmW5Y9EmoDxbvL4oMee57wjiQfbNexZbfdxLf/1Agy1LS7H40Zu3pOVGgYeQ9DZ2mvtti1WQFnh7yVYOT4D0IqkYwyN2Wn+jxaHpM97AfZKsr/FDDQMag7PO5yPwZnYtF/6X3ebRXl12/hFI3CSUBN5HJn/O/U5e7NDKUZKAaerPG5ZkNFr+Ur4E1vHVPMO2PsOYvFpnZ72YTpy0XLDIUOWM7I5n3gp+pntRPT2lu14ItRmuOPGGj7MpvEj2+FRebiwybKVn799qmfkxxVCwPSqI
-----END CERTIFICATE-----
`
SourceProjectOwner = "piraeusdatastore"
SourceProjectRepo = "piraeus-operator"
SubprojectPath = "piraeus-operator"
TargetBase = "https://gitlab.linbit"
TargetBranch = "master"
TargetProject = "kubernetes/linstor-operator-builder"
// Minimum number of approvals required for a PR from external people to be synced
MinApprovalsForSync = 1
)
func linbitHttpClient() (*http.Client, error) {
pool := x509.NewCertPool()
ok := pool.AppendCertsFromPEM([]byte(LinbitCA))
if !ok {
return nil, fmt.Errorf("failed to load CA certificate")
}
return &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
RootCAs: pool,
},
},
}, nil
}
func main() {
log.Level = logrus.DebugLevel
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
defer cancel()
gitlabHttpClient, err := linbitHttpClient()
if err != nil {
log.WithField("err", err).Fatal("failed to load http client")
}
log.Info("setting up clients")
gitlabToken := os.Getenv("GITLAB_TOKEN")
// We only access public information on github
ghClient := github.NewClient(nil)
// For gitlab we need read and write permissions
glClient, err := gitlab.NewClient(gitlabToken, gitlab.WithBaseURL(TargetBase), gitlab.WithHTTPClient(gitlabHttpClient))
if err != nil {
log.WithField("err", err).Fatal("gitlab connection failed")
}
log.Info("fetch open pull requests")
openSrcPulls, _, err := ghClient.PullRequests.List(ctx, SourceProjectOwner, SourceProjectRepo, nil)
if err != nil {
log.WithField("err", err).Fatal("failed to fetch source PRs")
}
destProject, _, err := glClient.Projects.GetProject(TargetProject, nil)
if err != nil {
log.WithField("err", err).Fatal("failed to get destination project")
}
log.Info("syncing all pull requests")
for _, srcPull := range openSrcPulls {
log := log.WithField("srcPull", srcPull)
shouldSync, err := shouldSyncPull(ctx, ghClient, srcPull)
if err != nil {
log.WithField("err", err).Fatal("failed to check approval status of pull request")
}
if !shouldSync {
log.Info("pull request does not meet sync criteria, skipping...")
continue
}
err = syncPull(ctx, srcPull, destProject.ID, glClient)
if err != nil {
log.WithField("err", err).Fatal("failed to sync source pull")
}
}
}
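// shouldSyncPull reports whether an upstream pull request may be mirrored:
// PRs opened by collaborators, members, or owners are always synced, while
// external PRs need at least MinApprovalsForSync approving reviews on the
// current head commit.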
func shouldSyncPull(ctx context.Context, client *github.Client, pull *github.PullRequest) (bool, error) {
// "COLLABORATOR", "CONTRIBUTOR", "FIRST_TIMER", "FIRST_TIME_CONTRIBUTOR", "MEMBER", "OWNER", or "NONE"
var NoReviewRequired = []string{
"COLLABORATOR",
"MEMBER",
"OWNER",
}
for _, assoc := range NoReviewRequired {
if pull.GetAuthorAssociation() == assoc {
return true, nil
}
}
// Check for reviews. We only want to sync merge requests that have an up-to-date approval
reviews, _, err := client.PullRequests.ListReviews(ctx, SourceProjectOwner, SourceProjectRepo, *pull.Number, nil)
if err != nil {
return false, err
}
upToDateAndApproved := 0
for _, review := range reviews {
if review.GetState() == "APPROVED" && review.GetCommitID() == pull.GetHead().GetSHA() {
upToDateAndApproved += 1
}
}
return upToDateAndApproved >= MinApprovalsForSync, nil
}
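// syncPull mirrors an upstream pull request into the GitLab project: it creates
// (or reuses) a dedicated branch, points the submodule at the PR's head commit,
// and opens a merge request if none exists yet.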
func syncPull(ctx context.Context, srcPull *github.PullRequest, destProjectID int, destClient *gitlab.Client) error {
branchName := formatBranchName(*srcPull.Number)
srcSHA := srcPull.GetHead().GetSHA()
log := logrus.WithFields(logrus.Fields{
"srcPull.ID": srcPull.ID,
"branchName": branchName,
"srcSHA": srcSHA,
})
log.Info("syncing pull to branch")
branch, resp, err := destClient.Branches.GetBranch(destProjectID, branchName, gitlab.WithContext(ctx))
// 404 is expected here
if err != nil && (resp == nil || resp.StatusCode != 404) {
log.WithField("err", err).Info("failed to get branch")
return err
}
if branch == nil {
log.Info("new branch will be created")
sourceBranch := "master"
createdBranch, _, err := destClient.Branches.CreateBranch(destProjectID, &gitlab.CreateBranchOptions{
Branch: &branchName,
Ref: &sourceBranch,
}, gitlab.WithContext(ctx))
if err != nil {
log.WithField("err", err).Info("failed to create new branch")
return err
}
branch = createdBranch
}
log.Info("check if submodule is up-to-date with upstream")
file, _, err := destClient.RepositoryFiles.GetFile(destProjectID, SubprojectPath, &gitlab.GetFileOptions{
Ref: &branchName,
}, gitlab.WithContext(ctx))
if err != nil {
log.WithField("err", err).Info("failed to fetch submodule information")
return err
}
if file.BlobID != srcSHA {
log.Info("update submodule with newest commit from upstream")
err := updateSubmodule(destClient, destProjectID, branchName, SubprojectPath, srcSHA, gitlab.WithContext(ctx))
if err != nil {
log.WithField("err", err).Info("failed to update submodule")
return err
}
}
log.Info("check if merge request exists")
mrs, _, err := destClient.MergeRequests.ListProjectMergeRequests(destProjectID, &gitlab.ListProjectMergeRequestsOptions{
SourceBranch: &branchName,
}, gitlab.WithContext(ctx))
if err != nil {
log.WithField("err", err).Info("Failed to fetch merge requests")
return err
}
	if len(mrs) > 1 {
		log.WithField("mrs", mrs).Info("found more than 1 merge request")
		return fmt.Errorf("found %d merge requests for branch %s, expected at most one", len(mrs), branchName)
	}
if len(mrs) == 0 {
log.Info("create new MR")
title := "WIP: Upstream PR: " + *srcPull.Title
description := formatDescription(srcPull)
squash := true
removeBranch := true
allowCollab := true
target := TargetBranch
_, _, err := destClient.MergeRequests.CreateMergeRequest(destProjectID, &gitlab.CreateMergeRequestOptions{
SourceBranch: &branchName,
TargetBranch: &target,
Labels: &gitlab.Labels{"upstream"},
Title: &title,
Description: &description,
Squash: &squash,
AllowCollaboration: &allowCollab,
RemoveSourceBranch: &removeBranch,
})
if err != nil {
log.WithField("err", err).Info("Failed to create merge request")
return err
}
}
return nil
}
// update an existing submodule by setting it to a given commit
// https://docs.gitlab.com/ee/api/repository_submodules.html
func updateSubmodule(client *gitlab.Client, pid int, branch string, submodulePath string, updateSha string, options ...gitlab.RequestOptionFunc) error {
type SubmoduleOptions struct {
Branch *string `url:"branch,omitempty" json:"branch,omitempty"`
CommitSHA *string `url:"commit_sha,omitempty" json:"commit_sha,omitempty"`
CommitMessage *string `url:"commit_message,omitempty" json:"commit_message,omitempty"`
}
msg := fmt.Sprintf("Sync %s with upstream pull request", SubprojectPath)
path := fmt.Sprintf("/projects/%d/repository/submodules/%s", pid, url.PathEscape(submodulePath))
// Note: Even though the commit is most likely part of different repository than the normal upstream (i.e. it's from
// someones private fork), the commit is still accessible via the main repository:
// > In other words, commits in a pull request are available in a repository even before the pull request is merged
// src: https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/checking-out-pull-requests-locally#modifying-an-inactive-pull-request-locally
req, err := client.NewRequest("PUT", path, &SubmoduleOptions{
Branch: &branch,
CommitSHA: &updateSha,
CommitMessage: &msg,
}, options)
if err != nil {
return err
}
_, err = client.Do(req, nil)
return err
}
func formatBranchName(nr int) string {
return fmt.Sprintf("piraeus-pull-%d", nr)
}
func formatDescription(upstream *github.PullRequest) string {
const template = `# [Source](%s)
%s
`
return fmt.Sprintf(template, *upstream.HTMLURL, *upstream.Body)
}
|
[
"\"GITLAB_TOKEN\""
] |
[] |
[
"GITLAB_TOKEN"
] |
[]
|
["GITLAB_TOKEN"]
|
go
| 1 | 0 | |
fileserver.go
|
package main
import (
"bufio"
"bytes"
"crypto/md5"
"crypto/rand"
"crypto/sha1"
"encoding/base64"
"errors"
"flag"
"fmt"
"github.com/astaxie/beego/httplib"
"github.com/deckarep/golang-set"
_ "github.com/eventials/go-tus"
"github.com/json-iterator/go"
"github.com/nfnt/resize"
"github.com/sjqzhang/googleAuthenticator"
log "github.com/sjqzhang/seelog"
"github.com/sjqzhang/tusd"
"github.com/sjqzhang/tusd/filestore"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/util"
"image"
"image/jpeg"
"image/png"
"io"
"io/ioutil"
slog "log"
random "math/rand"
"mime/multipart"
"net"
"net/http"
_ "net/http/pprof"
"net/smtp"
"net/url"
"os"
"os/signal"
"path"
"path/filepath"
"reflect"
"regexp"
"runtime"
"runtime/debug"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"unsafe"
)
var staticHandler http.Handler
var json = jsoniter.ConfigCompatibleWithStandardLibrary
var server *Server
var logacc log.LoggerInterface
var FOLDERS = []string{DATA_DIR, STORE_DIR, CONF_DIR, STATIC_DIR}
var CONST_QUEUE_SIZE = 10000
var (
FileName string
ptr unsafe.Pointer
DOCKER_DIR = ""
STORE_DIR = STORE_DIR_NAME
CONF_DIR = CONF_DIR_NAME
LOG_DIR = LOG_DIR_NAME
DATA_DIR = DATA_DIR_NAME
STATIC_DIR = STATIC_DIR_NAME
LARGE_DIR_NAME = "haystack"
LARGE_DIR = STORE_DIR + "/haystack"
CONST_LEVELDB_FILE_NAME = DATA_DIR + "/fileserver.db"
CONST_LOG_LEVELDB_FILE_NAME = DATA_DIR + "/log.db"
CONST_STAT_FILE_NAME = DATA_DIR + "/stat.json"
CONST_CONF_FILE_NAME = CONF_DIR + "/cfg.json"
CONST_SEARCH_FILE_NAME = DATA_DIR + "/search.txt"
logConfigStr = `
<seelog type="asynctimer" asyncinterval="1000" minlevel="trace" maxlevel="error">
<outputs formatid="common">
<buffered formatid="common" size="1048576" flushperiod="1000">
<rollingfile type="size" filename="{DOCKER_DIR}log/fileserver.log" maxsize="104857600" maxrolls="10"/>
</buffered>
</outputs>
<formats>
<format id="common" format="%Date %Time [%LEV] [%File:%Line] [%Func] %Msg%n" />
</formats>
</seelog>
`
logAccessConfigStr = `
<seelog type="asynctimer" asyncinterval="1000" minlevel="trace" maxlevel="error">
<outputs formatid="common">
<buffered formatid="common" size="1048576" flushperiod="1000">
<rollingfile type="size" filename="{DOCKER_DIR}log/access.log" maxsize="104857600" maxrolls="10"/>
</buffered>
</outputs>
<formats>
<format id="common" format="%Date %Time [%LEV] [%File:%Line] [%Func] %Msg%n" />
</formats>
</seelog>
`
)
const (
STORE_DIR_NAME = "files"
LOG_DIR_NAME = "log"
DATA_DIR_NAME = "data"
CONF_DIR_NAME = "conf"
STATIC_DIR_NAME = "static"
CONST_STAT_FILE_COUNT_KEY = "fileCount"
CONST_BIG_UPLOAD_PATH_SUFFIX = "/big/upload/"
CONST_STAT_FILE_TOTAL_SIZE_KEY = "totalSize"
CONST_Md5_ERROR_FILE_NAME = "errors.md5"
CONST_Md5_QUEUE_FILE_NAME = "queue.md5"
CONST_FILE_Md5_FILE_NAME = "files.md5"
CONST_REMOME_Md5_FILE_NAME = "removes.md5"
CONST_SMALL_FILE_SIZE = 1024 * 1024
CONST_MESSAGE_CLUSTER_IP = "Can only be called by the cluster ip or 127.0.0.1 or admin_ips(cfg.json),current ip:%s"
	cfgJson = `{
	"bind address": "port to listen on",
	"addr": ":8080",
	"PeerID": "must be unique within the cluster; use a single character 0-9, generated automatically by default",
	"peer_id": "%s",
	"local host address": "this host's http address, generated automatically by default (the port must match the one in addr); it must be an intranet address, change it manually if the generated value is not; same below",
	"host": "%s",
	"cluster": "list of cluster peers; for high availability the IPs must all differ (identical peers do not back each other up), must not be 127.0.0.1, and must be intranet IPs; generated automatically by default",
	"peers": ["%s"],
	"group name": "distinguishes different clusters (for upload or download), used together with support_group_upload and carried in the download path",
	"group": "group1",
	"merge small files": "off by default; merging avoids running out of inodes (currently applies to files smaller than 1M)",
	"enable_merge_small_file": false,
	"retry interval for failed file syncs": "in seconds",
	"refresh_interval": 1800,
	"rename files automatically": "off by default, the original file name is kept",
	"rename_file": false,
	"allow web upload (handy for debugging)": "web upload is enabled by default",
	"enable_web_upload": true,
	"allow non-date paths": "enabled by default, i.e. custom paths are supported; pass path when uploading",
	"enable_custom_path": true,
	"download domain": "domain used to download files from outside the intranet, without http://",
	"download_domain": "",
	"scene list": "when set, the scene supplied by the user must be in this list; no restriction by default (note: to enable scene authentication use the format 'scene_name:googleauth_secret', e.g. default:N7IET373HB2C5M6D)",
	"scenes": [],
	"default scene": "default is 'default'",
	"default_scene": "default",
	"show directory listing": "shown by default for easier debugging; turn it off in production",
	"show_dir": true,
	"mail settings": "",
	"mail": {
		"user": "[email protected]",
		"password": "abc",
		"host": "smtp.163.com:25"
	},
	"alarm mail receivers": "array of recipients",
	"alram_receivers": [],
	"alarm URL": "called with POST, parameters: subject, message",
	"alarm_url": "",
	"require token for download": "true or false",
	"download_use_token": false,
	"download token expiry": "in seconds",
	"download_token_expire": 600,
	"auto repair": "has performance problems above roughly 100 million files; disable it and sync manually per day instead, see the FAQ",
	"auto_repair": true,
	"file dedup checksum algorithm (md5 may collide), default md5": "sha1|md5",
	"file_sum_arithmetic": "md5",
	"group (cluster) management, mainly so Nginx can serve multiple clusters": "off by default; when off the path is http://10.1.5.4:8080/action, when on it is http://10.1.5.4:8080/group(the group option)/action, where action is e.g. status, delete, sync",
	"support_group_manage": false,
	"admin ip list": "ip whitelist for managing the cluster",
	"admin_ips": ["127.0.0.1"],
	"enable migration": "off by default",
	"enable_migrate": false,
	"deduplicate files": "on by default",
	"enable_distinct_file": true,
	"enable cross-origin access": "on by default",
	"enable_cross_origin": true,
	"enable Google Authenticator for secure upload and download": "off by default",
	"enable_google_auth": false,
	"auth url": "takes effect when non-empty; note: normal uploads pass the http parameter auth_token as the credential, resumable uploads pass auth_token in the Upload-Metadata HTTP header; see the authentication architecture diagram",
	"auth_url": "",
	"authenticate downloads": "off by default (only effective when auth_url is non-empty)",
	"enable_download_auth": false,
	"download by default": "download by default",
	"default_download": true,
	"read only": "readable and writable by default",
	"read_only": false,
	"enable resumable upload (tus)": "on by default",
	"enable_tus": true
	}
	`
)
type Common struct {
}
type Server struct {
ldb *leveldb.DB
logDB *leveldb.DB
util *Common
statMap *CommonMap
sumMap *CommonMap //map[string]mapset.Set
queueToPeers chan FileInfo
queueFromPeers chan FileInfo
queueFileLog chan *FileLog
lockMap *CommonMap
sceneMap *CommonMap
searchMap *CommonMap
curDate string
host string
}
type FileInfo struct {
Name string `json:"name"`
ReName string `json:"rename"`
Path string `json:"path"`
Md5 string `json:"md5"`
Size int64 `json:"size"`
Peers []string `json:"peers"`
Scene string `json:"scene"`
TimeStamp int64 `json:"timeStamp"`
OffSet int64 `json:"offset"`
}
type FileLog struct {
FileInfo *FileInfo
FileName string
}
type JsonResult struct {
Message string `json:"message"`
Status string `json:"status"`
Data interface{} `json:"data"`
}
type FileResult struct {
Url string `json:"url"`
Md5 string `json:"md5"`
Path string `json:"path"`
Domain string `json:"domain"`
Scene string `json:"scene"`
Size int64 `json:"size"`
ModTime int64 `json:"mtime"`
//Just for Compatibility
Scenes string `json:"scenes"`
Retmsg string `json:"retmsg"`
Retcode int `json:"retcode"`
Src string `json:"src"`
}
type Mail struct {
User string `json:"user"`
Password string `json:"password"`
Host string `json:"host"`
}
type StatDateFileInfo struct {
Date string `json:"date"`
TotalSize int64 `json:"totalSize"`
FileCount int64 `json:"fileCount"`
}
type GloablConfig struct {
Addr string `json:"addr"`
Peers []string `json:"peers"`
Group string `json:"group"`
RenameFile bool `json:"rename_file"`
ShowDir bool `json:"show_dir"`
RefreshInterval int `json:"refresh_interval"`
EnableWebUpload bool `json:"enable_web_upload"`
DownloadDomain string `json:"download_domain"`
EnableCustomPath bool `json:"enable_custom_path"`
Scenes []string `json:"scenes"`
AlramReceivers []string `json:"alram_receivers"`
DefaultScene string `json:"default_scene"`
Mail Mail `json:"mail"`
AlarmUrl string `json:"alarm_url"`
DownloadUseToken bool `json:"download_use_token"`
DownloadTokenExpire int `json:"download_token_expire"`
QueueSize int `json:"queue_size"`
AutoRepair bool `json:"auto_repair"`
Host string `json:"host"`
FileSumArithmetic string `json:"file_sum_arithmetic"`
PeerId string `json:"peer_id"`
SupportGroupManage bool `json:"support_group_manage"`
AdminIps []string `json:"admin_ips"`
EnableMergeSmallFile bool `json:"enable_merge_small_file"`
EnableMigrate bool `json:"enable_migrate"`
EnableDistinctFile bool `json:"enable_distinct_file"`
ReadOnly bool `json:"read_only"`
EnableCrossOrigin bool `json:"enable_cross_origin"`
EnableGoogleAuth bool `json:"enable_google_auth"`
AuthUrl string `json:"auth_url"`
EnableDownloadAuth bool `json:"enable_download_auth"`
DefaultDownload bool `json:"default_download"`
EnableTus bool `json:"enable_tus"`
}
type FileInfoResult struct {
Name string `json:"name"`
Md5 string `json:"md5"`
Path string `json:"path"`
Size int64 `json:"size"`
ModTime time.Time `json:"mtime"`
IsDir bool `json:"is_dir"`
}
type Tuple struct {
Key string
Val interface{}
}
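// NewServer wires up the global server instance: the shared stat/lock/scene/search
// maps, the peer sync queues, the default httplib transport used for peer requests,
// and the two LevelDB databases (file index and per-day log).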
func NewServer() *Server {
var (
server *Server
err error
)
server = &Server{
util: &Common{},
statMap: NewCommonMap(0),
lockMap: NewCommonMap(0),
sceneMap: NewCommonMap(0),
searchMap: NewCommonMap(0),
queueToPeers: make(chan FileInfo, CONST_QUEUE_SIZE),
queueFromPeers: make(chan FileInfo, CONST_QUEUE_SIZE),
queueFileLog: make(chan *FileLog, CONST_QUEUE_SIZE),
sumMap: NewCommonMap(365 * 3),
}
defaultTransport := &http.Transport{
DisableKeepAlives: true,
Dial: httplib.TimeoutDialer(time.Second*6, time.Second*60),
MaxIdleConns: 100,
MaxIdleConnsPerHost: 100,
}
settins := httplib.BeegoHTTPSettings{
UserAgent: "Go-FastDFS",
ConnectTimeout: 10 * time.Second,
ReadWriteTimeout: 10 * time.Second,
Gzip: true,
DumpBody: true,
Transport: defaultTransport,
}
httplib.SetDefaultSetting(settins)
server.statMap.Put(CONST_STAT_FILE_COUNT_KEY, int64(0))
server.statMap.Put(CONST_STAT_FILE_TOTAL_SIZE_KEY, int64(0))
server.statMap.Put(server.util.GetToDay()+"_"+CONST_STAT_FILE_COUNT_KEY, int64(0))
server.statMap.Put(server.util.GetToDay()+"_"+CONST_STAT_FILE_TOTAL_SIZE_KEY, int64(0))
server.curDate = server.util.GetToDay()
server.ldb, err = leveldb.OpenFile(CONST_LEVELDB_FILE_NAME, nil)
if err != nil {
fmt.Println(err)
log.Error(err)
panic(err)
}
server.logDB, err = leveldb.OpenFile(CONST_LOG_LEVELDB_FILE_NAME, nil)
if err != nil {
fmt.Println(err)
log.Error(err)
panic(err)
}
return server
}
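// CommonMap is a small RWMutex-protected map used throughout the server for
// statistics counters, per-key locks, scene secrets, and search data.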
type CommonMap struct {
sync.RWMutex
m map[string]interface{}
}
func NewCommonMap(size int) *CommonMap {
if size > 0 {
return &CommonMap{m: make(map[string]interface{}, size)}
} else {
return &CommonMap{m: make(map[string]interface{})}
}
}
func (s *CommonMap) GetValue(k string) (interface{}, bool) {
s.RLock()
defer s.RUnlock()
v, ok := s.m[k]
return v, ok
}
func (s *CommonMap) Put(k string, v interface{}) {
s.Lock()
defer s.Unlock()
s.m[k] = v
}
func (s *CommonMap) Iter() <-chan Tuple { // reduce memory
ch := make(chan Tuple)
go func() {
s.RLock()
for k, v := range s.m {
ch <- Tuple{Key: k, Val: v}
}
close(ch)
s.RUnlock()
}()
return ch
}
func (s *CommonMap) LockKey(k string) {
s.Lock()
if v, ok := s.m[k]; ok {
s.m[k+"_lock_"] = true
s.Unlock()
v.(*sync.Mutex).Lock()
} else {
s.m[k] = &sync.Mutex{}
v = s.m[k]
s.m[k+"_lock_"] = true
s.Unlock()
v.(*sync.Mutex).Lock()
}
}
func (s *CommonMap) UnLockKey(k string) {
s.Lock()
if v, ok := s.m[k]; ok {
v.(*sync.Mutex).Unlock()
s.m[k+"_lock_"] = false
}
s.Unlock()
}
func (s *CommonMap) IsLock(k string) bool {
s.Lock()
if v, ok := s.m[k+"_lock_"]; ok {
s.Unlock()
return v.(bool)
}
s.Unlock()
return false
}
func (s *CommonMap) Keys() []string {
s.Lock()
	keys := make([]string, 0, len(s.m))
	defer s.Unlock()
	for k := range s.m {
		keys = append(keys, k)
	}
return keys
}
func (s *CommonMap) Clear() {
s.Lock()
defer s.Unlock()
s.m = make(map[string]interface{})
}
func (s *CommonMap) Remove(key string) {
s.Lock()
defer s.Unlock()
if _, ok := s.m[key]; ok {
delete(s.m, key)
}
}
func (s *CommonMap) AddUniq(key string) {
s.Lock()
defer s.Unlock()
if _, ok := s.m[key]; !ok {
s.m[key] = nil
}
}
func (s *CommonMap) AddCount(key string, count int) {
s.Lock()
defer s.Unlock()
if _v, ok := s.m[key]; ok {
v := _v.(int)
v = v + count
s.m[key] = v
} else {
		s.m[key] = count
}
}
func (s *CommonMap) AddCountInt64(key string, count int64) {
s.Lock()
defer s.Unlock()
if _v, ok := s.m[key]; ok {
v := _v.(int64)
v = v + count
s.m[key] = v
} else {
s.m[key] = count
}
}
func (s *CommonMap) Add(key string) {
s.Lock()
defer s.Unlock()
if _v, ok := s.m[key]; ok {
v := _v.(int)
v = v + 1
s.m[key] = v
} else {
s.m[key] = 1
}
}
func (s *CommonMap) Zero() {
s.Lock()
defer s.Unlock()
for k := range s.m {
s.m[k] = 0
}
}
func (s *CommonMap) Contains(i ...interface{}) bool {
s.Lock()
defer s.Unlock()
for _, val := range i {
if _, ok := s.m[val.(string)]; !ok {
return false
}
}
return true
}
func (s *CommonMap) Get() map[string]interface{} {
s.Lock()
defer s.Unlock()
m := make(map[string]interface{})
for k, v := range s.m {
m[k] = v
}
return m
}
func Config() *GloablConfig {
return (*GloablConfig)(atomic.LoadPointer(&ptr))
}
func ParseConfig(filePath string) {
var (
data []byte
)
if filePath == "" {
data = []byte(strings.TrimSpace(cfgJson))
} else {
file, err := os.Open(filePath)
if err != nil {
panic(fmt.Sprintln("open file path:", filePath, "error:", err))
}
defer file.Close()
FileName = filePath
data, err = ioutil.ReadAll(file)
if err != nil {
panic(fmt.Sprintln("file path:", filePath, " read all error:", err))
}
}
var c GloablConfig
if err := json.Unmarshal(data, &c); err != nil {
panic(fmt.Sprintln("file path:", filePath, "json unmarshal error:", err))
}
log.Info(c)
atomic.StorePointer(&ptr, unsafe.Pointer(&c))
log.Info("config parse success")
}
func (this *Common) GetUUID() string {
b := make([]byte, 48)
if _, err := io.ReadFull(rand.Reader, b); err != nil {
return ""
}
id := this.MD5(base64.URLEncoding.EncodeToString(b))
return fmt.Sprintf("%s-%s-%s-%s-%s", id[0:8], id[8:12], id[12:16], id[16:20], id[20:])
}
func (this *Common) CopyFile(src, dst string) (int64, error) {
sourceFileStat, err := os.Stat(src)
if err != nil {
return 0, err
}
if !sourceFileStat.Mode().IsRegular() {
return 0, fmt.Errorf("%s is not a regular file", src)
}
source, err := os.Open(src)
if err != nil {
return 0, err
}
defer source.Close()
destination, err := os.Create(dst)
if err != nil {
return 0, err
}
defer destination.Close()
nBytes, err := io.Copy(destination, source)
return nBytes, err
}
func (this *Common) RandInt(min, max int) int {
return func(min, max int) int {
r := random.New(random.NewSource(time.Now().UnixNano()))
if min >= max {
return max
}
return r.Intn(max-min) + min
}(min, max)
}
func (this *Common) GetToDay() string {
return time.Now().Format("20060102")
}
func (this *Common) UrlEncode(v interface{}) string {
switch v.(type) {
case string:
m := make(map[string]string)
m["name"] = v.(string)
return strings.Replace(this.UrlEncodeFromMap(m), "name=", "", 1)
case map[string]string:
return this.UrlEncodeFromMap(v.(map[string]string))
default:
return fmt.Sprintf("%v", v)
}
}
func (this *Common) UrlEncodeFromMap(m map[string]string) string {
vv := url.Values{}
for k, v := range m {
vv.Add(k, v)
}
return vv.Encode()
}
func (this *Common) UrlDecodeToMap(body string) (map[string]string, error) {
var (
err error
m map[string]string
v url.Values
)
m = make(map[string]string)
if v, err = url.ParseQuery(body); err != nil {
return m, err
}
for _k, _v := range v {
if len(_v) > 0 {
m[_k] = _v[0]
}
}
return m, nil
}
func (this *Common) GetDayFromTimeStamp(timeStamp int64) string {
return time.Unix(timeStamp, 0).Format("20060102")
}
func (this *Common) StrToMapSet(str string, sep string) mapset.Set {
result := mapset.NewSet()
for _, v := range strings.Split(str, sep) {
result.Add(v)
}
return result
}
func (this *Common) MapSetToStr(set mapset.Set, sep string) string {
var (
ret []string
)
for v := range set.Iter() {
ret = append(ret, v.(string))
}
return strings.Join(ret, sep)
}
func (this *Common) GetPulicIP() string {
var (
err error
conn net.Conn
)
if conn, err = net.Dial("udp", "8.8.8.8:80"); err != nil {
return "127.0.0.1"
}
defer conn.Close()
localAddr := conn.LocalAddr().String()
idx := strings.LastIndex(localAddr, ":")
return localAddr[0:idx]
}
func (this *Common) MD5(str string) string {
md := md5.New()
md.Write([]byte(str))
return fmt.Sprintf("%x", md.Sum(nil))
}
func (this *Common) GetFileMd5(file *os.File) string {
file.Seek(0, 0)
md5h := md5.New()
io.Copy(md5h, file)
sum := fmt.Sprintf("%x", md5h.Sum(nil))
return sum
}
func (this *Common) GetFileSum(file *os.File, alg string) string {
alg = strings.ToLower(alg)
if alg == "sha1" {
return this.GetFileSha1Sum(file)
} else {
return this.GetFileMd5(file)
}
}
func (this *Common) GetFileSumByName(filepath string, alg string) (string, error) {
var (
err error
file *os.File
)
file, err = os.Open(filepath)
if err != nil {
return "", err
}
defer file.Close()
alg = strings.ToLower(alg)
if alg == "sha1" {
return this.GetFileSha1Sum(file), nil
} else {
return this.GetFileMd5(file), nil
}
}
func (this *Common) GetFileSha1Sum(file *os.File) string {
file.Seek(0, 0)
md5h := sha1.New()
io.Copy(md5h, file)
sum := fmt.Sprintf("%x", md5h.Sum(nil))
return sum
}
func (this *Common) WriteFileByOffSet(filepath string, offset int64, data []byte) (error) {
var (
err error
file *os.File
count int
)
file, err = os.OpenFile(filepath, os.O_CREATE|os.O_RDWR, 0666)
if err != nil {
return err
}
defer file.Close()
count, err = file.WriteAt(data, offset)
if err != nil {
return err
}
if count != len(data) {
		return fmt.Errorf("write %s error", filepath)
}
return nil
}
func (this *Common) ReadFileByOffSet(filepath string, offset int64, length int) ([]byte, error) {
var (
err error
file *os.File
result []byte
count int
)
file, err = os.Open(filepath)
if err != nil {
return nil, err
}
defer file.Close()
result = make([]byte, length)
count, err = file.ReadAt(result, offset)
if err != nil {
return nil, err
}
if count != length {
return nil, errors.New("read error")
}
return result, nil
}
func (this *Common) Contains(obj interface{}, arrayobj interface{}) bool {
targetValue := reflect.ValueOf(arrayobj)
switch reflect.TypeOf(arrayobj).Kind() {
case reflect.Slice, reflect.Array:
for i := 0; i < targetValue.Len(); i++ {
if targetValue.Index(i).Interface() == obj {
return true
}
}
case reflect.Map:
if targetValue.MapIndex(reflect.ValueOf(obj)).IsValid() {
return true
}
}
return false
}
func (this *Common) FileExists(fileName string) bool {
_, err := os.Stat(fileName)
return err == nil
}
func (this *Common) WriteFile(path string, data string) bool {
if err := ioutil.WriteFile(path, []byte(data), 0775); err == nil {
return true
} else {
return false
}
}
func (this *Common) WriteBinFile(path string, data []byte) bool {
if err := ioutil.WriteFile(path, data, 0775); err == nil {
return true
} else {
return false
}
}
func (this *Common) IsExist(filename string) bool {
_, err := os.Stat(filename)
return err == nil || os.IsExist(err)
}
func (this *Common) Match(matcher string, content string) []string {
var result []string
if reg, err := regexp.Compile(matcher); err == nil {
result = reg.FindAllString(content, -1)
}
return result
}
func (this *Common) ReadBinFile(path string) ([]byte, error) {
if this.IsExist(path) {
fi, err := os.Open(path)
if err != nil {
return nil, err
}
defer fi.Close()
return ioutil.ReadAll(fi)
} else {
return nil, errors.New("not found")
}
}
func (this *Common) RemoveEmptyDir(pathname string) {
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("postFileToPeer")
log.Error(re)
log.Error(string(buffer))
}
}()
handlefunc := func(file_path string, f os.FileInfo, err error) error {
if f.IsDir() {
files, _ := ioutil.ReadDir(file_path)
if len(files) == 0 && file_path != pathname {
os.Remove(file_path)
}
}
return nil
}
	fi, err := os.Stat(pathname)
	if err == nil && fi.IsDir() {
filepath.Walk(pathname, handlefunc)
}
}
func (this *Common) JsonEncodePretty(o interface{}) string {
resp := ""
switch o.(type) {
case map[string]interface{}:
if data, err := json.Marshal(o); err == nil {
resp = string(data)
}
case map[string]string:
if data, err := json.Marshal(o); err == nil {
resp = string(data)
}
case []interface{}:
if data, err := json.Marshal(o); err == nil {
resp = string(data)
}
case []string:
if data, err := json.Marshal(o); err == nil {
resp = string(data)
}
case string:
resp = o.(string)
default:
if data, err := json.Marshal(o); err == nil {
resp = string(data)
}
}
var v interface{}
if ok := json.Unmarshal([]byte(resp), &v); ok == nil {
if buf, ok := json.MarshalIndent(v, "", " "); ok == nil {
resp = string(buf)
}
}
return resp
}
func (this *Common) GetClientIp(r *http.Request) string {
client_ip := ""
headers := []string{"X_Forwarded_For", "X-Forwarded-For", "X-Real-Ip",
"X_Real_Ip", "Remote_Addr", "Remote-Addr"}
for _, v := range headers {
if _v, ok := r.Header[v]; ok {
if len(_v) > 0 {
client_ip = _v[0]
break
}
}
}
if client_ip == "" {
clients := strings.Split(r.RemoteAddr, ":")
client_ip = clients[0]
}
return client_ip
}
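// BackUpMetaDataByDate rebuilds the per-day metadata files (files.md5 and meta.data)
// under DATA_DIR/<date> from the log LevelDB, and removes them again if they end up empty.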
func (this *Server) BackUpMetaDataByDate(date string) {
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("BackUpMetaDataByDate")
log.Error(re)
log.Error(string(buffer))
}
}()
var (
err error
keyPrefix string
msg string
name string
fileInfo FileInfo
logFileName string
fileLog *os.File
fileMeta *os.File
metaFileName string
fi os.FileInfo
)
logFileName = DATA_DIR + "/" + date + "/" + CONST_FILE_Md5_FILE_NAME
this.lockMap.LockKey(logFileName)
defer this.lockMap.UnLockKey(logFileName)
metaFileName = DATA_DIR + "/" + date + "/" + "meta.data"
os.MkdirAll(DATA_DIR+"/"+date, 0775)
if this.util.IsExist(logFileName) {
os.Remove(logFileName)
}
if this.util.IsExist(metaFileName) {
os.Remove(metaFileName)
}
fileLog, err = os.OpenFile(logFileName, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
if err != nil {
log.Error(err)
return
}
defer fileLog.Close()
fileMeta, err = os.OpenFile(metaFileName, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
if err != nil {
log.Error(err)
return
}
defer fileMeta.Close()
keyPrefix = "%s_%s_"
keyPrefix = fmt.Sprintf(keyPrefix, date, CONST_FILE_Md5_FILE_NAME)
iter := server.logDB.NewIterator(util.BytesPrefix([]byte(keyPrefix)), nil)
defer iter.Release()
for iter.Next() {
if err = json.Unmarshal(iter.Value(), &fileInfo); err != nil {
continue
}
name = fileInfo.Name
if fileInfo.ReName != "" {
name = fileInfo.ReName
}
msg = fmt.Sprintf("%s\t%s\n", fileInfo.Md5, string(iter.Value()))
if _, err = fileMeta.WriteString(msg); err != nil {
log.Error(err)
}
msg = fmt.Sprintf("%s\t%s\n", this.util.MD5(fileInfo.Path+"/"+name), string(iter.Value()))
if _, err = fileMeta.WriteString(msg); err != nil {
log.Error(err)
}
msg = fmt.Sprintf("%s|%d|%d|%s\n", fileInfo.Md5, fileInfo.Size, fileInfo.TimeStamp, fileInfo.Path+"/"+name)
if _, err = fileLog.WriteString(msg); err != nil {
log.Error(err)
}
}
if fi, err = fileLog.Stat(); err != nil {
log.Error(err)
} else if (fi.Size() == 0) {
fileLog.Close()
os.Remove(logFileName)
}
if fi, err = fileMeta.Stat(); err != nil {
log.Error(err)
} else if (fi.Size() == 0) {
fileMeta.Close()
os.Remove(metaFileName)
}
}
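// RepairFileInfoFromFile walks the store directory, re-registers every regular file
// in LevelDB and pushes it to the peers; a lock key ensures only one repair runs at a time.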
func (this *Server) RepairFileInfoFromFile() {
var (
pathPrefix string
err error
fi os.FileInfo
)
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("RepairFileInfoFromFile")
log.Error(re)
log.Error(string(buffer))
}
}()
if this.lockMap.IsLock("RepairFileInfoFromFile") {
log.Warn("Lock RepairFileInfoFromFile")
return
}
this.lockMap.LockKey("RepairFileInfoFromFile")
defer this.lockMap.UnLockKey("RepairFileInfoFromFile")
handlefunc := func(file_path string, f os.FileInfo, err error) error {
var (
files []os.FileInfo
fi os.FileInfo
fileInfo FileInfo
sum string
pathMd5 string
)
if f.IsDir() {
files, err = ioutil.ReadDir(file_path)
if err != nil {
return err
}
for _, fi = range files {
if fi.IsDir() || fi.Size() == 0 {
continue
}
file_path = strings.Replace(file_path, "\\", "/", -1)
if DOCKER_DIR != "" {
file_path = strings.Replace(file_path, DOCKER_DIR, "", 1)
}
if pathPrefix != "" {
file_path = strings.Replace(file_path, pathPrefix, STORE_DIR_NAME, 1)
}
if strings.HasPrefix(file_path, STORE_DIR_NAME+"/"+LARGE_DIR_NAME) {
log.Info(fmt.Sprintf("ignore small file file %s", file_path+"/"+fi.Name()))
continue
}
pathMd5 = this.util.MD5(file_path + "/" + fi.Name())
//if finfo, _ := this.GetFileInfoFromLevelDB(pathMd5); finfo != nil && finfo.Md5 != "" {
// log.Info(fmt.Sprintf("exist ignore file %s", file_path+"/"+fi.Name()))
// continue
//}
//sum, err = this.util.GetFileSumByName(file_path+"/"+fi.Name(), Config().FileSumArithmetic)
sum = pathMd5
if err != nil {
log.Error(err)
continue
}
fileInfo = FileInfo{
Size: fi.Size(),
Name: fi.Name(),
Path: file_path,
Md5: sum,
TimeStamp: fi.ModTime().Unix(),
Peers: []string{this.host},
OffSet: -2,
}
//log.Info(fileInfo)
log.Info(file_path, fi.Name())
//this.AppendToQueue(&fileInfo)
this.postFileToPeer(&fileInfo)
this.SaveFileInfoToLevelDB(fileInfo.Md5, &fileInfo, this.ldb)
//this.SaveFileMd5Log(&fileInfo, CONST_FILE_Md5_FILE_NAME)
}
}
return nil
}
pathname := STORE_DIR
pathPrefix, err = os.Readlink(pathname)
if err == nil { //link
pathname = pathPrefix
}
fi, err = os.Stat(pathname)
	if err != nil {
		log.Error(err)
		return
	}
if fi.IsDir() {
filepath.Walk(pathname, handlefunc)
}
log.Info("RepairFileInfoFromFile is finish.")
}
func (this *Server) RepairStatByDate(date string) StatDateFileInfo {
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("RepairStatByDate")
log.Error(re)
log.Error(string(buffer))
}
}()
var (
err error
keyPrefix string
fileInfo FileInfo
fileCount int64
fileSize int64
stat StatDateFileInfo
)
keyPrefix = "%s_%s_"
keyPrefix = fmt.Sprintf(keyPrefix, date, CONST_FILE_Md5_FILE_NAME)
iter := server.logDB.NewIterator(util.BytesPrefix([]byte(keyPrefix)), nil)
defer iter.Release()
for iter.Next() {
if err = json.Unmarshal(iter.Value(), &fileInfo); err != nil {
continue
}
fileCount = fileCount + 1
fileSize = fileSize + fileInfo.Size
}
this.statMap.Put(date+"_"+CONST_STAT_FILE_COUNT_KEY, fileCount)
this.statMap.Put(date+"_"+CONST_STAT_FILE_TOTAL_SIZE_KEY, fileSize)
this.SaveStat()
stat.Date = date
stat.FileCount = fileCount
stat.TotalSize = fileSize
return stat
}
func (this *Server) GetFilePathByInfo(fileInfo *FileInfo) string {
var (
fn string
)
fn = fileInfo.Name
if fileInfo.ReName != "" {
fn = fileInfo.ReName
}
return DOCKER_DIR + fileInfo.Path + "/" + fn
}
func (this *Server) CheckFileExistByMd5(md5s string, fileInfo *FileInfo) bool { // important: just for DownloadFromPeer use
var (
err error
info *FileInfo
fn string
name string
offset int64
data []byte
)
if info, err = this.GetFileInfoFromLevelDB(md5s); err != nil {
return false
}
if info == nil || info.Md5 == "" {
return false
}
	if info.Path != fileInfo.Path { // the same file uploaded at the same time from two peers
return false
}
fn = info.Name
if info.ReName != "" {
fn = info.ReName
}
if info.OffSet == -1 {
if this.util.FileExists(DOCKER_DIR + info.Path + "/" + fn) {
return true
} else {
return false
}
} else { //small file
if name, offset, _, err = this.ParseSmallFile(fn); err != nil {
return false
}
if !this.util.FileExists(DOCKER_DIR + info.Path + "/" + name) {
return false
}
if data, err = this.util.ReadFileByOffSet(DOCKER_DIR+info.Path+"/"+name, offset, 1); err != nil {
return false
}
if data[0] == '1' {
return true
}
}
if info != nil && info.Md5 != "" {
if fileInfo != nil {
if fileInfo.Path != info.Path {
return false
}
}
return true
} else {
return false
}
}
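// ParseSmallFile splits a merged small-file name of the form "block,offset,length"
// and returns the block name, offset and length after validating them.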
func (this *Server) ParseSmallFile(filename string) (string, int64, int, error) {
var (
err error
offset int64
length int
)
err = errors.New("unvalid small file")
if len(filename) < 3 {
return filename, -1, -1, err
}
if strings.Contains(filename, "/") {
filename = filename[strings.LastIndex(filename, "/")+1:]
}
pos := strings.Split(filename, ",")
if len(pos) < 3 {
return filename, -1, -1, err
}
offset, err = strconv.ParseInt(pos[1], 10, 64)
if err != nil {
return filename, -1, -1, err
}
if length, err = strconv.Atoi(pos[2]); err != nil {
return filename, offset, -1, err
}
if length > CONST_SMALL_FILE_SIZE || offset < 0 {
err = errors.New("invalid filesize or offset")
return filename, -1, -1, err
}
return pos[0], offset, length, nil
}
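// DownloadFromPeer fetches a file that is missing locally from another cluster member:
// migrated files are streamed straight to disk, merged small files are written into their
// block at the recorded offset, and normal files are verified against the recorded size
// and checksum after download.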
func (this *Server) DownloadFromPeer(peer string, fileInfo *FileInfo) {
var (
err error
filename string
fpath string
fi os.FileInfo
sum string
data []byte
downloadUrl string
)
if Config().ReadOnly {
log.Warn("ReadOnly", fileInfo)
return
}
filename = fileInfo.Name
if fileInfo.ReName != "" {
filename = fileInfo.ReName
}
if this.CheckFileExistByMd5(fileInfo.Md5, fileInfo) && Config().EnableDistinctFile {
return
}
if !Config().EnableDistinctFile && this.util.FileExists(this.GetFilePathByInfo(fileInfo)) {
return
}
if _, err = os.Stat(fileInfo.Path); err != nil {
os.MkdirAll(DOCKER_DIR+fileInfo.Path, 0775)
}
//fmt.Println("downloadFromPeer",fileInfo)
p := strings.Replace(fileInfo.Path, STORE_DIR_NAME+"/", "", 1)
//filename=this.util.UrlEncode(filename)
downloadUrl = peer + "/" + Config().Group + "/" + p + "/" + filename
log.Info("DownloadFromPeer: ", downloadUrl)
fpath = DOCKER_DIR + fileInfo.Path + "/" + filename
timeout := fileInfo.Size/1024/1024/8 + 30
if fileInfo.OffSet == -2 { //migrate file
this.lockMap.LockKey(fpath)
defer this.lockMap.UnLockKey(fpath)
if fi, err = os.Stat(fpath); err == nil && fi.Size() == fileInfo.Size { //prevent double download
this.SaveFileInfoToLevelDB(fileInfo.Md5, fileInfo, this.ldb)
log.Info(fmt.Sprintf("file '%s' has download", fpath))
return
}
req := httplib.Get(downloadUrl)
req.SetTimeout(time.Second*30, time.Second*time.Duration(timeout))
if err = req.ToFile(fpath); err != nil {
log.Error(err)
return
}
//this.SaveFileMd5Log(fileInfo, CONST_FILE_Md5_FILE_NAME)
this.SaveFileInfoToLevelDB(fileInfo.Md5, fileInfo, this.ldb)
return
}
req := httplib.Get(downloadUrl)
req.SetTimeout(time.Second*30, time.Second*time.Duration(timeout))
if fileInfo.OffSet != -1 { //small file download
data, err = req.Bytes()
if err != nil {
log.Error(err)
return
}
data2 := make([]byte, len(data)+1)
data2[0] = '1'
for i, v := range data {
data2[i+1] = v
}
data = data2
if int64(len(data)) != fileInfo.Size {
log.Warn("file size is error")
return
}
fpath = strings.Split(fpath, ",")[0]
err = this.util.WriteFileByOffSet(fpath, fileInfo.OffSet, data)
if err != nil {
log.Warn(err)
return
}
this.SaveFileMd5Log(fileInfo, CONST_FILE_Md5_FILE_NAME)
return
}
if err = req.ToFile(fpath); err != nil {
log.Error(err)
return
}
if fi, err = os.Stat(fpath); err != nil {
os.Remove(fpath)
return
}
if sum, err = this.util.GetFileSumByName(fpath, Config().FileSumArithmetic); err != nil {
log.Error(err)
return
}
if fi.Size() != fileInfo.Size || sum != fileInfo.Md5 {
log.Error("file sum check error")
os.Remove(fpath)
return
}
if this.util.IsExist(fpath) {
this.SaveFileMd5Log(fileInfo, CONST_FILE_Md5_FILE_NAME)
}
}
func (this *Server) CrossOrigin(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Type, Depth, User-Agent, X-File-Size, X-Requested-With, X-Requested-By, If-Modified-Since, X-File-Name, X-File-Type, Cache-Control, Origin")
w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS, PUT, DELETE")
w.Header().Set("Access-Control-Expose-Headers", "Authorization")
//https://blog.csdn.net/yanzisu_congcong/article/details/80552155
}
func (this *Server) SetDownloadHeader(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/octet-stream")
w.Header().Set("Content-Disposition", "attachment")
}
func (this *Server) CheckAuth(w http.ResponseWriter, r *http.Request) bool {
var (
err error
req *httplib.BeegoHTTPRequest
result string
)
if err = r.ParseForm(); err != nil {
log.Error(err)
return false
}
req = httplib.Post(Config().AuthUrl)
req.SetTimeout(time.Second*10, time.Second*10)
	for k := range r.Form {
req.Param(k, r.FormValue(k))
}
if result, err = req.String(); err != nil {
return false
}
if result != "ok" {
return false
}
return true
}
func (this *Server) NotPermit(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(403)
}
func (this *Server) GetFilePathFromRequest(w http.ResponseWriter, r *http.Request) (string, string) {
var (
err error
fullpath string
smallPath string
)
fullpath = r.RequestURI[len(Config().Group)+2 : len(r.RequestURI)]
fullpath = strings.Split(fullpath, "?")[0] // just path
fullpath = DOCKER_DIR + STORE_DIR_NAME + "/" + fullpath
if strings.HasPrefix(r.RequestURI, "/"+Config().Group+"/"+LARGE_DIR_NAME+"/") {
smallPath = fullpath //notice order
fullpath = strings.Split(fullpath, ",")[0]
}
if fullpath, err = url.PathUnescape(fullpath); err != nil {
log.Error(err)
}
return fullpath, smallPath
}
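// CheckDownloadAuth enforces the optional download protections: the external auth_url
// check, the md5(file_md5+timestamp) download token, and the per-scene Google
// Authenticator code; requests from cluster peers bypass all of them.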
func (this *Server) CheckDownloadAuth(w http.ResponseWriter, r *http.Request) (bool, error) {
var (
err error
maxTimestamp int64
minTimestamp int64
ts int64
token string
timestamp string
fullpath string
smallPath string
pathMd5 string
fileInfo *FileInfo
scene string
secret interface{}
code string
ok bool
)
CheckToken := func(token string, md5sum string, timestamp string) bool {
if this.util.MD5(md5sum+timestamp) != token {
return false
}
return true
}
if Config().EnableDownloadAuth && Config().AuthUrl != "" && !this.IsPeer(r) && !this.CheckAuth(w, r) {
return false, errors.New("auth fail")
}
if Config().DownloadUseToken && !this.IsPeer(r) {
token = r.FormValue("token")
timestamp = r.FormValue("timestamp")
if token == "" || timestamp == "" {
return false, errors.New("unvalid request")
}
maxTimestamp = time.Now().Add(time.Second *
time.Duration(Config().DownloadTokenExpire)).Unix()
minTimestamp = time.Now().Add(-time.Second *
time.Duration(Config().DownloadTokenExpire)).Unix()
if ts, err = strconv.ParseInt(timestamp, 10, 64); err != nil {
return false, errors.New("unvalid timestamp")
}
if ts > maxTimestamp || ts < minTimestamp {
return false, errors.New("timestamp expire")
}
fullpath, smallPath = this.GetFilePathFromRequest(w, r)
if smallPath != "" {
pathMd5 = this.util.MD5(smallPath)
} else {
pathMd5 = this.util.MD5(fullpath)
}
if fileInfo, err = this.GetFileInfoFromLevelDB(pathMd5); err != nil {
// TODO
} else {
ok := CheckToken(token, fileInfo.Md5, timestamp)
if !ok {
return ok, errors.New("unvalid token")
}
return ok, nil
}
}
if Config().EnableGoogleAuth && !this.IsPeer(r) {
fullpath = r.RequestURI[len(Config().Group)+2 : len(r.RequestURI)]
fullpath = strings.Split(fullpath, "?")[0] // just path
scene = strings.Split(fullpath, "/")[0]
code = r.FormValue("code")
if secret, ok = this.sceneMap.GetValue(scene); ok {
if !this.VerifyGoogleCode(secret.(string), code, int64(Config().DownloadTokenExpire/30)) {
return false, errors.New("invalid google code")
}
}
}
return true, nil
}
func (this *Server) GetSmallFileByURI(w http.ResponseWriter, r *http.Request) ([]byte, bool, error) {
var (
err error
data []byte
offset int64
length int
fullpath string
info os.FileInfo
)
fullpath, _ = this.GetFilePathFromRequest(w, r)
if _, offset, length, err = this.ParseSmallFile(r.RequestURI); err != nil {
return nil, false, err
}
if info, err = os.Stat(fullpath); err != nil {
return nil, false, err
}
if info.Size() < offset+int64(length) {
return nil, true, errors.New("noFound")
} else {
data, err = this.util.ReadFileByOffSet(fullpath, offset, length)
if err != nil {
return nil, false, err
}
return data, false, err
}
}
func (this *Server) DownloadSmallFileByURI(w http.ResponseWriter, r *http.Request) (bool, error) {
var (
err error
data []byte
isDownload bool
imgWidth int
imgHeight int
width string
height string
notFound bool
)
r.ParseForm()
isDownload = true
if r.FormValue("download") == "" {
isDownload = Config().DefaultDownload
}
if r.FormValue("download") == "0" {
isDownload = false
}
width = r.FormValue("width")
height = r.FormValue("height")
if width != "" {
imgWidth, err = strconv.Atoi(width)
if err != nil {
log.Error(err)
}
}
if height != "" {
imgHeight, err = strconv.Atoi(height)
if err != nil {
log.Error(err)
}
}
data, notFound, err = this.GetSmallFileByURI(w, r)
_ = notFound
if data != nil && string(data[0]) == "1" {
if isDownload {
this.SetDownloadHeader(w, r)
}
		if imgWidth != 0 || imgHeight != 0 {
this.ResizeImageByBytes(w, data[1:], uint(imgWidth), uint(imgHeight))
return true, nil
}
w.Write(data[1:])
return true, nil
}
return false, errors.New("not found")
}
func (this *Server) DownloadNormalFileByURI(w http.ResponseWriter, r *http.Request) (bool, error) {
var (
err error
isDownload bool
imgWidth int
imgHeight int
width string
height string
)
r.ParseForm()
isDownload = true
if r.FormValue("download") == "" {
isDownload = Config().DefaultDownload
}
if r.FormValue("download") == "0" {
isDownload = false
}
width = r.FormValue("width")
height = r.FormValue("height")
if width != "" {
imgWidth, err = strconv.Atoi(width)
if err != nil {
log.Error(err)
}
}
if height != "" {
imgHeight, err = strconv.Atoi(height)
if err != nil {
log.Error(err)
}
}
fmt.Println(isDownload)
if isDownload {
this.SetDownloadHeader(w, r)
}
fullpath, _ := this.GetFilePathFromRequest(w, r)
	if imgWidth != 0 || imgHeight != 0 {
this.ResizeImage(w, fullpath, uint(imgWidth), uint(imgHeight))
return true, nil
}
staticHandler.ServeHTTP(w, r)
return true, nil
}
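// DownloadNotFound handles files that are missing locally: it asks each peer for the file's
// metadata, schedules a background sync from the first peer that has it and proxies that peer's
// response to the client; if no peer has the file it answers 404.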
func (this *Server) DownloadNotFound(w http.ResponseWriter, r *http.Request) {
var (
err error
fullpath string
smallPath string
isDownload bool
pathMd5 string
peer string
fileInfo *FileInfo
)
fullpath, smallPath = this.GetFilePathFromRequest(w, r)
isDownload = true
if r.FormValue("download") == "" {
isDownload = Config().DefaultDownload
}
if r.FormValue("download") == "0" {
isDownload = false
}
if smallPath != "" {
pathMd5 = this.util.MD5(smallPath)
} else {
pathMd5 = this.util.MD5(fullpath)
}
for _, peer = range Config().Peers {
if fileInfo, err = this.checkPeerFileExist(peer, pathMd5); err != nil {
log.Error(err)
continue
}
if fileInfo.Md5 != "" {
go this.DownloadFromPeer(peer, fileInfo)
//http.Redirect(w, r, peer+r.RequestURI, 302)
if isDownload {
this.SetDownloadHeader(w, r)
}
this.DownloadFileToResponse(peer+r.RequestURI, w, r)
return
}
}
w.WriteHeader(404)
return
}
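// Download is the entry point for file downloads. It checks download auth and CORS, then
// dispatches to the normal file path or the small-file (haystack) path; directory listings are
// denied unless ShowDir is enabled, and missing files fall through to DownloadNotFound.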
func (this *Server) Download(w http.ResponseWriter, r *http.Request) {
var (
err error
ok bool
fullpath string
smallPath string
fi os.FileInfo
)
if ok, err = this.CheckDownloadAuth(w, r); !ok {
log.Error(err)
this.NotPermit(w, r)
return
}
if Config().EnableCrossOrigin {
this.CrossOrigin(w, r)
}
fullpath, smallPath = this.GetFilePathFromRequest(w, r)
if smallPath == "" {
if fi, err = os.Stat(fullpath); err != nil {
this.DownloadNotFound(w, r)
return
}
if !Config().ShowDir && fi.IsDir() {
w.Write([]byte("list dir deny"))
return
}
//staticHandler.ServeHTTP(w, r)
this.DownloadNormalFileByURI(w, r)
return
}
if smallPath != "" {
if ok, err = this.DownloadSmallFileByURI(w, r); !ok {
this.DownloadNotFound(w, r)
return
}
return
}
}
func (this *Server) DownloadFileToResponse(url string, w http.ResponseWriter, r *http.Request) {
var (
err error
req *httplib.BeegoHTTPRequest
resp *http.Response
)
req = httplib.Get(url)
req.SetTimeout(time.Second*20, time.Second*600)
resp, err = req.DoRequest()
if err != nil {
log.Error(err)
}
defer resp.Body.Close()
_, err = io.Copy(w, resp.Body)
if err != nil {
log.Error(err)
}
}
func (this *Server) ResizeImageByBytes(w http.ResponseWriter, data []byte, width, height uint) {
var (
img image.Image
err error
imgType string
)
reader := bytes.NewReader(data)
img, imgType, err = image.Decode(reader)
if err != nil {
log.Error(err)
return
}
img = resize.Resize(width, height, img, resize.Lanczos3)
if imgType == "jpg" || imgType == "jpeg" {
jpeg.Encode(w, img, nil)
} else if imgType == "png" {
png.Encode(w, img)
} else {
w.Write(data)
}
}
func (this *Server) ResizeImage(w http.ResponseWriter, fullpath string, width, height uint) {
var (
img image.Image
err error
imgType string
file *os.File
)
file, err = os.Open(fullpath)
if err != nil {
log.Error(err)
return
}
img, imgType, err = image.Decode(file)
if err != nil {
log.Error(err)
return
}
file.Close()
img = resize.Resize(width, height, img, resize.Lanczos3)
if imgType == "jpg" || imgType == "jpeg" {
jpeg.Encode(w, img, nil)
} else if imgType == "png" {
png.Encode(w, img)
} else {
file.Seek(0, 0)
io.Copy(w, file)
}
}
func (this *Server) GetServerURI(r *http.Request) string {
return fmt.Sprintf("http://%s/", r.Host)
}
func (this *Server) CheckFileAndSendToPeer(date string, filename string, isForceUpload bool) {
var (
md5set mapset.Set
err error
md5s []interface{}
)
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("CheckFileAndSendToPeer")
log.Error(re)
log.Error(string(buffer))
}
}()
if md5set, err = this.GetMd5sByDate(date, filename); err != nil {
log.Error(err)
return
}
md5s = md5set.ToSlice()
for _, md := range md5s {
if md == nil {
continue
}
if fileInfo, _ := this.GetFileInfoFromLevelDB(md.(string)); fileInfo != nil && fileInfo.Md5 != "" {
if isForceUpload {
fileInfo.Peers = []string{}
}
if len(fileInfo.Peers) > len(Config().Peers) {
continue
}
if !this.util.Contains(this.host, fileInfo.Peers) {
fileInfo.Peers = append(fileInfo.Peers, this.host) // peer is null
}
if filename == CONST_Md5_QUEUE_FILE_NAME {
this.AppendToDownloadQueue(fileInfo)
} else {
this.AppendToQueue(fileInfo)
}
}
}
}
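// postFileToPeer replicates a file's metadata to every peer that does not yet have it. Peers that
// already hold the file (or acknowledge the sync with a download URL) are recorded in
// fileInfo.Peers and saved to LevelDB; failures are written to the md5 error log.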
func (this *Server) postFileToPeer(fileInfo *FileInfo) {
var (
err error
peer string
filename string
info *FileInfo
postURL string
result string
fi os.FileInfo
i int
data []byte
fpath string
)
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("postFileToPeer")
log.Error(re)
log.Error(string(buffer))
}
}()
//fmt.Println("postFile",fileInfo)
for i, peer = range Config().Peers {
_ = i
if fileInfo.Peers == nil {
fileInfo.Peers = []string{}
}
if this.util.Contains(peer, fileInfo.Peers) {
continue
}
filename = fileInfo.Name
if fileInfo.ReName != "" {
filename = fileInfo.ReName
if fileInfo.OffSet != -1 {
filename = strings.Split(fileInfo.ReName, ",")[0]
}
}
fpath = DOCKER_DIR + fileInfo.Path + "/" + filename
if !this.util.FileExists(fpath) {
log.Warn(fmt.Sprintf("file '%s' not found", fpath))
continue
} else {
if fileInfo.Size == 0 {
if fi, err = os.Stat(fpath); err != nil {
log.Error(err)
} else {
fileInfo.Size = fi.Size()
}
}
}
if fileInfo.OffSet != -2 { //migrate file
if info, err = this.checkPeerFileExist(peer, fileInfo.Md5); info.Md5 != "" {
fileInfo.Peers = append(fileInfo.Peers, peer)
if _, err = this.SaveFileInfoToLevelDB(fileInfo.Md5, fileInfo, this.ldb); err != nil {
log.Error(err)
}
continue
}
}
postURL = fmt.Sprintf("%s%s", peer, this.getRequestURI("syncfile_info"))
b := httplib.Post(postURL)
b.SetTimeout(time.Second*30, time.Second*30)
if data, err = json.Marshal(fileInfo); err != nil {
log.Error(err)
return
}
b.Param("fileInfo", string(data))
result, err = b.String()
if !strings.HasPrefix(result, "http://") || err != nil {
this.SaveFileMd5Log(fileInfo, CONST_Md5_ERROR_FILE_NAME)
}
if strings.HasPrefix(result, "http://") {
log.Info(result)
if !this.util.Contains(peer, fileInfo.Peers) {
fileInfo.Peers = append(fileInfo.Peers, peer)
if _, err = this.SaveFileInfoToLevelDB(fileInfo.Md5, fileInfo, this.ldb); err != nil {
log.Error(err)
}
}
}
if err != nil {
log.Error(err)
}
}
}
func (this *Server) SaveFileMd5Log(fileInfo *FileInfo, filename string) {
var (
info FileInfo
)
for len(this.queueFileLog)+len(this.queueFileLog)/10 > CONST_QUEUE_SIZE {
time.Sleep(time.Second * 1)
}
info = *fileInfo
this.queueFileLog <- &FileLog{FileInfo: &info, FileName: filename}
}
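// saveFileMd5Log persists a single file event (add, remove or sync-queue) to the log LevelDB and
// keeps the per-day file count and total size statistics in step with it.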
func (this *Server) saveFileMd5Log(fileInfo *FileInfo, filename string) {
var (
err error
outname string
logDate string
ok bool
fullpath string
md5Path string
logKey string
)
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("saveFileMd5Log")
log.Error(re)
log.Error(string(buffer))
}
}()
if fileInfo == nil || fileInfo.Md5 == "" || filename == "" {
log.Warn("saveFileMd5Log", fileInfo, filename)
return
}
logDate = this.util.GetDayFromTimeStamp(fileInfo.TimeStamp)
outname = fileInfo.Name
if fileInfo.ReName != "" {
outname = fileInfo.ReName
}
fullpath = fileInfo.Path + "/" + outname
logKey = fmt.Sprintf("%s_%s_%s", logDate, filename, fileInfo.Md5)
if filename == CONST_FILE_Md5_FILE_NAME {
//this.searchMap.Put(fileInfo.Md5, fileInfo.Name)
if ok, err = this.IsExistFromLevelDB(fileInfo.Md5, this.ldb); !ok {
this.statMap.AddCountInt64(logDate+"_"+CONST_STAT_FILE_COUNT_KEY, 1)
this.statMap.AddCountInt64(logDate+"_"+CONST_STAT_FILE_TOTAL_SIZE_KEY, fileInfo.Size)
this.SaveStat()
}
if _, err = this.SaveFileInfoToLevelDB(logKey, fileInfo, this.logDB); err != nil {
log.Error(err)
}
if _, err := this.SaveFileInfoToLevelDB(fileInfo.Md5, fileInfo, this.ldb); err != nil {
log.Error("saveToLevelDB", err, fileInfo)
}
if _, err = this.SaveFileInfoToLevelDB(this.util.MD5(fullpath), fileInfo, this.ldb); err != nil {
log.Error("saveToLevelDB", err, fileInfo)
}
return
}
if filename == CONST_REMOME_Md5_FILE_NAME {
//this.searchMap.Remove(fileInfo.Md5)
if ok, err = this.IsExistFromLevelDB(fileInfo.Md5, this.ldb); ok {
this.statMap.AddCountInt64(logDate+"_"+CONST_STAT_FILE_COUNT_KEY, -1)
this.statMap.AddCountInt64(logDate+"_"+CONST_STAT_FILE_TOTAL_SIZE_KEY, -fileInfo.Size)
this.SaveStat()
}
this.RemoveKeyFromLevelDB(logKey, this.logDB)
md5Path = this.util.MD5(fullpath)
if err := this.RemoveKeyFromLevelDB(fileInfo.Md5, this.ldb); err != nil {
log.Error("RemoveKeyFromLevelDB", err, fileInfo)
}
if err = this.RemoveKeyFromLevelDB(md5Path, this.ldb); err != nil {
log.Error("RemoveKeyFromLevelDB", err, fileInfo)
}
return
}
this.SaveFileInfoToLevelDB(logKey, fileInfo, this.logDB)
}
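// checkPeerFileExist asks a peer whether it holds the file with the given md5 and returns the
// peer's FileInfo; an empty Md5 in the reply is reported as "not found".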
func (this *Server) checkPeerFileExist(peer string, md5sum string) (*FileInfo, error) {
var (
err error
fileInfo FileInfo
)
req := httplib.Post(fmt.Sprintf("%s%s?md5=%s", peer, this.getRequestURI("check_file_exist"), md5sum))
req.SetTimeout(time.Second*5, time.Second*10)
if err = req.ToJSON(&fileInfo); err != nil {
return &FileInfo{}, err
}
if fileInfo.Md5 == "" {
return &fileInfo, errors.New("not found")
}
return &fileInfo, nil
}
func (this *Server) CheckFileExist(w http.ResponseWriter, r *http.Request) {
var (
data []byte
err error
fileInfo *FileInfo
fpath string
)
r.ParseForm()
md5sum := ""
md5sum = r.FormValue("md5")
if fileInfo, err = this.GetFileInfoFromLevelDB(md5sum); fileInfo != nil {
if fileInfo.OffSet != -1 {
if data, err = json.Marshal(fileInfo); err != nil {
log.Error(err)
}
w.Write(data)
return
}
fpath = DOCKER_DIR + fileInfo.Path + "/" + fileInfo.Name
if fileInfo.ReName != "" {
fpath = DOCKER_DIR + fileInfo.Path + "/" + fileInfo.ReName
}
if this.util.IsExist(fpath) {
if data, err = json.Marshal(fileInfo); err == nil {
w.Write(data)
return
} else {
log.Error(err)
}
} else {
if fileInfo.OffSet == -1 {
this.RemoveKeyFromLevelDB(md5sum, this.ldb) // when file delete,delete from leveldb
}
}
}
data, _ = json.Marshal(FileInfo{})
w.Write(data)
return
}
func (this *Server) Sync(w http.ResponseWriter, r *http.Request) {
var (
result JsonResult
)
r.ParseForm()
result.Status = "fail"
if !this.IsPeer(r) {
result.Message = "client must be in cluster"
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
date := ""
force := ""
inner := ""
isForceUpload := false
force = r.FormValue("force")
date = r.FormValue("date")
inner = r.FormValue("inner")
if force == "1" {
isForceUpload = true
}
if inner != "1" {
for _, peer := range Config().Peers {
req := httplib.Post(peer + this.getRequestURI("sync"))
req.Param("force", force)
req.Param("inner", "1")
req.Param("date", date)
if _, err := req.String(); err != nil {
log.Error(err)
}
}
}
if date == "" {
result.Message = "require paramete date &force , ?date=20181230"
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
date = strings.Replace(date, ".", "", -1)
if isForceUpload {
go this.CheckFileAndSendToPeer(date, CONST_FILE_Md5_FILE_NAME, isForceUpload)
} else {
go this.CheckFileAndSendToPeer(date, CONST_Md5_ERROR_FILE_NAME, isForceUpload)
}
result.Status = "ok"
result.Message = "job is running"
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
func (this *Server) IsExistFromLevelDB(key string, db *leveldb.DB) (bool, error) {
return db.Has([]byte(key), nil)
}
func (this *Server) GetFileInfoFromLevelDB(key string) (*FileInfo, error) {
var (
err error
data []byte
fileInfo FileInfo
)
if data, err = this.ldb.Get([]byte(key), nil); err != nil {
return nil, err
}
if err = json.Unmarshal(data, &fileInfo); err != nil {
return nil, err
}
return &fileInfo, nil
}
func (this *Server) SaveStat() {
SaveStatFunc := func() {
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("SaveStatFunc")
log.Error(re)
log.Error(string(buffer))
}
}()
stat := this.statMap.Get()
if v, ok := stat[CONST_STAT_FILE_COUNT_KEY]; ok {
switch v.(type) {
case int64, int32, int, float64, float32:
if v.(int64) >= 0 {
if data, err := json.Marshal(stat); err != nil {
log.Error(err)
} else {
this.util.WriteBinFile(CONST_STAT_FILE_NAME, data)
}
}
}
}
}
SaveStatFunc()
}
func (this *Server) RemoveKeyFromLevelDB(key string, db *leveldb.DB) (error) {
var (
err error
)
err = db.Delete([]byte(key), nil)
return err
}
func (this *Server) SaveFileInfoToLevelDB(key string, fileInfo *FileInfo, db *leveldb.DB) (*FileInfo, error) {
var (
err error
data []byte
)
if fileInfo == nil || db == nil {
return nil, errors.New("fileInfo is null or db is null")
}
if data, err = json.Marshal(fileInfo); err != nil {
return fileInfo, err
}
if err = db.Put([]byte(key), data, nil); err != nil {
return fileInfo, err
}
return fileInfo, nil
}
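// IsPeer reports whether the request originates from this node, an address listed in AdminIps,
// or one of the configured cluster peers.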
func (this *Server) IsPeer(r *http.Request) bool {
var (
ip string
peer string
bflag bool
)
//return true
ip = this.util.GetClientIp(r)
if ip == "127.0.0.1" || ip == this.util.GetPulicIP() {
return true
}
if this.util.Contains(ip, Config().AdminIps) {
return true
}
ip = "http://" + ip
bflag = false
for _, peer = range Config().Peers {
if strings.HasPrefix(peer, ip) {
bflag = true
break
}
}
return bflag
}
func (this *Server) ReceiveMd5s(w http.ResponseWriter, r *http.Request) {
var (
err error
md5str string
fileInfo *FileInfo
md5s []string
)
if !this.IsPeer(r) {
log.Warn(fmt.Sprintf("ReceiveMd5s %s", this.util.GetClientIp(r)))
w.Write([]byte(this.GetClusterNotPermitMessage(r)))
return
}
r.ParseForm()
md5str = r.FormValue("md5s")
md5s = strings.Split(md5str, ",")
AppendFunc := func(md5s []string) {
for _, m := range md5s {
if m != "" {
if fileInfo, err = this.GetFileInfoFromLevelDB(m); err != nil {
log.Error(err)
continue
}
this.AppendToQueue(fileInfo)
}
}
}
go AppendFunc(md5s)
}
func (this *Server) GetClusterNotPermitMessage(r *http.Request) string {
var (
message string
)
message = fmt.Sprintf(CONST_MESSAGE_CLUSTER_IP, this.util.GetClientIp(r))
return message
}
func (this *Server) GetMd5sForWeb(w http.ResponseWriter, r *http.Request) {
var (
date string
err error
result mapset.Set
lines []string
md5s []interface{}
)
if !this.IsPeer(r) {
w.Write([]byte(this.GetClusterNotPermitMessage(r)))
return
}
date = r.FormValue("date")
if result, err = this.GetMd5sByDate(date, CONST_FILE_Md5_FILE_NAME); err != nil {
log.Error(err)
return
}
md5s = result.ToSlice()
for _, line := range md5s {
if line != nil && line != "" {
lines = append(lines, line.(string))
}
}
w.Write([]byte( strings.Join(lines, ",") ))
}
func (this *Server) GetMd5File(w http.ResponseWriter, r *http.Request) {
var (
date string
fpath string
data []byte
err error
)
	if !this.IsPeer(r) {
		return
	}
	r.ParseForm()
	date = r.FormValue("date")
	fpath = DATA_DIR + "/" + date + "/" + CONST_FILE_Md5_FILE_NAME
if !this.util.FileExists(fpath) {
w.WriteHeader(404)
return
}
if data, err = ioutil.ReadFile(fpath); err != nil {
w.WriteHeader(500)
return
}
w.Write(data)
}
func (this *Server) GetMd5sMapByDate(date string, filename string) (*CommonMap, error) {
var (
err error
result *CommonMap
fpath string
content string
lines []string
line string
cols []string
data []byte
)
result = &CommonMap{m: make(map[string]interface{})}
if filename == "" {
fpath = DATA_DIR + "/" + date + "/" + CONST_FILE_Md5_FILE_NAME
} else {
fpath = DATA_DIR + "/" + date + "/" + filename
}
if !this.util.FileExists(fpath) {
return result, errors.New(fmt.Sprintf("fpath %s not found", fpath))
}
if data, err = ioutil.ReadFile(fpath); err != nil {
return result, err
}
content = string(data)
lines = strings.Split(content, "\n")
for _, line = range lines {
cols = strings.Split(line, "|")
if len(cols) > 2 {
if _, err = strconv.ParseInt(cols[1], 10, 64); err != nil {
continue
}
result.Add(cols[0])
}
}
return result, nil
}
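// GetMd5sByDate collects the md5 sums logged on the given date for the given log name by scanning
// the log LevelDB with the "<date>_<filename>_" key prefix.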
func (this *Server) GetMd5sByDate(date string, filename string) (mapset.Set, error) {
var (
keyPrefix string
md5set mapset.Set
keys []string
)
md5set = mapset.NewSet()
keyPrefix = "%s_%s_"
keyPrefix = fmt.Sprintf(keyPrefix, date, filename)
iter := server.logDB.NewIterator(util.BytesPrefix([]byte(keyPrefix)), nil)
for iter.Next() {
keys = strings.Split(string(iter.Key()), "_")
if len(keys) >= 3 {
md5set.Add(keys[2])
}
}
iter.Release()
return md5set, nil
}
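// SyncFileInfo receives a FileInfo pushed by a peer, records it (directly in LevelDB for migrated
// entries, otherwise via the md5 queue log), queues it for download and replies with the file's
// download URL.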
func (this *Server) SyncFileInfo(w http.ResponseWriter, r *http.Request) {
var (
err error
fileInfo FileInfo
fileInfoStr string
filename string
)
r.ParseForm()
if !this.IsPeer(r) {
return
}
fileInfoStr = r.FormValue("fileInfo")
if err = json.Unmarshal([]byte(fileInfoStr), &fileInfo); err != nil {
w.Write([]byte(this.GetClusterNotPermitMessage(r)))
log.Error(err)
return
}
if fileInfo.OffSet == -2 { // optimize migrate
this.SaveFileInfoToLevelDB(fileInfo.Md5, &fileInfo, this.ldb)
} else {
this.SaveFileMd5Log(&fileInfo, CONST_Md5_QUEUE_FILE_NAME)
}
this.AppendToDownloadQueue(&fileInfo)
filename = fileInfo.Name
if fileInfo.ReName != "" {
filename = fileInfo.ReName
}
p := strings.Replace(fileInfo.Path, STORE_DIR+"/", "", 1)
downloadUrl := fmt.Sprintf("http://%s/%s", r.Host, Config().Group+"/"+p+"/"+filename)
log.Info("SyncFileInfo: ", downloadUrl)
w.Write([]byte(downloadUrl))
}
func (this *Server) CheckScene(scene string) (bool, error) {
var (
scenes []string
)
if len(Config().Scenes) == 0 {
return true, nil
}
for _, s := range Config().Scenes {
scenes = append(scenes, strings.Split(s, ":")[0])
}
if !this.util.Contains(scene, scenes) {
return false, errors.New("not valid scene")
}
return true, nil
}
func (this *Server) GetFileInfo(w http.ResponseWriter, r *http.Request) {
var (
fpath string
md5sum string
fileInfo *FileInfo
err error
result JsonResult
)
md5sum = r.FormValue("md5")
fpath = r.FormValue("path")
result.Status = "fail"
if !this.IsPeer(r) {
w.Write([]byte(this.GetClusterNotPermitMessage(r)))
return
}
md5sum = r.FormValue("md5")
if fpath != "" {
fpath = strings.Replace(fpath, "/"+Config().Group+"/", STORE_DIR_NAME+"/", 1)
md5sum = this.util.MD5(fpath)
}
if fileInfo, err = this.GetFileInfoFromLevelDB(md5sum); err != nil {
log.Error(err)
result.Message = err.Error()
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
result.Status = "ok"
result.Data = fileInfo
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
func (this *Server) RemoveFile(w http.ResponseWriter, r *http.Request) {
var (
err error
md5sum string
fileInfo *FileInfo
fpath string
delUrl string
result JsonResult
inner string
name string
)
_ = delUrl
_ = inner
r.ParseForm()
md5sum = r.FormValue("md5")
fpath = r.FormValue("path")
inner = r.FormValue("inner")
result.Status = "fail"
if !this.IsPeer(r) {
w.Write([]byte(this.GetClusterNotPermitMessage(r)))
return
}
if fpath != "" && md5sum == "" {
fpath = strings.Replace(fpath, "/"+Config().Group+"/", STORE_DIR_NAME+"/", 1)
md5sum = this.util.MD5(fpath)
}
if inner != "1" {
for _, peer := range Config().Peers {
delFile := func(peer string, md5sum string, fileInfo *FileInfo) {
delUrl = fmt.Sprintf("%s%s", peer, this.getRequestURI("delete"))
req := httplib.Post(delUrl)
req.Param("md5", md5sum)
req.Param("inner", "1")
req.SetTimeout(time.Second*5, time.Second*10)
if _, err = req.String(); err != nil {
log.Error(err)
}
}
go delFile(peer, md5sum, fileInfo)
}
}
if len(md5sum) < 32 {
result.Message = "md5 unvalid"
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
if fileInfo, err = this.GetFileInfoFromLevelDB(md5sum); err != nil {
result.Message = err.Error()
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
if fileInfo.OffSet != -1 {
result.Message = "small file delete not support"
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
name = fileInfo.Name
if fileInfo.ReName != "" {
name = fileInfo.ReName
}
fpath = fileInfo.Path + "/" + name
if fileInfo.Path != "" && this.util.FileExists(DOCKER_DIR+fpath) {
this.SaveFileMd5Log(fileInfo, CONST_REMOME_Md5_FILE_NAME)
if err = os.Remove(DOCKER_DIR + fpath); err != nil {
result.Message = err.Error()
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
} else {
result.Message = "remove success"
result.Status = "ok"
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
}
result.Message = "fail remove"
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
func (this *Server) getRequestURI(action string) string {
var (
uri string
)
if Config().SupportGroupManage {
uri = "/" + Config().Group + "/" + action
} else {
uri = "/" + action
}
return uri
}
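// BuildFileResult converts a FileInfo into the FileResult returned to upload clients, using
// DownloadDomain for the URL when configured and the request host otherwise.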
func (this *Server) BuildFileResult(fileInfo *FileInfo, r *http.Request) FileResult {
var (
outname string
fileResult FileResult
p string
downloadUrl string
domain string
)
if Config().DownloadDomain != "" {
domain = fmt.Sprintf("http://%s", Config().DownloadDomain)
} else {
domain = fmt.Sprintf("http://%s", r.Host)
}
outname = fileInfo.Name
if fileInfo.ReName != "" {
outname = fileInfo.ReName
}
p = strings.Replace(fileInfo.Path, STORE_DIR_NAME+"/", "", 1)
p = Config().Group + "/" + p + "/" + outname
downloadUrl = fmt.Sprintf("http://%s/%s", r.Host, p)
if Config().DownloadDomain != "" {
downloadUrl = fmt.Sprintf("http://%s/%s", Config().DownloadDomain, p)
}
fileResult.Url = downloadUrl
fileResult.Md5 = fileInfo.Md5
fileResult.Path = "/" + p
fileResult.Domain = domain
fileResult.Scene = fileInfo.Scene
fileResult.Size = fileInfo.Size
fileResult.ModTime = fileInfo.TimeStamp
// Just for Compatibility
fileResult.Src = fileResult.Path
fileResult.Scenes = fileInfo.Scene
return fileResult
}
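// SaveUploadFile writes an uploaded multipart file into the store directory (renaming it when
// RenameFile is enabled), fills in the file's size, md5 and relative path on fileInfo and checks
// the written size against the multipart header.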
func (this *Server) SaveUploadFile(file multipart.File, header *multipart.FileHeader, fileInfo *FileInfo, r *http.Request) (*FileInfo, error) {
var (
err error
outFile *os.File
folder string
fi os.FileInfo
)
defer file.Close()
fileInfo.Name = header.Filename
if Config().RenameFile {
fileInfo.ReName = this.util.MD5(this.util.GetUUID()) + path.Ext(fileInfo.Name)
}
folder = time.Now().Format("20060102/15/04")
if Config().PeerId != "" {
folder = fmt.Sprintf(folder+"/%s", Config().PeerId)
}
if fileInfo.Scene != "" {
folder = fmt.Sprintf(STORE_DIR+"/%s/%s", fileInfo.Scene, folder)
} else {
folder = fmt.Sprintf(STORE_DIR+"/%s", folder)
}
if fileInfo.Path != "" {
if strings.HasPrefix(fileInfo.Path, STORE_DIR) {
folder = fileInfo.Path
} else {
folder = STORE_DIR + "/" + fileInfo.Path
}
}
if !this.util.FileExists(folder) {
os.MkdirAll(folder, 0775)
}
outPath := fmt.Sprintf(folder+"/%s", fileInfo.Name)
if Config().RenameFile {
outPath = fmt.Sprintf(folder+"/%s", fileInfo.ReName)
}
if this.util.FileExists(outPath) && Config().EnableDistinctFile {
for i := 0; i < 10000; i++ {
outPath = fmt.Sprintf(folder+"/%d_%s", i, header.Filename)
fileInfo.Name = fmt.Sprintf("%d_%s", i, header.Filename)
if !this.util.FileExists(outPath) {
break
}
}
}
log.Info(fmt.Sprintf("upload: %s", outPath))
if outFile, err = os.Create(outPath); err != nil {
return fileInfo, err
}
defer outFile.Close()
if err != nil {
log.Error(err)
return fileInfo, errors.New("(error)fail," + err.Error())
}
if _, err = io.Copy(outFile, file); err != nil {
log.Error(err)
return fileInfo, errors.New("(error)fail," + err.Error())
}
if fi, err = outFile.Stat(); err != nil {
log.Error(err)
} else {
fileInfo.Size = fi.Size()
}
	if fi != nil && fi.Size() != header.Size {
		return fileInfo, errors.New("(error)file incomplete")
}
v := this.util.GetFileSum(outFile, Config().FileSumArithmetic)
fileInfo.Md5 = v
//fileInfo.Path = folder //strings.Replace( folder,DOCKER_DIR,"",1)
fileInfo.Path = strings.Replace(folder, DOCKER_DIR, "", 1)
fileInfo.Peers = append(fileInfo.Peers, this.host)
//fmt.Println("upload",fileInfo)
return fileInfo, nil
}
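// Upload handles the upload endpoint. POST stores the file (honouring scene, custom path and
// Google auth), deduplicates by md5 when EnableDistinctFile is set, merges small files into
// haystack volumes and replicates the result to peers; GET with an md5 implements the
// "fast upload" lookup.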
func (this *Server) Upload(w http.ResponseWriter, r *http.Request) {
var (
err error
ok bool
// pathname string
md5sum string
fileInfo FileInfo
uploadFile multipart.File
uploadHeader *multipart.FileHeader
scene string
output string
fileResult FileResult
data []byte
code string
secret interface{}
)
output = r.FormValue("output")
if Config().EnableCrossOrigin {
this.CrossOrigin(w, r)
}
if Config().AuthUrl != "" {
if !this.CheckAuth(w, r) {
log.Warn("auth fail", r.Form)
this.NotPermit(w, r)
w.Write([]byte("auth fail"))
return
}
}
if r.Method == "POST" {
md5sum = r.FormValue("md5")
output = r.FormValue("output")
if Config().ReadOnly {
w.Write([]byte( "(error) readonly"))
return
}
if Config().EnableCustomPath {
fileInfo.Path = r.FormValue("path")
fileInfo.Path = strings.Trim(fileInfo.Path, "/")
}
scene = r.FormValue("scene")
code = r.FormValue("code")
if scene == "" {
//Just for Compatibility
scene = r.FormValue("scenes")
}
if Config().EnableGoogleAuth && scene != "" {
if secret, ok = this.sceneMap.GetValue(scene); ok {
if !this.VerifyGoogleCode(secret.(string), code, int64(Config().DownloadTokenExpire/30)) {
this.NotPermit(w, r)
w.Write([]byte("invalid request,error google code"))
return
}
}
}
fileInfo.Md5 = md5sum
fileInfo.OffSet = -1
if uploadFile, uploadHeader, err = r.FormFile("file"); err != nil {
log.Error(err)
w.Write([]byte(err.Error()))
return
}
fileInfo.Peers = []string{}
fileInfo.TimeStamp = time.Now().Unix()
if scene == "" {
scene = Config().DefaultScene
}
if output == "" {
output = "text"
}
if !this.util.Contains(output, []string{"json", "text"}) {
w.Write([]byte("output just support json or text"))
return
}
fileInfo.Scene = scene
if _, err = this.CheckScene(scene); err != nil {
w.Write([]byte(err.Error()))
return
}
if err != nil {
log.Error(err)
http.Redirect(w, r, "/", http.StatusMovedPermanently)
return
}
if _, err = this.SaveUploadFile(uploadFile, uploadHeader, &fileInfo, r); err != nil {
w.Write([]byte(err.Error()))
return
}
if Config().EnableDistinctFile {
if v, _ := this.GetFileInfoFromLevelDB(fileInfo.Md5); v != nil && v.Md5 != "" {
fileResult = this.BuildFileResult(v, r)
if Config().RenameFile {
os.Remove(DOCKER_DIR + fileInfo.Path + "/" + fileInfo.ReName)
} else {
os.Remove(DOCKER_DIR + fileInfo.Path + "/" + fileInfo.Name)
}
if output == "json" {
if data, err = json.Marshal(fileResult); err != nil {
log.Error(err)
w.Write([]byte(err.Error()))
}
w.Write(data)
} else {
w.Write([]byte(fileResult.Url))
}
return
}
}
if fileInfo.Md5 == "" {
log.Warn(" fileInfo.Md5 is null")
return
}
if md5sum != "" && fileInfo.Md5 != md5sum {
log.Warn(" fileInfo.Md5 and md5sum !=")
return
}
if Config().EnableMergeSmallFile && fileInfo.Size < CONST_SMALL_FILE_SIZE {
if err = this.SaveSmallFile(&fileInfo); err != nil {
log.Error(err)
return
}
}
this.saveFileMd5Log(&fileInfo, CONST_FILE_Md5_FILE_NAME) //maybe slow
go this.postFileToPeer(&fileInfo)
if fileInfo.Size <= 0 {
log.Error("file size is zero")
return
}
fileResult = this.BuildFileResult(&fileInfo, r)
if output == "json" {
if data, err = json.Marshal(fileResult); err != nil {
log.Error(err)
w.Write([]byte(err.Error()))
}
w.Write(data)
} else {
w.Write([]byte(fileResult.Url))
}
return
} else {
md5sum = r.FormValue("md5")
output = r.FormValue("output")
if md5sum == "" {
w.Write([]byte("(error) if you want to upload fast md5 is require" +
",and if you want to upload file,you must use post method "))
return
}
if v, _ := this.GetFileInfoFromLevelDB(md5sum); v != nil && v.Md5 != "" {
fileResult = this.BuildFileResult(v, r)
}
if output == "json" {
if data, err = json.Marshal(fileResult); err != nil {
log.Error(err)
w.Write([]byte(err.Error()))
}
w.Write(data)
} else {
w.Write([]byte(fileResult.Url))
}
}
}
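// SaveSmallFile appends a small uploaded file into a haystack volume as a one-byte flag followed
// by the file content, encodes the volume name, offset, size and extension into fileInfo.ReName
// and removes the original file.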
func (this *Server) SaveSmallFile(fileInfo *FileInfo) (error) {
var (
err error
filename string
fpath string
srcFile *os.File
desFile *os.File
largeDir string
destPath string
reName string
fileExt string
)
filename = fileInfo.Name
fileExt = path.Ext(filename)
if fileInfo.ReName != "" {
filename = fileInfo.ReName
}
fpath = DOCKER_DIR + fileInfo.Path + "/" + filename
largeDir = LARGE_DIR + "/" + Config().PeerId
if !this.util.FileExists(largeDir) {
os.MkdirAll(largeDir, 0775)
}
reName = fmt.Sprintf("%d", this.util.RandInt(100, 300))
destPath = largeDir + "/" + reName
this.lockMap.LockKey(destPath)
defer this.lockMap.UnLockKey(destPath)
if this.util.FileExists(fpath) {
		srcFile, err = os.OpenFile(fpath, os.O_CREATE|os.O_RDONLY, 0666)
if err != nil {
return err
}
defer srcFile.Close()
		desFile, err = os.OpenFile(destPath, os.O_CREATE|os.O_RDWR, 0666)
if err != nil {
return err
}
defer desFile.Close()
fileInfo.OffSet, err = desFile.Seek(0, 2)
if _, err = desFile.Write([]byte("1")); err != nil { //first byte set 1
return err
}
fileInfo.OffSet, err = desFile.Seek(0, 2)
if err != nil {
return err
}
fileInfo.OffSet = fileInfo.OffSet - 1 //minus 1 byte
fileInfo.Size = fileInfo.Size + 1
fileInfo.ReName = fmt.Sprintf("%s,%d,%d,%s", reName, fileInfo.OffSet, fileInfo.Size, fileExt)
if _, err = io.Copy(desFile, srcFile); err != nil {
return err
}
srcFile.Close()
os.Remove(fpath)
fileInfo.Path = strings.Replace(largeDir, DOCKER_DIR, "", 1)
}
return nil
}
func (this *Server) SendToMail(to, subject, body, mailtype string) error {
host := Config().Mail.Host
user := Config().Mail.User
password := Config().Mail.Password
hp := strings.Split(host, ":")
auth := smtp.PlainAuth("", user, password, hp[0])
var contentType string
if mailtype == "html" {
contentType = "Content-Type: text/" + mailtype + "; charset=UTF-8"
} else {
contentType = "Content-Type: text/plain" + "; charset=UTF-8"
}
msg := []byte("To: " + to + "\r\nFrom: " + user + ">\r\nSubject: " + "\r\n" + contentType + "\r\n\r\n" + body)
sendTo := strings.Split(to, ";")
err := smtp.SendMail(host, auth, user, sendTo, msg)
return err
}
func (this *Server) BenchMark(w http.ResponseWriter, r *http.Request) {
t := time.Now()
batch := new(leveldb.Batch)
for i := 0; i < 100000000; i++ {
f := FileInfo{}
f.Peers = []string{"http://192.168.0.1", "http://192.168.2.5"}
f.Path = "20190201/19/02"
s := strconv.Itoa(i)
s = this.util.MD5(s)
f.Name = s
f.Md5 = s
if data, err := json.Marshal(&f); err == nil {
batch.Put([]byte(s), data)
}
if i%10000 == 0 {
if batch.Len() > 0 {
server.ldb.Write(batch, nil)
// batch = new(leveldb.Batch)
batch.Reset()
}
fmt.Println(i, time.Since(t).Seconds())
}
//fmt.Println(server.GetFileInfoFromLevelDB(s))
}
this.util.WriteFile("time.txt", time.Since(t).String())
fmt.Println(time.Since(t).String())
}
func (this *Server) RepairStatWeb(w http.ResponseWriter, r *http.Request) {
var (
result JsonResult
date string
inner string
)
if !this.IsPeer(r) {
result.Message = this.GetClusterNotPermitMessage(r)
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
date = r.FormValue("date")
inner = r.FormValue("inner")
	if date == "" || len(date) != 8 {
		date = this.util.GetToDay()
	}
	if ok, err := regexp.MatchString("\\d{8}", date); err != nil || !ok {
		result.Message = "invalid date"
		w.Write([]byte(this.util.JsonEncodePretty(result)))
		return
	}
if inner != "1" {
for _, peer := range Config().Peers {
req := httplib.Post(peer + this.getRequestURI("repair_stat"))
req.Param("inner", "1")
req.Param("date", date)
if _, err := req.String(); err != nil {
log.Error(err)
}
}
}
result.Data = this.RepairStatByDate(date)
result.Status = "ok"
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
func (this *Server) Stat(w http.ResponseWriter, r *http.Request) {
var (
result JsonResult
inner string
echart string
category []string
barCount []int64
barSize []int64
dataMap map[string]interface{}
)
if !this.IsPeer(r) {
result.Message = this.GetClusterNotPermitMessage(r)
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
r.ParseForm()
inner = r.FormValue("inner")
echart = r.FormValue("echart")
data := this.GetStat()
result.Status = "ok"
result.Data = data
if echart == "1" {
dataMap = make(map[string]interface{}, 3)
for _, v := range data {
barCount = append(barCount, v.FileCount)
barSize = append(barSize, v.TotalSize)
category = append(category, v.Date)
}
dataMap["category"] = category
dataMap["barCount"] = barCount
dataMap["barSize"] = barSize
result.Data = dataMap
}
if inner == "1" {
w.Write([]byte(this.util.JsonEncodePretty(data)))
} else {
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
}
func (this *Server) GetStat() []StatDateFileInfo {
var (
min int64
max int64
err error
i int64
rows []StatDateFileInfo
total StatDateFileInfo
)
min = 20190101
max = 20190101
for k := range this.statMap.Get() {
ks := strings.Split(k, "_")
if len(ks) == 2 {
if i, err = strconv.ParseInt(ks[0], 10, 64); err != nil {
continue
}
if i >= max {
max = i
}
if i < min {
min = i
}
}
}
for i := min; i <= max; i++ {
s := fmt.Sprintf("%d", i)
if v, ok := this.statMap.GetValue(s + "_" + CONST_STAT_FILE_TOTAL_SIZE_KEY); ok {
var info StatDateFileInfo
info.Date = s
switch v.(type) {
case int64:
info.TotalSize = v.(int64)
total.TotalSize = total.TotalSize + v.(int64)
}
if v, ok := this.statMap.GetValue(s + "_" + CONST_STAT_FILE_COUNT_KEY); ok {
switch v.(type) {
case int64:
info.FileCount = v.(int64)
total.FileCount = total.FileCount + v.(int64)
}
}
rows = append(rows, info)
}
}
total.Date = "all"
rows = append(rows, total)
return rows
}
func (this *Server) RegisterExit() {
c := make(chan os.Signal)
signal.Notify(c, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
go func() {
for s := range c {
switch s {
case syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT:
this.ldb.Close()
log.Info("Exit", s)
os.Exit(1)
}
}
}()
}
func (this *Server) AppendToQueue(fileInfo *FileInfo) {
for (len(this.queueToPeers) + CONST_QUEUE_SIZE/10) > CONST_QUEUE_SIZE {
time.Sleep(time.Second * 1)
}
this.queueToPeers <- *fileInfo
}
func (this *Server) AppendToDownloadQueue(fileInfo *FileInfo) {
for (len(this.queueFromPeers) + CONST_QUEUE_SIZE/10) > CONST_QUEUE_SIZE {
time.Sleep(time.Second * 1)
}
this.queueFromPeers <- *fileInfo
}
func (this *Server) ConsumerDownLoad() {
ConsumerFunc := func() {
for {
fileInfo := <-this.queueFromPeers
if len(fileInfo.Peers) <= 0 {
log.Warn("Peer is null", fileInfo)
continue
}
for _, peer := range fileInfo.Peers {
if strings.Contains(peer, "127.0.0.1") {
log.Warn("sync error with 127.0.0.1", fileInfo)
continue
}
if peer != this.host {
this.DownloadFromPeer(peer, &fileInfo)
break
}
}
}
}
for i := 0; i < 50; i++ {
go ConsumerFunc()
}
}
func (this *Server) ConsumerLog() {
go func() {
var (
fileLog *FileLog
)
for {
fileLog = <-this.queueFileLog
this.saveFileMd5Log(fileLog.FileInfo, fileLog.FileName)
}
}()
}
func (this *Server) LoadSearchDict() {
go func() {
log.Info("Load search dict ....")
f, err := os.Open(CONST_SEARCH_FILE_NAME)
if err != nil {
log.Error(err)
return
}
defer f.Close()
r := bufio.NewReader(f)
		for {
			line, isprefix, err := r.ReadLine()
			if err != nil {
				break
			}
			if isprefix {
				continue
			}
			kvs := strings.Split(string(line), "\t")
			if len(kvs) == 2 {
				this.searchMap.Put(kvs[0], kvs[1])
			}
		}
log.Info("finish load search dict")
}()
}
func (this *Server) SaveSearchDict() {
var (
err error
fp *os.File
searchDict map[string]interface{}
k string
v interface{}
)
this.lockMap.LockKey(CONST_SEARCH_FILE_NAME)
defer this.lockMap.UnLockKey(CONST_SEARCH_FILE_NAME)
searchDict = this.searchMap.Get()
	fp, err = os.OpenFile(CONST_SEARCH_FILE_NAME, os.O_CREATE|os.O_RDWR, 0755)
if err != nil {
log.Error(err)
return
}
defer fp.Close()
for k, v = range searchDict {
		fp.WriteString(fmt.Sprintf("%s\t%s\n", k, v.(string)))
}
}
func (this *Server) ConsumerPostToPeer() {
ConsumerFunc := func() {
for {
fileInfo := <-this.queueToPeers
this.postFileToPeer(&fileInfo)
}
}
for i := 0; i < 50; i++ {
go ConsumerFunc()
}
}
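// AutoRepair reconciles this node with its peers: for every day whose file count differs (or for
// every day when forceRepair is set) it exchanges md5 lists with the peer so that each side can
// queue the files it is missing.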
func (this *Server) AutoRepair(forceRepair bool) {
if this.lockMap.IsLock("AutoRepair") {
log.Warn("Lock AutoRepair")
return
}
this.lockMap.LockKey("AutoRepair")
defer this.lockMap.UnLockKey("AutoRepair")
AutoRepairFunc := func(forceRepair bool) {
var (
dateStats []StatDateFileInfo
err error
countKey string
md5s string
localSet mapset.Set
remoteSet mapset.Set
allSet mapset.Set
tmpSet mapset.Set
fileInfo *FileInfo
)
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("AutoRepair")
log.Error(re)
log.Error(string(buffer))
}
}()
		Update := func(peer string, dateStat StatDateFileInfo) { // pull the files for this date from the remote peer
req := httplib.Get(fmt.Sprintf("%s%s?date=%s&force=%s", peer, this.getRequestURI("sync"), dateStat.Date, "1"))
req.SetTimeout(time.Second*5, time.Second*5)
if _, err = req.String(); err != nil {
log.Error(err)
}
log.Info(fmt.Sprintf("syn file from %s date %s", peer, dateStat.Date))
}
for _, peer := range Config().Peers {
req := httplib.Post(fmt.Sprintf("%s%s", peer, this.getRequestURI("stat")))
req.Param("inner", "1")
req.SetTimeout(time.Second*5, time.Second*15)
if err = req.ToJSON(&dateStats); err != nil {
log.Error(err)
continue
}
for _, dateStat := range dateStats {
if dateStat.Date == "all" {
continue
}
countKey = dateStat.Date + "_" + CONST_STAT_FILE_COUNT_KEY
if v, ok := this.statMap.GetValue(countKey); ok {
switch v.(type) {
case int64:
						if v.(int64) != dateStat.FileCount || forceRepair { // counts differ: find the missing md5s on both sides
//TODO
req := httplib.Post(fmt.Sprintf("%s%s", peer, this.getRequestURI("get_md5s_by_date")))
req.SetTimeout(time.Second*15, time.Second*60)
req.Param("date", dateStat.Date)
if md5s, err = req.String(); err != nil {
continue
}
if localSet, err = this.GetMd5sByDate(dateStat.Date, CONST_FILE_Md5_FILE_NAME); err != nil {
log.Error(err)
continue
}
remoteSet = this.util.StrToMapSet(md5s, ",")
allSet = localSet.Union(remoteSet)
md5s = this.util.MapSetToStr(allSet.Difference(localSet), ",")
req = httplib.Post(fmt.Sprintf("%s%s", peer, this.getRequestURI("receive_md5s")))
req.SetTimeout(time.Second*15, time.Second*60)
req.Param("md5s", md5s)
req.String()
tmpSet = allSet.Difference(remoteSet)
for v := range tmpSet.Iter() {
if v != nil {
if fileInfo, err = this.GetFileInfoFromLevelDB(v.(string)); err != nil {
log.Error(err)
continue
}
this.AppendToQueue(fileInfo)
}
}
//Update(peer,dateStat)
}
}
} else {
Update(peer, dateStat)
}
}
}
}
AutoRepairFunc(forceRepair)
}
func (this *Server) CleanLogLevelDBByDate(date string, filename string) {
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("CleanLogLevelDBByDate")
log.Error(re)
log.Error(string(buffer))
}
}()
var (
err error
keyPrefix string
keys mapset.Set
)
keys = mapset.NewSet()
keyPrefix = "%s_%s_"
keyPrefix = fmt.Sprintf(keyPrefix, date, filename)
iter := server.logDB.NewIterator(util.BytesPrefix([]byte(keyPrefix)), nil)
for iter.Next() {
keys.Add(string(iter.Value()))
}
iter.Release()
for key := range keys.Iter() {
err = this.RemoveKeyFromLevelDB(key.(string), this.logDB)
if err != nil {
log.Error(err)
}
}
}
func (this *Server) CleanAndBackUp() {
Clean := func() {
var (
filenames []string
yesterday string
)
if this.curDate != this.util.GetToDay() {
filenames = []string{CONST_Md5_QUEUE_FILE_NAME, CONST_Md5_ERROR_FILE_NAME, CONST_REMOME_Md5_FILE_NAME}
yesterday = this.util.GetDayFromTimeStamp(time.Now().AddDate(0, 0, -1).Unix())
for _, filename := range filenames {
this.CleanLogLevelDBByDate(yesterday, filename)
}
this.BackUpMetaDataByDate(yesterday)
this.curDate = this.util.GetToDay()
}
}
go func() {
for {
time.Sleep(time.Hour * 6)
Clean()
}
}()
}
func (this *Server) LoadFileInfoByDate(date string, filename string) (mapset.Set, error) {
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("LoadFileInfoByDate")
log.Error(re)
log.Error(string(buffer))
}
}()
var (
err error
keyPrefix string
fileInfos mapset.Set
)
fileInfos = mapset.NewSet()
keyPrefix = "%s_%s_"
keyPrefix = fmt.Sprintf(keyPrefix, date, filename)
iter := server.logDB.NewIterator(util.BytesPrefix([]byte(keyPrefix)), nil)
for iter.Next() {
var fileInfo FileInfo
if err = json.Unmarshal(iter.Value(), &fileInfo); err != nil {
continue
}
fileInfos.Add(&fileInfo)
}
iter.Release()
return fileInfos, nil
}
func (this *Server) LoadQueueSendToPeer() {
if queue, err := this.LoadFileInfoByDate(this.util.GetToDay(), CONST_Md5_QUEUE_FILE_NAME); err != nil {
log.Error(err)
} else {
for fileInfo := range queue.Iter() {
//this.queueFromPeers <- *fileInfo.(*FileInfo)
this.AppendToDownloadQueue(fileInfo.(*FileInfo))
}
}
}
func (this *Server) CheckClusterStatus() {
check := func() {
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("CheckClusterStatus")
log.Error(re)
log.Error(string(buffer))
}
}()
var (
status JsonResult
err error
subject string
body string
req *httplib.BeegoHTTPRequest
)
for _, peer := range Config().Peers {
req = httplib.Get(fmt.Sprintf("%s%s", peer, this.getRequestURI("status")))
req.SetTimeout(time.Second*5, time.Second*5)
err = req.ToJSON(&status)
if status.Status != "ok" {
for _, to := range Config().AlramReceivers {
subject = "fastdfs server error"
if err != nil {
body = fmt.Sprintf("%s\nserver:%s\nerror:\n%s", subject, peer, err.Error())
} else {
body = fmt.Sprintf("%s\nserver:%s\n", subject, peer)
}
if err = this.SendToMail(to, subject, body, "text"); err != nil {
log.Error(err)
}
}
if Config().AlarmUrl != "" {
req = httplib.Post(Config().AlarmUrl)
req.SetTimeout(time.Second*10, time.Second*10)
req.Param("message", body)
req.Param("subject", subject)
if _, err = req.String(); err != nil {
log.Error(err)
}
}
}
}
}
go func() {
for {
time.Sleep(time.Minute * 10)
check()
}
}()
}
func (this *Server) RepairFileInfo(w http.ResponseWriter, r *http.Request) {
var (
result JsonResult
)
if !this.IsPeer(r) {
w.Write([]byte(this.GetClusterNotPermitMessage(r)))
return
}
if !Config().EnableMigrate {
w.Write([]byte("please set enable_migrate=true"))
return
}
result.Status = "ok"
result.Message = "repair job start,don't try again,very danger "
go this.RepairFileInfoFromFile()
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
func (this *Server) Reload(w http.ResponseWriter, r *http.Request) {
var (
err error
data []byte
cfg GloablConfig
action string
cfgjson string
result JsonResult
)
result.Status = "fail"
r.ParseForm()
if !this.IsPeer(r) {
w.Write([]byte(this.GetClusterNotPermitMessage(r)))
return
}
cfgjson = r.FormValue("cfg")
action = r.FormValue("action")
_ = cfgjson
if action == "get" {
result.Data = Config()
result.Status = "ok"
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
if action == "set" {
if cfgjson == "" {
result.Message = "(error)parameter cfg(json) require"
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
if err = json.Unmarshal([]byte(cfgjson), &cfg); err != nil {
log.Error(err)
result.Message = err.Error()
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
result.Status = "ok"
cfgjson = this.util.JsonEncodePretty(cfg)
this.util.WriteFile(CONST_CONF_FILE_NAME, cfgjson)
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
if action == "reload" {
if data, err = ioutil.ReadFile(CONST_CONF_FILE_NAME); err != nil {
result.Message = err.Error()
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
if err = json.Unmarshal(data, &cfg); err != nil {
result.Message = err.Error()
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
ParseConfig(CONST_CONF_FILE_NAME)
this.initComponent(true)
result.Status = "ok"
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
if action == "" {
w.Write([]byte("(error)action support set(json) get reload"))
}
}
func (this *Server) RemoveEmptyDir(w http.ResponseWriter, r *http.Request) {
var (
result JsonResult
)
result.Status = "ok"
if this.IsPeer(r) {
go this.util.RemoveEmptyDir(DATA_DIR)
go this.util.RemoveEmptyDir(STORE_DIR)
result.Message = "clean job start ..,don't try again!!!"
w.Write([]byte(this.util.JsonEncodePretty(result)))
} else {
result.Message = this.GetClusterNotPermitMessage(r)
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
}
func (this *Server) BackUp(w http.ResponseWriter, r *http.Request) {
var (
err error
date string
result JsonResult
inner string
url string
)
result.Status = "ok"
r.ParseForm()
date = r.FormValue("date")
inner = r.FormValue("inner")
if date == "" {
date = this.util.GetToDay()
}
if this.IsPeer(r) {
if inner != "1" {
for _, peer := range Config().Peers {
backUp := func(peer string, date string) {
url = fmt.Sprintf("%s%s", peer, this.getRequestURI("backup"))
req := httplib.Post(url)
req.Param("date", date)
req.Param("inner", "1")
req.SetTimeout(time.Second*5, time.Second*600)
if _, err = req.String(); err != nil {
log.Error(err)
}
}
go backUp(peer, date)
}
}
go this.BackUpMetaDataByDate(date)
result.Message = "back job start..."
w.Write([]byte(this.util.JsonEncodePretty(result)))
} else {
result.Message = this.GetClusterNotPermitMessage(r)
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
}
// Notice: Search scans the whole LevelDB, so performance is poor; it suits small data sets but uses little memory. For higher performance, search via searchMap instead, at the cost of more memory.
func (this *Server) Search(w http.ResponseWriter, r *http.Request) {
var (
result JsonResult
err error
kw string
count int
fileInfos []FileInfo
md5s []string
)
kw = r.FormValue("kw")
	if !this.IsPeer(r) {
		result.Message = this.GetClusterNotPermitMessage(r)
		w.Write([]byte(this.util.JsonEncodePretty(result)))
		return
	}
iter := this.ldb.NewIterator(nil, nil)
for iter.Next() {
var fileInfo FileInfo
value := iter.Value()
if err = json.Unmarshal(value, &fileInfo); err != nil {
log.Error(err)
continue
}
if strings.Contains(fileInfo.Name, kw) && !this.util.Contains(fileInfo.Md5, md5s) {
count = count + 1
fileInfos = append(fileInfos, fileInfo)
md5s = append(md5s, fileInfo.Md5)
}
if count >= 100 {
break
}
}
iter.Release()
err = iter.Error()
if err != nil {
		log.Error(err)
}
//fileInfos=this.SearchDict(kw) // serch file from map for huge capacity
result.Status = "ok"
result.Data = fileInfos
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
func (this *Server) SearchDict(kw string) []FileInfo {
var (
fileInfos []FileInfo
fileInfo *FileInfo
)
for dict := range this.searchMap.Iter() {
if strings.Contains(dict.Val.(string), kw) {
if fileInfo, _ = this.GetFileInfoFromLevelDB(dict.Key); fileInfo != nil {
fileInfos = append(fileInfos, *fileInfo)
}
}
}
return fileInfos
}
func (this *Server) ListDir(w http.ResponseWriter, r *http.Request) {
var (
result JsonResult
dir string
filesInfo []os.FileInfo
err error
filesResult []FileInfoResult
tmpDir string
)
if !this.IsPeer(r) {
result.Message = this.GetClusterNotPermitMessage(r)
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
dir = r.FormValue("dir")
//if dir == "" {
// result.Message = "dir can't null"
// w.Write([]byte(this.util.JsonEncodePretty(result)))
// return
//}
dir = strings.Replace(dir, ".", "", -1)
if tmpDir, err = os.Readlink(dir); err == nil {
dir = tmpDir
}
filesInfo, err = ioutil.ReadDir(DOCKER_DIR + STORE_DIR_NAME + "/" + dir)
if err != nil {
log.Error(err)
result.Message = err.Error()
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
for _, f := range filesInfo {
fi := FileInfoResult{
Name: f.Name(),
Size: f.Size(),
IsDir: f.IsDir(),
ModTime: f.ModTime(),
Path: dir,
Md5: this.util.MD5(STORE_DIR_NAME + "/" + dir + "/" + f.Name()),
}
filesResult = append(filesResult, fi)
}
result.Status = "ok"
result.Data = filesResult
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
func (this *Server) VerifyGoogleCode(secret string, code string, discrepancy int64) bool {
var (
goauth *googleAuthenticator.GAuth
)
goauth = googleAuthenticator.NewGAuth()
if ok, err := goauth.VerifyCode(secret, code, discrepancy); ok {
return ok
} else {
log.Error(err)
return ok
}
}
func (this *Server) GenGoogleCode(w http.ResponseWriter, r *http.Request) {
var (
err error
result JsonResult
secret string
goauth *googleAuthenticator.GAuth
)
r.ParseForm()
goauth = googleAuthenticator.NewGAuth()
secret = r.FormValue("secret")
result.Status = "ok"
result.Message = "ok"
if !this.IsPeer(r) {
result.Message = this.GetClusterNotPermitMessage(r)
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
if result.Data, err = goauth.GetCode(secret); err != nil {
result.Message = err.Error()
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
func (this *Server) GenGoogleSecret(w http.ResponseWriter, r *http.Request) {
var (
result JsonResult
)
result.Status = "ok"
result.Message = "ok"
	if !this.IsPeer(r) {
		result.Message = this.GetClusterNotPermitMessage(r)
		w.Write([]byte(this.util.JsonEncodePretty(result)))
		return
	}
GetSeed := func(length int) string {
seeds := "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"
s := ""
random.Seed(time.Now().UnixNano())
for i := 0; i < length; i++ {
s += string(seeds[random.Intn(32)])
}
return s
}
result.Data = GetSeed(16)
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
func (this *Server) Report(w http.ResponseWriter, r *http.Request) {
var (
reportFileName string
result JsonResult
html string
)
result.Status = "ok"
r.ParseForm()
if this.IsPeer(r) {
reportFileName = STATIC_DIR + "/report.html"
if this.util.IsExist(reportFileName) {
if data, err := this.util.ReadBinFile(reportFileName); err != nil {
log.Error(err)
result.Message = err.Error()
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
} else {
html = string(data)
if Config().SupportGroupManage {
html = strings.Replace(html, "{group}", "/"+Config().Group, 1)
} else {
html = strings.Replace(html, "{group}", "", 1)
}
w.Write([]byte(html))
return
}
} else {
w.Write([]byte(fmt.Sprintf("%s is not found", reportFileName)))
}
} else {
w.Write([]byte(this.GetClusterNotPermitMessage(r)))
}
}
func (this *Server) Repair(w http.ResponseWriter, r *http.Request) {
var (
force string
forceRepair bool
result JsonResult
)
result.Status = "ok"
r.ParseForm()
force = r.FormValue("force")
if force == "1" {
forceRepair = true
}
if this.IsPeer(r) {
go this.AutoRepair(forceRepair)
result.Message = "repair job start..."
w.Write([]byte(this.util.JsonEncodePretty(result)))
} else {
result.Message = this.GetClusterNotPermitMessage(r)
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
}
func (this *Server) Status(w http.ResponseWriter, r *http.Request) {
var (
status JsonResult
sts map[string]interface{}
today string
sumset mapset.Set
ok bool
v interface{}
)
memStat := new(runtime.MemStats)
runtime.ReadMemStats(memStat)
today = this.util.GetToDay()
sts = make(map[string]interface{})
sts["Fs.QueueFromPeers"] = len(this.queueFromPeers)
sts["Fs.QueueToPeers"] = len(this.queueToPeers)
sts["Fs.QueueFileLog"] = len(this.queueFileLog)
for _, k := range []string{CONST_FILE_Md5_FILE_NAME, CONST_Md5_ERROR_FILE_NAME, CONST_Md5_QUEUE_FILE_NAME} {
k2 := fmt.Sprintf("%s_%s", today, k)
if v, ok = this.sumMap.GetValue(k2); ok {
sumset = v.(mapset.Set)
if k == CONST_Md5_QUEUE_FILE_NAME {
sts["Fs.QueueSetSize"] = sumset.Cardinality()
}
if k == CONST_Md5_ERROR_FILE_NAME {
sts["Fs.ErrorSetSize"] = sumset.Cardinality()
}
if k == CONST_FILE_Md5_FILE_NAME {
sts["Fs.FileSetSize"] = sumset.Cardinality()
}
}
}
sts["Fs.AutoRepair"] = Config().AutoRepair
sts["Fs.RefreshInterval"] = Config().RefreshInterval
sts["Fs.Peers"] = Config().Peers
sts["Fs.Local"] = this.host
sts["Fs.FileStats"] = this.GetStat()
sts["Fs.ShowDir"] = Config().ShowDir
sts["Sys.NumGoroutine"] = runtime.NumGoroutine()
sts["Sys.NumCpu"] = runtime.NumCPU()
sts["Sys.Alloc"] = memStat.Alloc
sts["Sys.TotalAlloc"] = memStat.TotalAlloc
sts["Sys.HeapAlloc"] = memStat.HeapAlloc
sts["Sys.Frees"] = memStat.Frees
sts["Sys.HeapObjects"] = memStat.HeapObjects
sts["Sys.NumGC"] = memStat.NumGC
sts["Sys.GCCPUFraction"] = memStat.GCCPUFraction
sts["Sys.GCSys"] = memStat.GCSys
//sts["Sys.MemInfo"] = memStat
status.Status = "ok"
status.Data = sts
w.Write([]byte(this.util.JsonEncodePretty(status)))
}
func (this *Server) HeartBeat(w http.ResponseWriter, r *http.Request) {
}
func (this *Server) Index(w http.ResponseWriter, r *http.Request) {
var (
uploadUrl string
uploadBigUrl string
uppy string
)
uploadUrl = "/upload"
uploadBigUrl = CONST_BIG_UPLOAD_PATH_SUFFIX
if Config().EnableWebUpload {
if Config().SupportGroupManage {
uploadUrl = fmt.Sprintf("/%s/upload", Config().Group)
uploadBigUrl = fmt.Sprintf("/%s%s", Config().Group, CONST_BIG_UPLOAD_PATH_SUFFIX)
}
uppy = `<html>
<head>
<meta charset="utf-8" />
<title>go-fastdfs</title>
<style>form { bargin } .form-line { display:block;height: 30px;margin:8px; } #stdUpload {background: #fafafa;border-radius: 10px;width: 745px; }</style>
<link href="https://transloadit.edgly.net/releases/uppy/v0.30.0/dist/uppy.min.css" rel="stylesheet"></head>
<body>
<div>标准上传(强列建议使用这种方式)</div>
<div id="stdUpload">
<form action="%s" method="post" enctype="multipart/form-data">
<span class="form-line">文件(file):
<input type="file" id="file" name="file" /></span>
<span class="form-line">场景(scene):
<input type="text" id="scene" name="scene" value="%s" /></span>
<span class="form-line">输出(output):
<input type="text" id="output" name="output" value="json" /></span>
<span class="form-line">自定义路径(path):
<input type="text" id="path" name="path" value="" /></span>
<span class="form-line">google认证码(code):
<input type="text" id="code" name="code" value="" /></span>
<span class="form-line">自定义认证(auth_token):
<input type="text" id="auth_token" name="auth_token" value="" /></span>
<input type="submit" name="submit" value="upload" />
</form>
</div>
<div>断点续传(如果文件很大时可以考虑)</div>
<div>
<div id="drag-drop-area"></div>
<script src="https://transloadit.edgly.net/releases/uppy/v0.30.0/dist/uppy.min.js"></script>
<script>var uppy = Uppy.Core().use(Uppy.Dashboard, {
inline: true,
target: '#drag-drop-area'
}).use(Uppy.Tus, {
endpoint: '%s'
})
uppy.on('complete', (result) => {
// console.log(result) console.log('Upload complete! We’ve uploaded these files:', result.successful)
})
	uppy.setMeta({ auth_token: '9ee60e59-cb0f-4578-aaba-29b9fc2919ca',callback_url:'http://127.0.0.1/callback' })// pass the upload auth parameters here; in the callback_url request, id is the file ID and info is the file's basic information as JSON
</script>
</div>
</body>
</html>`
uppyFileName := STATIC_DIR + "/uppy.html"
if this.util.IsExist(uppyFileName) {
if data, err := this.util.ReadBinFile(uppyFileName); err != nil {
log.Error(err)
} else {
uppy = string(data)
}
} else {
this.util.WriteFile(uppyFileName, uppy)
}
fmt.Fprintf(w,
fmt.Sprintf(uppy, uploadUrl, Config().DefaultScene, uploadBigUrl))
} else {
w.Write([]byte("web upload deny"))
}
}
func init() {
DOCKER_DIR = os.Getenv("GO_FASTDFS_DIR")
if DOCKER_DIR != "" {
if !strings.HasSuffix(DOCKER_DIR, "/") {
DOCKER_DIR = DOCKER_DIR + "/"
}
}
STORE_DIR = DOCKER_DIR + STORE_DIR_NAME
CONF_DIR = DOCKER_DIR + CONF_DIR_NAME
DATA_DIR = DOCKER_DIR + DATA_DIR_NAME
LOG_DIR = DOCKER_DIR + LOG_DIR_NAME
STATIC_DIR = DOCKER_DIR + STATIC_DIR_NAME
LARGE_DIR_NAME = "haystack"
LARGE_DIR = STORE_DIR + "/haystack"
CONST_LEVELDB_FILE_NAME = DATA_DIR + "/fileserver.db"
CONST_LOG_LEVELDB_FILE_NAME = DATA_DIR + "/log.db"
CONST_STAT_FILE_NAME = DATA_DIR + "/stat.json"
CONST_CONF_FILE_NAME = CONF_DIR + "/cfg.json"
CONST_SEARCH_FILE_NAME = DATA_DIR + "/search.txt"
FOLDERS = []string{DATA_DIR, STORE_DIR, CONF_DIR, STATIC_DIR}
logAccessConfigStr = strings.Replace(logAccessConfigStr, "{DOCKER_DIR}", DOCKER_DIR, -1)
logConfigStr = strings.Replace(logConfigStr, "{DOCKER_DIR}", DOCKER_DIR, -1)
for _, folder := range FOLDERS {
os.MkdirAll(folder, 0775)
}
server = NewServer()
flag.Parse()
peerId := fmt.Sprintf("%d", server.util.RandInt(0, 9))
if !server.util.FileExists(CONST_CONF_FILE_NAME) {
peer := "http://" + server.util.GetPulicIP() + ":8080"
cfg := fmt.Sprintf(cfgJson, peerId, peer, peer)
server.util.WriteFile(CONST_CONF_FILE_NAME, cfg)
}
if logger, err := log.LoggerFromConfigAsBytes([]byte(logConfigStr)); err != nil {
panic(err)
} else {
log.ReplaceLogger(logger)
}
if _logacc, err := log.LoggerFromConfigAsBytes([]byte(logAccessConfigStr)); err == nil {
logacc = _logacc
log.Info("succes init log access")
} else {
log.Error(err.Error())
}
ParseConfig(CONST_CONF_FILE_NAME)
if Config().QueueSize == 0 {
Config().QueueSize = CONST_QUEUE_SIZE
}
if Config().PeerId == "" {
Config().PeerId = peerId
}
staticHandler = http.StripPrefix("/"+Config().Group+"/", http.FileServer(http.Dir(STORE_DIR)))
server.initComponent(false)
}
func (this *Server) test() {
testLock := func() {
tt := func(i int) {
if server.lockMap.IsLock("xx") {
return
}
server.lockMap.LockKey("xx")
defer server.lockMap.UnLockKey("xx")
//time.Sleep(time.Nanosecond*1)
fmt.Println("xx", i)
}
for i := 0; i < 10000; i++ {
go tt(i)
}
time.Sleep(time.Second * 3)
go tt(999999)
go tt(999999)
go tt(999999)
}
_ = testLock
testFile := func() {
var (
err error
f *os.File
)
f, err = os.OpenFile("tt", os.O_CREATE|os.O_RDWR, 0777)
if err != nil {
fmt.Println(err)
}
f.WriteAt([]byte("1"), 100)
f.Seek(0, 2)
f.Write([]byte("2"))
//fmt.Println(f.Seek(0, 2))
//fmt.Println(f.Seek(3, 2))
//fmt.Println(f.Seek(3, 0))
//fmt.Println(f.Seek(3, 1))
//fmt.Println(f.Seek(3, 0))
//f.Write([]byte("1"))
}
_ = testFile
//testFile()
}
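// hookDataStore wraps the underlying tusd.DataStore so every new upload can be
// authenticated against Config().AuthUrl before it is accepted.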
type hookDataStore struct {
tusd.DataStore
}
func (store hookDataStore) NewUpload(info tusd.FileInfo) (id string, err error) {
if Config().AuthUrl != "" {
if auth_token, ok := info.MetaData["auth_token"]; !ok {
msg := "token auth fail,auth_token is not in http header Upload-Metadata," +
"in uppy uppy.setMeta({ auth_token: '9ee60e59-cb0f-4578-aaba-29b9fc2919ca' })"
log.Error(msg, fmt.Sprintf("current header:%v", info.MetaData))
return "", errors.New(msg)
} else {
req := httplib.Post(Config().AuthUrl)
req.Param("auth_token", auth_token)
req.SetTimeout(time.Second*5, time.Second*10)
content, err := req.String()
if err != nil {
log.Error(err)
return "", err
}
if strings.TrimSpace(content) != "ok" {
return "", errors.New("auth fail, response: " + content)
}
}
}
return store.DataStore.NewUpload(info)
}
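// initTus wires up the tus resumable-upload handler: it prepares the big-file
// directory, rotates tusd.log once it grows past 500MB, registers the auth hook
// and moves completed uploads into the regular store layout.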
func (this *Server) initTus() {
var (
err error
fileLog *os.File
bigDir string
)
BIG_DIR := STORE_DIR + "/_big/" + Config().PeerId
os.MkdirAll(BIG_DIR, 0775)
os.MkdirAll(LOG_DIR, 0775)
store := filestore.FileStore{
Path: BIG_DIR,
}
if fileLog, err = os.OpenFile(LOG_DIR+"/tusd.log", os.O_CREATE|os.O_RDWR, 0666); err != nil {
log.Error(err)
panic("initTus")
}
go func() {
for {
if fi, err := fileLog.Stat(); err != nil {
log.Error(err)
} else {
if fi.Size() > 1024*1024*500 { //500M
this.util.CopyFile(LOG_DIR+"/tusd.log", LOG_DIR+"/tusd.log.2")
fileLog.Seek(0, 0)
fileLog.Truncate(0)
fileLog.Seek(0, 2)
}
}
time.Sleep(time.Second * 30)
}
}()
l := slog.New(fileLog, "[tusd] ", slog.LstdFlags)
bigDir = CONST_BIG_UPLOAD_PATH_SUFFIX
if Config().SupportGroupManage {
bigDir = fmt.Sprintf("/%s%s", Config().Group, CONST_BIG_UPLOAD_PATH_SUFFIX)
}
composer := tusd.NewStoreComposer()
// support raw tus upload and download
store.GetReaderExt = func(id string) (io.Reader, error) {
var (
offset int64
err error
length int
buffer []byte
fi *FileInfo
)
if fi, err = this.GetFileInfoFromLevelDB(id); err != nil {
log.Error(err)
return nil, err
} else {
fp := DOCKER_DIR + fi.Path + "/" + fi.ReName
if this.util.FileExists(fp) {
log.Info(fmt.Sprintf("download:%s", fp))
return os.Open(fp)
}
ps := strings.Split(fp, ",")
if len(ps) > 2 && this.util.FileExists(ps[0]) {
if length, err = strconv.Atoi(ps[2]); err != nil {
return nil, err
}
if offset, err = strconv.ParseInt(ps[1], 10, 64); err != nil {
return nil, err
}
if buffer, err = this.util.ReadFileByOffSet(ps[0], offset, length); err != nil {
return nil, err
}
if buffer[0] == '1' {
bufferReader := bytes.NewBuffer(buffer[1:])
return bufferReader, nil
} else {
msg := "data no sync"
log.Error(msg)
return nil, errors.New(msg)
}
}
return nil, errors.New(fmt.Sprintf("%s not found", fp))
}
}
store.UseIn(composer)
SetupPreHooks := func(composer *tusd.StoreComposer) {
composer.UseCore(hookDataStore{
DataStore: composer.Core,
})
}
SetupPreHooks(composer)
handler, err := tusd.NewHandler(tusd.Config{
Logger: l,
BasePath: bigDir,
StoreComposer: composer,
NotifyCompleteUploads: true,
RespectForwardedHeaders: true,
})
notify := func(handler *tusd.Handler) {
for {
select {
case info := <-handler.CompleteUploads:
log.Info("CompleteUploads", info)
name := ""
if v, ok := info.MetaData["filename"]; ok {
name = v
}
var err error
md5sum := ""
oldFullPath := BIG_DIR + "/" + info.ID + ".bin"
infoFullPath := BIG_DIR + "/" + info.ID + ".info"
if md5sum, err = this.util.GetFileSumByName(oldFullPath, Config().FileSumArithmetic); err != nil {
log.Error(err)
continue
}
ext := path.Ext(name)
filename := md5sum + ext
timeStamp := time.Now().Unix()
fpath := time.Now().Format("/20060102/15/04/")
newFullPath := STORE_DIR + "/" + Config().DefaultScene + fpath + Config().PeerId + "/" + filename
if fi, err := this.GetFileInfoFromLevelDB(md5sum); err != nil {
log.Error(err)
} else {
if fi.Md5 != "" {
if _, err := this.SaveFileInfoToLevelDB(info.ID, fi, this.ldb); err != nil {
log.Error(err)
}
log.Info(fmt.Sprintf("file is found md5:%s", fi.Md5))
log.Info("remove file:", oldFullPath)
log.Info("remove file:", infoFullPath)
os.Remove(oldFullPath)
os.Remove(infoFullPath)
continue
}
}
fpath = STORE_DIR_NAME + "/" + Config().DefaultScene + fpath + Config().PeerId
os.MkdirAll(DOCKER_DIR+fpath, 0775)
fileInfo := &FileInfo{
Name: name,
Path: fpath,
ReName: filename,
Size: info.Size,
TimeStamp: timeStamp,
Md5: md5sum,
Peers: []string{this.host},
OffSet: -1,
}
if err = os.Rename(oldFullPath, newFullPath); err != nil {
log.Error(err)
continue
}
log.Info(fileInfo)
os.Remove(infoFullPath)
if _, err = this.SaveFileInfoToLevelDB(info.ID, fileInfo, this.ldb); err != nil { //associate file id
log.Error(err)
}
this.SaveFileMd5Log(fileInfo, CONST_FILE_Md5_FILE_NAME)
go this.postFileToPeer(fileInfo)
callBack := func(info tusd.FileInfo, fileInfo *FileInfo) {
if callback_url, ok := info.MetaData["callback_url"]; ok {
req := httplib.Post(callback_url)
req.SetTimeout(time.Second*10, time.Second*10)
req.Param("info", server.util.JsonEncodePretty(fileInfo))
req.Param("id", info.ID)
if _, err := req.String(); err != nil {
log.Error(err)
}
}
}
go callBack(info, fileInfo)
}
}
}
go notify(handler)
if err != nil {
log.Error(err)
}
http.Handle(bigDir, http.StripPrefix(bigDir, handler))
}
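// FormatStatInfo loads the persisted stat.json counters into statMap,
// or rebuilds today's statistics if the file does not exist yet.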
func (this *Server) FormatStatInfo() {
var (
data []byte
err error
count int64
stat map[string]interface{}
)
if this.util.FileExists(CONST_STAT_FILE_NAME) {
if data, err = this.util.ReadBinFile(CONST_STAT_FILE_NAME); err != nil {
log.Error(err)
} else {
if err = json.Unmarshal(data, &stat); err != nil {
log.Error(err)
} else {
for k, v := range stat {
switch v.(type) {
case float64:
vv := strings.Split(fmt.Sprintf("%f", v), ".")[0]
if count, err = strconv.ParseInt(vv, 10, 64); err != nil {
log.Error(err)
} else {
this.statMap.Put(k, count)
}
default:
this.statMap.Put(k, v)
}
}
}
}
} else {
this.RepairStatByDate(this.util.GetToDay())
}
}
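// initComponent resolves the server's public address, normalizes the peer list
// (dropping entries that point at this host), loads stat info, optionally starts
// the tus handler and registers the configured scenes.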
func (this *Server) initComponent(isReload bool) {
var (
ip string
)
ip = this.util.GetPulicIP()
if Config().Host == "" {
if len(strings.Split(Config().Addr, ":")) == 2 {
server.host = fmt.Sprintf("http://%s:%s", ip, strings.Split(Config().Addr, ":")[1])
Config().Host = server.host
}
} else {
if strings.HasPrefix(Config().Host, "http") {
server.host = Config().Host
} else {
server.host = "http://" + Config().Host
}
}
ex, _ := regexp.Compile("\\d+\\.\\d+\\.\\d+\\.\\d+")
var peers []string
for _, peer := range Config().Peers {
if this.util.Contains(ip, ex.FindAllString(peer, -1)) ||
this.util.Contains("127.0.0.1", ex.FindAllString(peer, -1)) {
continue
}
if strings.HasPrefix(peer, "http") {
peers = append(peers, peer)
} else {
peers = append(peers, "http://"+peer)
}
}
Config().Peers = peers
if !isReload {
this.FormatStatInfo()
if Config().EnableTus {
this.initTus()
}
}
for _, s := range Config().Scenes {
kv := strings.Split(s, ":")
if len(kv) == 2 {
this.sceneMap.Put(kv[0], kv[1])
}
}
}
type HttpHandler struct {
}
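// ServeHTTP wraps the default mux with access logging, panic recovery
// (returning HTTP 500) and optional CORS headers.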
func (HttpHandler) ServeHTTP(res http.ResponseWriter, req *http.Request) {
status_code := "200"
defer func(t time.Time) {
logStr := fmt.Sprintf("[Access] %s | %v | %s | %s | %s | %s |%s",
time.Now().Format("2006/01/02 - 15:04:05"),
res.Header(),
time.Since(t).String(),
server.util.GetClientIp(req),
req.Method,
status_code,
req.RequestURI,
)
logacc.Info(logStr)
}(time.Now())
defer func() {
if err := recover(); err != nil {
status_code = "500"
res.WriteHeader(500)
print(err)
buff := debug.Stack()
log.Error(err)
log.Error(string(buff))
}
}()
if Config().EnableCrossOrigin {
server.CrossOrigin(res, req)
}
http.DefaultServeMux.ServeHTTP(res, req)
}
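// Main starts the background sync and cleanup goroutines, registers all HTTP
// routes (optionally under the configured group prefix) and blocks in
// http.ListenAndServe.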
func (this *Server) Main() {
go func() {
for {
this.CheckFileAndSendToPeer(this.util.GetToDay(), CONST_Md5_ERROR_FILE_NAME, false)
//fmt.Println("CheckFileAndSendToPeer")
time.Sleep(time.Second * time.Duration(Config().RefreshInterval))
//this.util.RemoveEmptyDir(STORE_DIR)
}
}()
go this.CleanAndBackUp()
go this.CheckClusterStatus()
go this.LoadQueueSendToPeer()
go this.ConsumerPostToPeer()
go this.ConsumerLog()
go this.ConsumerDownLoad()
//go this.LoadSearchDict()
if Config().EnableMigrate {
go this.RepairFileInfoFromFile()
}
if Config().AutoRepair {
go func() {
for {
time.Sleep(time.Minute * 3)
this.AutoRepair(false)
time.Sleep(time.Minute * 60)
}
}()
}
groupRoute := ""
if Config().SupportGroupManage {
groupRoute = "/" + Config().Group
}
uploadPage := "upload.html"
if groupRoute == "" {
http.HandleFunc(fmt.Sprintf("%s", "/"), this.Index)
http.HandleFunc(fmt.Sprintf("/%s", uploadPage), this.Index)
} else {
http.HandleFunc(fmt.Sprintf("%s", groupRoute), this.Index)
http.HandleFunc(fmt.Sprintf("%s/%s", groupRoute, uploadPage), this.Index)
}
http.HandleFunc(fmt.Sprintf("%s/check_file_exist", groupRoute), this.CheckFileExist)
http.HandleFunc(fmt.Sprintf("%s/upload", groupRoute), this.Upload)
http.HandleFunc(fmt.Sprintf("%s/delete", groupRoute), this.RemoveFile)
http.HandleFunc(fmt.Sprintf("%s/get_file_info", groupRoute), this.GetFileInfo)
http.HandleFunc(fmt.Sprintf("%s/sync", groupRoute), this.Sync)
http.HandleFunc(fmt.Sprintf("%s/stat", groupRoute), this.Stat)
http.HandleFunc(fmt.Sprintf("%s/repair_stat", groupRoute), this.RepairStatWeb)
http.HandleFunc(fmt.Sprintf("%s/status", groupRoute), this.Status)
http.HandleFunc(fmt.Sprintf("%s/repair", groupRoute), this.Repair)
http.HandleFunc(fmt.Sprintf("%s/report", groupRoute), this.Report)
http.HandleFunc(fmt.Sprintf("%s/backup", groupRoute), this.BackUp)
http.HandleFunc(fmt.Sprintf("%s/search", groupRoute), this.Search)
http.HandleFunc(fmt.Sprintf("%s/list_dir", groupRoute), this.ListDir)
http.HandleFunc(fmt.Sprintf("%s/remove_empty_dir", groupRoute), this.RemoveEmptyDir)
http.HandleFunc(fmt.Sprintf("%s/repair_fileinfo", groupRoute), this.RepairFileInfo)
http.HandleFunc(fmt.Sprintf("%s/reload", groupRoute), this.Reload)
http.HandleFunc(fmt.Sprintf("%s/syncfile_info", groupRoute), this.SyncFileInfo)
http.HandleFunc(fmt.Sprintf("%s/get_md5s_by_date", groupRoute), this.GetMd5sForWeb)
http.HandleFunc(fmt.Sprintf("%s/receive_md5s", groupRoute), this.ReceiveMd5s)
http.HandleFunc(fmt.Sprintf("%s/gen_google_secret", groupRoute), this.GenGoogleSecret)
http.HandleFunc(fmt.Sprintf("%s/gen_google_code", groupRoute), this.GenGoogleCode)
http.HandleFunc("/"+Config().Group+"/", this.Download)
fmt.Println("Listen on " + Config().Addr)
err := http.ListenAndServe(Config().Addr, new(HttpHandler))
log.Error(err)
fmt.Println(err)
}
func main() {
server.Main()
}
|
[
"\"GO_FASTDFS_DIR\""
] |
[] |
[
"GO_FASTDFS_DIR"
] |
[]
|
["GO_FASTDFS_DIR"]
|
go
| 1 | 0 | |
script.py
|
from queue import Queue
from datetime import datetime, timedelta
import django,os,time,asyncio,smtplib,requests
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CowinNotifier.settings')
django.setup()
from notifier.models import Pin,Order,Email
schedule = Queue(maxsize = 3000)
in_queue=set()
class mail:
def __init__(self, pin, message):
self.pin = pin
self.message = message
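# checkAvailability polls the CoWIN public calendarByPin API for every stored
# pincode over the next two days and queues a mail object when open slots are found.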
def checkAvailability():
num_days = 2
actual = datetime.today()
list_format = [actual + timedelta(days=i) for i in range(num_days)]
actual_dates = [i.strftime("%d-%m-%Y") for i in list_format]
all_pin=Pin.objects.all()
pincodes = list(all_pin)
# print(all_pin)
for i in pincodes:
# print(type(i.pin))
counter=0
pincode=i.pin
for given_date in actual_dates:
URL = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByPin?pincode={}&date={}".format(pincode, given_date)
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'}
result = requests.get(URL, headers=header)
if result.ok:
response_json = result.json()
if response_json["centers"]:
message=""
for center in response_json["centers"]:
for session in center["sessions"]:
if (session["available_capacity"] > 0 ) :
counter=counter+1
message="\n"+message
message+= " " + "Pincode: " + pincode
message+= " " + "\t"+"Available on: {}".format(given_date)
message+= " " + "\t" + center["name"]
message+= ", " + center["block_name"]
message+= " " + "\t Price: " + center["fee_type"]
message+= " " + "\t Availablity : " + str(session["available_capacity"])
if(session["vaccine"] != ''):
message+= " " + "\t Vaccine type: "+ str(session["vaccine"])
message+= " " + "\n"
if counter!=0:
obj=mail(pincode,message)
if obj not in in_queue:
schedule.put(obj)
in_queue.add(obj)
print("scheduled mails to "+pincode)
return None
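# sendEmail pops one queued availability notice and mails it to every order
# registered for that pincode, then removes the processed orders and the pin.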
def sendEmail():
if schedule.empty():
return 1
obj=schedule.get()
in_queue.remove(obj)
availablePin=obj.pin
response=obj.message
ordersForPin=Order.objects.filter(pin=availablePin)
all_orders=list(ordersForPin)
# if there are no orders then delete the pin from Pin
# print(all_orders)
if len(all_orders) == 0:
del_pin=Pin.objects.filter(pin=availablePin)
del_pin.delete()
return 0
else:
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
your_email = "[email protected]"
your_password = "vanraqwerty"
server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
server.ehlo()
server.login(your_email, your_password)
for order in all_orders:
print(order.email)
reciever_email= order.email
msg = MIMEMultipart()
msg['To'] = reciever_email
msg['From'] = your_email
msg['Subject'] = "Vaccine Slots available at pin: " +availablePin
body=response
msg.attach(MIMEText(body, 'plain'))
text = msg.as_string()
#send email
try:
server.sendmail(your_email,reciever_email,text)
# print('Email to ',reciever_email,'successfully sent!\n\n')
except Exception as e:
print('Email to ', reciever_email, ' could not be sent :(\n\n')
# save the result in the Email table
e= Email(pin=availablePin,email=reciever_email)
e.save()
#delete order from Order
del_ob= Order.objects.filter(id=order.pk)
del_ob.delete()
server.quit()
del_pin=Pin.objects.filter(pin=availablePin)
del_pin.delete()
return None
def main():
while True:
t0 = time.time()
checkAvailability()
sendEmail()
t1 = time.time()
print('Took %.2f ms' % (1000*(t1-t0)))
# To save resources on the server, sleep briefly between iterations
time.sleep(6)
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
main.go
|
package main
import (
"fmt"
"net/http"
"os"
"os/signal"
"path"
"runtime"
"strings"
"syscall"
"github.com/jessevdk/go-flags"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
"github.com/webdevops/go-common/prometheus/azuretracing"
"github.com/webdevopos/azure-k8s-autopilot/autopilot"
"github.com/webdevopos/azure-k8s-autopilot/config"
)
const (
Author = "webdevops.io"
UserAgent = "azure-k8s-autopilot/"
)
var (
argparser *flags.Parser
// Git version information
gitCommit = "<unknown>"
gitTag = "<unknown>"
)
var opts = config.Opts{}
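// main parses the CLI options, starts the autopilot controller and the
// metrics/health HTTP server, then waits for SIGINT/SIGTERM to shut down.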
func main() {
initArgparser()
log.Infof("starting azure-k8s-autopilot v%s (%s; %s; by %v)", gitTag, gitCommit, runtime.Version(), Author)
log.Info(string(opts.GetJson()))
pilot := autopilot.AzureK8sAutopilot{
Config: opts,
UserAgent: UserAgent + gitTag,
}
pilot.Init()
pilot.Start()
log.Infof("starting http server on %s", opts.ServerBind)
startHttpServer()
termChan := make(chan os.Signal, 1)
signal.Notify(termChan, syscall.SIGINT, syscall.SIGTERM) //nolint:staticcheck
<-termChan
log.Info("shutdown signal received, trying to stop")
pilot.Stop()
log.Info("finished, terminating now")
}
// init argparser and parse/validate arguments
func initArgparser() {
argparser = flags.NewParser(&opts, flags.Default)
_, err := argparser.Parse()
// check if there is a parse error
if err != nil {
if flagsErr, ok := err.(*flags.Error); ok && flagsErr.Type == flags.ErrHelp {
os.Exit(0)
} else {
fmt.Println(err)
fmt.Println()
argparser.WriteHelp(os.Stdout)
os.Exit(1)
}
}
// verbose level
if opts.Logger.Verbose {
log.SetLevel(log.DebugLevel)
}
// debug level
if opts.Logger.Debug {
log.SetReportCaller(true)
log.SetLevel(log.TraceLevel)
log.SetFormatter(&log.TextFormatter{
CallerPrettyfier: func(f *runtime.Frame) (string, string) {
s := strings.Split(f.Function, ".")
funcName := s[len(s)-1]
return funcName, fmt.Sprintf("%s:%d", path.Base(f.File), f.Line)
},
})
}
// json log format
if opts.Logger.LogJson {
log.SetReportCaller(true)
log.SetFormatter(&log.JSONFormatter{
DisableTimestamp: true,
CallerPrettyfier: func(f *runtime.Frame) (string, string) {
s := strings.Split(f.Function, ".")
funcName := s[len(s)-1]
return funcName, fmt.Sprintf("%s:%d", path.Base(f.File), f.Line)
},
})
}
if val := os.Getenv("DRAIN_DELETE_LOCAL_DATA"); val != "" {
log.Panic("env var DRAIN_DELETE_LOCAL_DATA is deprecated, please use DRAIN_DELETE_EMPTYDIR_DATA")
}
}
// start and handle prometheus handler
func startHttpServer() {
// healthz
http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
if _, err := fmt.Fprint(w, "Ok"); err != nil {
log.Error(err)
}
})
http.Handle("/metrics", azuretracing.RegisterAzureMetricAutoClean(promhttp.Handler()))
go func() {
log.Fatal(http.ListenAndServe(opts.ServerBind, nil))
}()
}
|
[
"\"DRAIN_DELETE_LOCAL_DATA\""
] |
[] |
[
"DRAIN_DELETE_LOCAL_DATA"
] |
[]
|
["DRAIN_DELETE_LOCAL_DATA"]
|
go
| 1 | 0 | |
pkg/api/testapi/testapi.go
|
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package testapi provides a helper for retrieving the KUBE_API_VERSION environment variable.
package testapi
import (
"os"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
)
// Version returns the API version to test against as set by the KUBE_API_VERSION env var.
func Version() string {
version := os.Getenv("KUBE_API_VERSION")
if version == "" {
version = latest.Version
}
return version
}
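// CodecForVersionOrDie returns the runtime.Codec for the version under test,
// panicking if that version is unknown to the latest package.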
func CodecForVersionOrDie() runtime.Codec {
interfaces, err := latest.InterfacesFor(Version())
if err != nil {
panic(err)
}
return interfaces.Codec
}
|
[
"\"KUBE_API_VERSION\""
] |
[] |
[
"KUBE_API_VERSION"
] |
[]
|
["KUBE_API_VERSION"]
|
go
| 1 | 0 | |
rabbitai/rabbitai_config.py
|
import os
# region Babel config for translations
# Setup default language
BABEL_DEFAULT_LOCALE = "zh"
# Your application default translation path
BABEL_DEFAULT_FOLDER = "rabbitai/translations"
# The allowed translation for you app
LANGUAGES = {
"en": {"flag": "us", "name": "English"},
"es": {"flag": "es", "name": "Spanish"},
"it": {"flag": "it", "name": "Italian"},
"fr": {"flag": "fr", "name": "French"},
"zh": {"flag": "cn", "name": "Chinese"},
"ja": {"flag": "jp", "name": "Japanese"},
"de": {"flag": "de", "name": "German"},
"pt": {"flag": "pt", "name": "Portuguese"},
"pt_BR": {"flag": "br", "name": "Brazilian Portuguese"},
"ru": {"flag": "ru", "name": "Russian"},
"ko": {"flag": "kr", "name": "Korean"},
}
# endregion
# SQLAlchemy database connection string
# SQLALCHEMY_DATABASE_URI = 'mysql://myapp@localhost/myapp'
# Connect to a PostgreSQL database:
# username:password@localhost:port(5432)/database name(superset)
SQLALCHEMY_DATABASE_URI = 'postgresql://postgres:psb123456@localhost:5432/superset'
# Default cache configuration
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
"""项目的根目录,即配置文件所在目录。"""
# 存储数据的目录
if "SUPERSET_HOME" in os.environ:
DATA_DIR = os.environ["SUPERSET_HOME"]
"""数据目录,SUPERSET_HOME系统环境变量定义的目录"""
else:
DATA_DIR = os.path.join(os.path.expanduser("~"), ".superset")
"""数据目录,用户目录下的.superset目录"""
# CACHE_CONFIG: CacheConfig = {"CACHE_TYPE": "null"}
CACHE_CONFIG = {
"CACHE_TYPE": "filesystem",
"CACHE_DIR": DATA_DIR
}
# Cache for datasources and query results; uses the default cache config by default
DATA_CACHE_CONFIG = CACHE_CONFIG
|
[] |
[] |
[
"SUPERSET_HOME"
] |
[]
|
["SUPERSET_HOME"]
|
python
| 1 | 0 | |
train_imgreid_xent_htri.py
|
from __future__ import print_function
from __future__ import division
import os
import sys
import time
import datetime
import os.path as osp
import numpy as np
import warnings
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from args import argument_parser, image_dataset_kwargs, optimizer_kwargs, lr_scheduler_kwargs
from torchreid.data_manager import ImageDataManager
from torchreid import models
from torchreid.losses import CrossEntropyLoss, TripletLoss, DeepSupervision
from torchreid.utils.iotools import check_isfile
from torchreid.utils.avgmeter import AverageMeter
from torchreid.utils.loggers import Logger, RankLogger
from torchreid.utils.torchtools import count_num_param, open_all_layers, open_specified_layers, accuracy, \
load_pretrained_weights, save_checkpoint, resume_from_checkpoint
from torchreid.utils.reidtools import visualize_ranked_results
from torchreid.utils.generaltools import set_random_seed
from torchreid.eval_metrics import evaluate
from torchreid.samplers import RandomIdentitySampler
from torchreid.optimizers import init_optimizer
from torchreid.lr_schedulers import init_lr_scheduler
# global variables
parser = argument_parser()
args = parser.parse_args()
def main():
global args
set_random_seed(args.seed)
if not args.use_avai_gpus: os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
use_gpu = torch.cuda.is_available()
if args.use_cpu: use_gpu = False
log_name = 'log_test.txt' if args.evaluate else 'log_train.txt'
sys.stdout = Logger(osp.join(args.save_dir, log_name))
print('==========\nArgs:{}\n=========='.format(args))
if use_gpu:
print('Currently using GPU {}'.format(args.gpu_devices))
cudnn.benchmark = True
else:
warnings.warn('Currently using CPU, however, GPU is highly recommended')
print('Initializing image data manager')
dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))
trainloader, testloader_dict = dm.return_dataloaders()
print('Initializing model: {}'.format(args.arch))
model = models.init_model(name=args.arch, num_classes=dm.num_train_pids, loss={'xent', 'htri'}, pretrained=not args.no_pretrained, use_gpu=use_gpu)
print('Model size: {:.3f} M'.format(count_num_param(model)))
if args.load_weights and check_isfile(args.load_weights):
load_pretrained_weights(model, args.load_weights)
model = nn.DataParallel(model).cuda() if use_gpu else model
criterion_xent = CrossEntropyLoss(num_classes=dm.num_train_pids, use_gpu=use_gpu, label_smooth=args.label_smooth)
criterion_htri = TripletLoss(margin=args.margin)
optimizer = init_optimizer(model, **optimizer_kwargs(args))
scheduler = init_lr_scheduler(optimizer, **lr_scheduler_kwargs(args))
if args.resume and check_isfile(args.resume):
args.start_epoch = resume_from_checkpoint(args.resume, model, optimizer=optimizer)
if args.evaluate:
print('Evaluate only')
for name in args.target_names:
print('Evaluating {} ...'.format(name))
queryloader = testloader_dict[name]['query']
galleryloader = testloader_dict[name]['gallery']
distmat = test(model, queryloader, galleryloader, use_gpu, return_distmat=True)
if args.visualize_ranks:
visualize_ranked_results(
distmat, dm.return_testdataset_by_name(name),
save_dir=osp.join(args.save_dir, 'ranked_results', name),
topk=20
)
return
time_start = time.time()
ranklogger = RankLogger(args.source_names, args.target_names)
print('=> Start training')
if args.fixbase_epoch > 0:
print('Train {} for {} epochs while keeping other layers frozen'.format(args.open_layers, args.fixbase_epoch))
initial_optim_state = optimizer.state_dict()
for epoch in range(args.fixbase_epoch):
train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu, fixbase=True)
print('Done. All layers are open to train for {} epochs'.format(args.max_epoch))
optimizer.load_state_dict(initial_optim_state)
for epoch in range(args.start_epoch, args.max_epoch):
train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu)
scheduler.step()
if (epoch + 1) > args.start_eval and args.eval_freq > 0 and (epoch + 1) % args.eval_freq == 0 or (epoch + 1) == args.max_epoch:
print('=> Test')
for name in args.target_names:
print('Evaluating {} ...'.format(name))
queryloader = testloader_dict[name]['query']
galleryloader = testloader_dict[name]['gallery']
rank1 = test(model, queryloader, galleryloader, use_gpu)
ranklogger.write(name, epoch + 1, rank1)
save_checkpoint({
'state_dict': model.state_dict(),
'rank1': rank1,
'epoch': epoch + 1,
'arch': args.arch,
'optimizer': optimizer.state_dict(),
}, args.save_dir)
elapsed = round(time.time() - time_start)
elapsed = str(datetime.timedelta(seconds=elapsed))
print('Elapsed {}'.format(elapsed))
ranklogger.show_summary()
def train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu, fixbase=False):
xent_losses = AverageMeter()
htri_losses = AverageMeter()
accs = AverageMeter()
batch_time = AverageMeter()
data_time = AverageMeter()
model.train()
if fixbase or args.always_fixbase:
open_specified_layers(model, args.open_layers)
else:
open_all_layers(model)
end = time.time()
for batch_idx, (imgs, pids, _, _) in enumerate(trainloader):
data_time.update(time.time() - end)
if use_gpu:
imgs, pids = imgs.cuda(), pids.cuda()
outputs, features = model(imgs)
if isinstance(outputs, (tuple, list)):
xent_loss = DeepSupervision(criterion_xent, outputs, pids)
else:
xent_loss = criterion_xent(outputs, pids)
if isinstance(features, (tuple, list)):
htri_loss = DeepSupervision(criterion_htri, features, pids)
else:
htri_loss = criterion_htri(features, pids)
loss = args.lambda_xent * xent_loss + args.lambda_htri * htri_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
xent_losses.update(xent_loss.item(), pids.size(0))
htri_losses.update(htri_loss.item(), pids.size(0))
accs.update(accuracy(outputs, pids)[0])
if (batch_idx + 1) % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
'Xent {xent.val:.4f} ({xent.avg:.4f})\t'
'Htri {htri.val:.4f} ({htri.avg:.4f})\t'
'Acc {acc.val:.2f} ({acc.avg:.2f})\t'.format(
epoch + 1, batch_idx + 1, len(trainloader),
batch_time=batch_time,
data_time=data_time,
xent=xent_losses,
htri=htri_losses,
acc=accs
))
end = time.time()
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20], return_distmat=False):
batch_time = AverageMeter()
model.eval()
with torch.no_grad():
qf, q_pids, q_camids = [], [], []
for batch_idx, (imgs, pids, camids, _) in enumerate(queryloader):
if use_gpu:
imgs = imgs.cuda()
end = time.time()
features = model(imgs)
batch_time.update(time.time() - end)
features = features.data.cpu()
qf.append(features)
q_pids.extend(pids)
q_camids.extend(camids)
qf = torch.cat(qf, 0)
q_pids = np.asarray(q_pids)
q_camids = np.asarray(q_camids)
print('Extracted features for query set, obtained {}-by-{} matrix'.format(qf.size(0), qf.size(1)))
gf, g_pids, g_camids = [], [], []
for batch_idx, (imgs, pids, camids, _) in enumerate(galleryloader):
if use_gpu:
imgs = imgs.cuda()
end = time.time()
features = model(imgs)
batch_time.update(time.time() - end)
features = features.data.cpu()
gf.append(features)
g_pids.extend(pids)
g_camids.extend(camids)
gf = torch.cat(gf, 0)
g_pids = np.asarray(g_pids)
g_camids = np.asarray(g_camids)
print('Extracted features for gallery set, obtained {}-by-{} matrix'.format(gf.size(0), gf.size(1)))
print('=> BatchTime(s)/BatchSize(img): {:.3f}/{}'.format(batch_time.avg, args.test_batch_size))
m, n = qf.size(0), gf.size(0)
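# squared Euclidean distance matrix: ||q||^2 + ||g||^2 - 2 * q·g^T; the cross term is added in-place via addmm_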
distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
distmat.addmm_(1, -2, qf, gf.t())
distmat = distmat.numpy()
print('Computing CMC and mAP')
cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, use_metric_cuhk03=args.use_metric_cuhk03)
print('Results ----------')
print('mAP: {:.1%}'.format(mAP))
print('CMC curve')
for r in ranks:
print('Rank-{:<3}: {:.1%}'.format(r, cmc[r-1]))
print('------------------')
if return_distmat:
return distmat
return cmc[0]
if __name__ == '__main__':
main()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
app/src/main/java/com/alvin/sdappmanager/LocationActivity.java
|
package com.alvin.sdappmanager;
/*
* Activity for screen "Locations"
*/
import android.content.SharedPreferences;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.support.v7.widget.Toolbar;
import android.view.Menu;
import android.view.MenuItem;
import android.widget.AdapterView;
import android.widget.ArrayAdapter;
import android.widget.CheckedTextView;
import android.widget.ListView;
import java.io.InputStream;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import android.app.Activity;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.TextView;
import android.widget.Toast;
import java.util.ArrayList;
import java.util.Locale;
import java.util.Set;
class Location implements Comparable {
String name;
String path;
String device;
Integer total;
Integer used;
Integer free;
String fs;
Boolean rw;
Location(String _path) {
path = _path;
}
public void edit(SharedPreferences.Editor e){
e.putString("name", name);
e.putString("path", path);
e.putString("device", device);
e.putInt("total", total);
e.putInt("used", used);
e.putInt("free", free);
e.putString("fs", fs);
e.putBoolean("rw", rw);
e.commit();//TODO: Ok-Cancel thing
//TODO: Mount? remount? write config.
}
@Override
public int compareTo(Object o) {
return ((Location)o).free.compareTo(free);
}
}
class InteractiveArrayAdapter extends ArrayAdapter<Location> {
private String selected = "";
private final List<Location> list;
private final Activity context;
public InteractiveArrayAdapter(Activity context, List<Location> list) {
super(context, R.layout.location_item, list);
this.context = context;
this.list = list;
SharedPreferences settings = context.getSharedPreferences("Location", 0);
selected = settings.getString("path",selected);
}
static class ViewHolder {
protected TextView text;
protected CheckedTextView checkbox;
}
@Override
public View getView(int position, View convertView, ViewGroup parent) {
View view = null;
if (convertView == null) {
LayoutInflater inflator = context.getLayoutInflater();
view = inflator.inflate(R.layout.location_item, null);
final ViewHolder viewHolder = new ViewHolder();
viewHolder.text = (TextView) view.findViewById(R.id.path);
viewHolder.checkbox = (CheckedTextView) view.findViewById(R.id.name);
view.setTag(viewHolder);
viewHolder.checkbox.setTag(list.get(position));
} else {
view = convertView;
((ViewHolder) view.getTag()).checkbox.setTag(list.get(position));
}
ViewHolder holder = (ViewHolder) view.getTag();
holder.text.setText(list.get(position).path);
holder.checkbox.setText(list.get(position).name);
holder.checkbox.setChecked(list.get(position).path.equals(selected));
return view;
}
public void toggleChecked(int position) {
selected = list.get(position).path;
notifyDataSetChanged();
SharedPreferences settings = context.getSharedPreferences("Location", 0);
list.get(position).edit(settings.edit());
//Toast.makeText(context, list.get(position).path, Toast.LENGTH_LONG).show();
}
}
public class LocationActivity extends AppCompatActivity {
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_location);
ListView list = (ListView)findViewById(R.id.listView);
final InteractiveArrayAdapter adapter = new InteractiveArrayAdapter(this, getModel());
//new ArrayAdapter<String>(this,R.layout.location_item, R.id.name, {"/mnt/sdcard1", "/mnt/sdcard2"});
list.setAdapter(adapter);
list.setOnItemClickListener(new AdapterView.OnItemClickListener() {
@Override
public void onItemClick(AdapterView<?> adapterView, View view, int i, long l) {
adapter.toggleChecked(i);
}
});
getSupportActionBar().setDisplayHomeAsUpEnabled(true);
//Toolbar toolbar = (Toolbar) findViewById(R.id.toolbar);
//setSupportActionBar(toolbar);
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
//TODO: Make this screen a Ok-Cancel Dialog. Don't commit bad things like vfat locations.
//getMenuInflater().inflate(R.menu.done, menu);
return true;
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
switch (item.getItemId()) {
case R.id.action_menu_done:
case android.R.id.home:
//intent.putExtra("name", "");
//setResult(RESULT_OK, intent);
finish ();
break;
}
return super.onOptionsItemSelected(item);
}
private String getOutput(String[] cmd) {
String s = "";
try {
final Process process = new ProcessBuilder().command(cmd)
.redirectErrorStream(true).start();
process.waitFor();
final InputStream is = process.getInputStream();
final byte[] buffer = new byte[1024];
int n;
while ((n = is.read(buffer)) != -1) {
s = s + new String(buffer, 0, n);
}
is.close();
} catch (final Exception e) {
e.printStackTrace();
}
return s;
}
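// getDf parses "busybox df" and "mount" output into Location objects,
// skipping pseudo filesystems and locations with less than ~10MB free.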
private List<Location> getDf() {
List<Location> list = new ArrayList<Location>();
final String[] lines = getOutput(new String[]{"busybox","df"}).split("\n");
final String mount = getOutput(new String[]{"mount"});
Set<String> set = new HashSet<String>();
for (String line : lines) {
if (line == lines[0]) continue;
String l = line.toLowerCase(Locale.US);
String[] parts = line.split("\\s+");
if (parts.length<6) continue;
String dev = parts[0];
if (Arrays.asList("tmpfs", "rootfs", "none", "sysfs", "devpts", "proc", "htcfs").contains(dev)) continue;
String path = parts[5];
if (path.startsWith("/sys")) continue;
if (path.contains("/asec")) continue;
if (path.contains("/secure")) continue;
if (path.endsWith("/obb")) continue;
Integer total, used, free;
try {
total = Integer.parseInt(parts[1]);
used = Integer.parseInt(parts[2]);
free = Integer.parseInt(parts[3]);
}
catch (NumberFormatException e) { continue; }
if (free < 10000) continue;
String name = "";
switch(path) {
case "/system":name=getString(R.string.system_partition);break;
case "/data":name=getString(R.string.app_partition);break;
case "/cache":name=getString(R.string.cache_partition);break;
}
if (path.contains("/ext")) name=getString(R.string.ext_sd);
else if (path.contains("sdcard")) name=getString(R.string.sd_card);
else if (path.contains("usb")) name=getString(R.string.sd_usb);
if (path.contains("emulated")) name=getString(R.string.emulated);
Location loc = new Location(path);
loc.name=name.equals("")?getString(R.string.unknown_location):name;
loc.device=dev;
loc.free=free;
loc.used=used;
loc.total=total;
int i = mount.indexOf(" "+path+" ");
if(i != -1) {
parts=mount.substring(i, mount.indexOf("\n",i)).split(" ");
if (parts[1].equals("type")) {
loc.fs = parts[2];
loc.rw = parts[3].startsWith("rw") || parts[3].startsWith("(rw");
}
else {
loc.fs = parts[1];
loc.rw = parts[2].startsWith("rw") || parts[2].startsWith("(rw");
}
}
if (!set.contains(path)) list.add(loc);
set.add(path);
}
//list.get(0).setSelected(true);
return list;
}
private List<Location> getModel() {
List<Location> list = getDf();
Collections.sort(list);
//list.add(new Location(System.getenv("EXTERNAL_STORAGE")));
//list.add(new Location(System.getenv("SECONDARY_STORAGE")));
//list.add(new Location(System.getenv("EMULATED_STORAGE_TARGET")));
return list;
}
}
|
[
"\"EXTERNAL_STORAGE\"",
"\"SECONDARY_STORAGE\"",
"\"EMULATED_STORAGE_TARGET\""
] |
[] |
[
"EMULATED_STORAGE_TARGET",
"SECONDARY_STORAGE",
"EXTERNAL_STORAGE"
] |
[]
|
["EMULATED_STORAGE_TARGET", "SECONDARY_STORAGE", "EXTERNAL_STORAGE"]
|
java
| 3 | 0 | |
task_man/wsgi.py
|
"""
WSGI config for task_man project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'task_man.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
env/lib/python3.8/site-packages/plotly/validators/scatter/error_x/_value.py
|
import _plotly_utils.basevalidators
class ValueValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="value", parent_name="scatter.error_x", **kwargs):
super(ValueValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "info"),
**kwargs
)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
Tests/test_font.py
|
import pathlib
from blackrenderer.font import BlackRendererFont
testDir = pathlib.Path(__file__).resolve().parent
testFont1 = testDir / "data" / "noto-glyf_colr_1.ttf"
def test_font():
font = BlackRendererFont(testFont1)
assert len(font.glyphNames) > len(font.colrV0GlyphNames)
assert len(font.glyphNames) > len(font.colrV1GlyphNames)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
scripts/run_backend_tests.py
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for running backend tests in parallel.
This should not be run directly. Instead, navigate to the oppia/ folder and
execute:
python -m scripts.run_backend_tests
You can also append the following options to the above command:
--verbose prints the output of the tests to the console.
--test_target=core.controllers.editor_test runs only the tests in the
core.controllers.editor_test module. (You can change
"core.controllers.editor_test" to any valid module path.)
--test_path=core/controllers runs all tests in test files in the
core/controllers directory. (You can change "core/controllers" to any
valid subdirectory path.)
--generate_coverage_report generates a coverage report as part of the final
test output (but it makes the tests slower).
Note: If you've made some changes and tests are failing to run at all, this
might mean that you have introduced a circular dependency (e.g. module A
imports module B, which imports module C, which imports module A). This needs
to be fixed before the tests will run.
"""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import argparse
import importlib
import inspect
import multiprocessing
import os
import re
import subprocess
import sys
import threading
import time
import unittest
import python_utils
from . import common
from . import concurrent_task_utils
from . import install_third_party_libs
DIRS_TO_ADD_TO_SYS_PATH = [
os.path.join(common.OPPIA_TOOLS_DIR, 'pylint-1.9.4'),
common.GOOGLE_APP_ENGINE_SDK_HOME,
os.path.join(common.OPPIA_TOOLS_DIR, 'webtest-%s' % common.WEBTEST_VERSION),
os.path.join(
common.GOOGLE_APP_ENGINE_SDK_HOME, 'lib', 'webob_0_9'),
os.path.join(common.OPPIA_TOOLS_DIR, 'Pillow-%s' % common.PILLOW_VERSION),
os.path.join(common.OPPIA_TOOLS_DIR, 'psutil-%s' % common.PSUTIL_VERSION),
os.path.join(
common.OPPIA_TOOLS_DIR, 'PyGithub-%s' % common.PYGITHUB_VERSION),
common.CURR_DIR,
os.path.join(common.THIRD_PARTY_DIR, 'backports.functools_lru_cache-1.6.1'),
os.path.join(common.THIRD_PARTY_DIR, 'beautifulsoup4-4.9.1'),
os.path.join(common.THIRD_PARTY_DIR, 'bleach-3.1.5'),
os.path.join(common.THIRD_PARTY_DIR, 'callbacks-0.3.0'),
os.path.join(common.THIRD_PARTY_DIR, 'gae-cloud-storage-1.9.22.1'),
os.path.join(common.THIRD_PARTY_DIR, 'gae-mapreduce-1.9.22.0'),
os.path.join(common.THIRD_PARTY_DIR, 'gae-pipeline-1.9.22.1'),
os.path.join(common.THIRD_PARTY_DIR, 'graphy-1.0.0'),
os.path.join(common.THIRD_PARTY_DIR, 'html5lib-python-1.1'),
os.path.join(common.THIRD_PARTY_DIR, 'mutagen-1.43.0'),
os.path.join(common.THIRD_PARTY_DIR, 'packaging-20.4'),
os.path.join(common.THIRD_PARTY_DIR, 'pylatexenc-2.6'),
os.path.join(common.THIRD_PARTY_DIR, 'simplejson-3.17.0'),
os.path.join(common.THIRD_PARTY_DIR, 'six-1.15.0'),
os.path.join(common.THIRD_PARTY_DIR, 'soupsieve-1.9.5'),
os.path.join(common.THIRD_PARTY_DIR, 'webencodings-0.5.1'),
]
COVERAGE_DIR = os.path.join(
os.getcwd(), os.pardir, 'oppia_tools',
'coverage-%s' % common.COVERAGE_VERSION)
COVERAGE_MODULE_PATH = os.path.join(
os.getcwd(), os.pardir, 'oppia_tools',
'coverage-%s' % common.COVERAGE_VERSION, 'coverage')
TEST_RUNNER_PATH = os.path.join(os.getcwd(), 'core', 'tests', 'gae_suite.py')
# This should be the same as core.test_utils.LOG_LINE_PREFIX.
LOG_LINE_PREFIX = 'LOG_INFO_TEST: '
_LOAD_TESTS_DIR = os.path.join(os.getcwd(), 'core', 'tests', 'load_tests')
_PARSER = argparse.ArgumentParser(
description="""
Run this script from the oppia root folder:
python -m scripts.run_backend_tests
IMPORTANT: Only one of --test_path and --test_target should be specified.
""")
_EXCLUSIVE_GROUP = _PARSER.add_mutually_exclusive_group()
_EXCLUSIVE_GROUP.add_argument(
'--test_target',
help='optional dotted module name of the test(s) to run',
type=python_utils.UNICODE)
_EXCLUSIVE_GROUP.add_argument(
'--test_path',
help='optional subdirectory path containing the test(s) to run',
type=python_utils.UNICODE)
_PARSER.add_argument(
'--generate_coverage_report',
help='optional; if specified, generates a coverage report',
action='store_true')
_PARSER.add_argument(
'--exclude_load_tests',
help='optional; if specified, exclude load tests from being run',
action='store_true')
_PARSER.add_argument(
'-v',
'--verbose',
help='optional; if specified, display the output of the tests being run',
action='store_true')
def run_shell_cmd(exe, stdout=subprocess.PIPE, stderr=subprocess.PIPE):
"""Runs a shell command and captures the stdout and stderr output.
If the cmd fails, raises Exception. Otherwise, returns a string containing
the concatenation of the stdout and stderr logs.
"""
p = subprocess.Popen(exe, stdout=stdout, stderr=stderr)
last_stdout_str, last_stderr_str = p.communicate()
# Converting to unicode to stay compatible with the rest of the strings.
last_stdout_str = last_stdout_str.decode(encoding='utf-8')
last_stderr_str = last_stderr_str.decode(encoding='utf-8')
last_stdout = last_stdout_str.split('\n')
if LOG_LINE_PREFIX in last_stdout_str:
concurrent_task_utils.log('')
for line in last_stdout:
if line.startswith(LOG_LINE_PREFIX):
concurrent_task_utils.log(
'INFO: %s' % line[len(LOG_LINE_PREFIX):])
concurrent_task_utils.log('')
result = '%s%s' % (last_stdout_str, last_stderr_str)
if p.returncode != 0:
raise Exception('Error %s\n%s' % (p.returncode, result))
return result
class TestingTaskSpec(python_utils.OBJECT):
"""Executes a set of tests given a test class name."""
def __init__(self, test_target, generate_coverage_report):
self.test_target = test_target
self.generate_coverage_report = generate_coverage_report
def run(self):
"""Runs all tests corresponding to the given test target."""
test_target_flag = '--test_target=%s' % self.test_target
if self.generate_coverage_report:
exc_list = [
sys.executable, COVERAGE_MODULE_PATH, 'run', '-p',
TEST_RUNNER_PATH, test_target_flag]
else:
exc_list = [sys.executable, TEST_RUNNER_PATH, test_target_flag]
return run_shell_cmd(exc_list)
def _get_all_test_targets(test_path=None, include_load_tests=True):
"""Returns a list of test targets for all classes under test_path
containing tests.
"""
def _get_test_target_classes(path):
"""Returns a list of all test classes in a given test file path.
Args:
path: str. The path of the test file from which all test classes
are to be extracted.
Returns:
list. A list of all test classes in a given test file path.
"""
class_names = []
test_target_path = os.path.relpath(
path, os.getcwd())[:-3].replace('/', '.')
python_module = importlib.import_module(test_target_path)
for name, clazz in inspect.getmembers(
python_module, predicate=inspect.isclass):
if unittest.TestCase in inspect.getmro(clazz):
class_names.append(name)
return [
'%s.%s' % (test_target_path, class_name)
for class_name in class_names]
base_path = os.path.join(os.getcwd(), test_path or '')
result = []
excluded_dirs = ['.git', 'third_party', 'core/tests', 'node_modules']
for root in os.listdir(base_path):
if any([s in root for s in excluded_dirs]):
continue
if root.endswith('_test.py'):
result = result + (
_get_test_target_classes(os.path.join(base_path, root)))
for subroot, _, files in os.walk(os.path.join(base_path, root)):
if _LOAD_TESTS_DIR in subroot and include_load_tests:
for f in files:
if f.endswith('_test.py'):
result = result + (
_get_test_target_classes(os.path.join(subroot, f)))
for f in files:
if (f.endswith('_test.py') and
os.path.join('core', 'tests') not in subroot):
result = result + (
_get_test_target_classes(os.path.join(subroot, f)))
return result
def main(args=None):
"""Run the tests."""
parsed_args = _PARSER.parse_args(args=args)
# Make sure that third-party libraries are up-to-date before running tests,
# otherwise import errors may result.
install_third_party_libs.main()
for directory in DIRS_TO_ADD_TO_SYS_PATH:
if not os.path.exists(os.path.dirname(directory)):
raise Exception('Directory %s does not exist.' % directory)
# The directories should only be inserted starting at index 1. See
# https://stackoverflow.com/a/10095099 and
# https://stackoverflow.com/q/10095037 for more details.
sys.path.insert(1, directory)
import dev_appserver
dev_appserver.fix_sys_path()
if parsed_args.generate_coverage_report:
python_utils.PRINT(
'Checking whether coverage is installed in %s'
% common.OPPIA_TOOLS_DIR)
if not os.path.exists(
os.path.join(
common.OPPIA_TOOLS_DIR,
'coverage-%s' % common.COVERAGE_VERSION)):
raise Exception(
'Coverage is not installed, please run the start script.')
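# Prepend the coverage install dir to PYTHONPATH so the test subprocesses can import the coverage module.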
pythonpath_components = [COVERAGE_DIR]
if os.environ.get('PYTHONPATH'):
pythonpath_components.append(os.environ.get('PYTHONPATH'))
os.environ['PYTHONPATH'] = os.pathsep.join(pythonpath_components)
if parsed_args.test_target and parsed_args.test_path:
raise Exception(
'At most one of test_path and test_target should be specified.')
if parsed_args.test_path and '.' in parsed_args.test_path:
raise Exception('The delimiter in test_path should be a slash (/)')
if parsed_args.test_target and '/' in parsed_args.test_target:
raise Exception('The delimiter in test_target should be a dot (.)')
if parsed_args.test_target:
if '_test' in parsed_args.test_target:
all_test_targets = [parsed_args.test_target]
else:
python_utils.PRINT('')
python_utils.PRINT(
'---------------------------------------------------------')
python_utils.PRINT(
'WARNING : test_target flag should point to the test file.')
python_utils.PRINT(
'---------------------------------------------------------')
python_utils.PRINT('')
time.sleep(3)
python_utils.PRINT('Redirecting to its corresponding test file...')
all_test_targets = [parsed_args.test_target + '_test']
else:
include_load_tests = not parsed_args.exclude_load_tests
all_test_targets = _get_all_test_targets(
test_path=parsed_args.test_path,
include_load_tests=include_load_tests)
# Prepare tasks.
max_concurrent_runs = 25
concurrent_count = min(multiprocessing.cpu_count(), max_concurrent_runs)
semaphore = threading.Semaphore(concurrent_count)
task_to_taskspec = {}
tasks = []
for test_target in all_test_targets:
test = TestingTaskSpec(
test_target, parsed_args.generate_coverage_report)
task = concurrent_task_utils.create_task(
test.run, parsed_args.verbose, semaphore, name=test_target)
task_to_taskspec[task] = test
tasks.append(task)
task_execution_failed = False
try:
concurrent_task_utils.execute_tasks(tasks, semaphore)
except Exception:
task_execution_failed = True
for task in tasks:
if task.exception:
concurrent_task_utils.log(
python_utils.convert_to_bytes(task.exception.args[0]))
python_utils.PRINT('')
python_utils.PRINT('+------------------+')
python_utils.PRINT('| SUMMARY OF TESTS |')
python_utils.PRINT('+------------------+')
python_utils.PRINT('')
# Check we ran all tests as expected.
total_count = 0
total_errors = 0
total_failures = 0
for task in tasks:
spec = task_to_taskspec[task]
if not task.finished:
python_utils.PRINT('CANCELED %s' % spec.test_target)
test_count = 0
elif (task.exception and
'No tests were run' in python_utils.convert_to_bytes(
task.exception.args[0])):
python_utils.PRINT(
'ERROR %s: No tests found.' % spec.test_target)
test_count = 0
elif task.exception:
exc_str = python_utils.convert_to_bytes(task.exception.args[0])
python_utils.PRINT(exc_str[exc_str.find('='): exc_str.rfind('-')])
tests_failed_regex_match = re.search(
r'Test suite failed: ([0-9]+) tests run, ([0-9]+) errors, '
'([0-9]+) failures',
python_utils.convert_to_bytes(task.exception.args[0]))
try:
test_count = int(tests_failed_regex_match.group(1))
errors = int(tests_failed_regex_match.group(2))
failures = int(tests_failed_regex_match.group(3))
total_errors += errors
total_failures += failures
python_utils.PRINT('FAILED %s: %s errors, %s failures' % (
spec.test_target, errors, failures))
except AttributeError:
# There was an internal error, and the tests did not run (The
# error message did not match `tests_failed_regex_match`).
test_count = 0
total_errors += 1
python_utils.PRINT('')
python_utils.PRINT(
'------------------------------------------------------')
python_utils.PRINT(
' WARNING: FAILED TO RUN %s' % spec.test_target)
python_utils.PRINT('')
python_utils.PRINT(
' This is most likely due to an import error.')
python_utils.PRINT(
'------------------------------------------------------')
else:
try:
tests_run_regex_match = re.search(
r'Ran ([0-9]+) tests? in ([0-9\.]+)s', task.output)
test_count = int(tests_run_regex_match.group(1))
test_time = float(tests_run_regex_match.group(2))
python_utils.PRINT(
'SUCCESS %s: %d tests (%.1f secs)' %
(spec.test_target, test_count, test_time))
except Exception:
python_utils.PRINT(
'An unexpected error occurred. '
'Task output:\n%s' % task.output)
total_count += test_count
python_utils.PRINT('')
if total_count == 0:
raise Exception('WARNING: No tests were run.')
python_utils.PRINT('Ran %s test%s in %s test class%s.' % (
total_count, '' if total_count == 1 else 's',
len(tasks), '' if len(tasks) == 1 else 'es'))
if total_errors or total_failures:
python_utils.PRINT(
'(%s ERRORS, %s FAILURES)' % (total_errors, total_failures))
else:
python_utils.PRINT('All tests passed.')
if task_execution_failed:
raise Exception('Task execution failed.')
elif total_errors or total_failures:
raise Exception(
'%s errors, %s failures' % (total_errors, total_failures))
if parsed_args.generate_coverage_report:
subprocess.check_call([sys.executable, COVERAGE_MODULE_PATH, 'combine'])
process = subprocess.Popen(
[sys.executable, COVERAGE_MODULE_PATH, 'report',
'--omit="%s*","third_party/*","/usr/share/*"'
% common.OPPIA_TOOLS_DIR, '--show-missing'],
stdout=subprocess.PIPE)
report_stdout, _ = process.communicate()
python_utils.PRINT(report_stdout)
coverage_result = re.search(
r'TOTAL\s+(\d+)\s+(\d+)\s+(?P<total>\d+)%\s+', report_stdout)
if coverage_result.group('total') != '100':
raise Exception('Backend test coverage is not 100%')
python_utils.PRINT('')
python_utils.PRINT('Done!')
if __name__ == '__main__':
main()
|
[] |
[] |
[
"PYTHONPATH"
] |
[]
|
["PYTHONPATH"]
|
python
| 1 | 0 | |
src/net/http/fs_test.go
|
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http_test
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"mime/multipart"
"net"
. "net/http"
"net/http/httptest"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"reflect"
"regexp"
"runtime"
"strings"
"testing"
"time"
)
const (
testFile = "testdata/file"
testFileLen = 11
)
type wantRange struct {
start, end int64 // range [start,end)
}
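// ServeFileRangeTests is a table of Range header requests and the expected
// status codes and byte ranges when serving the 11-byte testdata/file.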
var ServeFileRangeTests = []struct {
r string
code int
ranges []wantRange
}{
{r: "", code: StatusOK},
{r: "bytes=0-4", code: StatusPartialContent, ranges: []wantRange{{0, 5}}},
{r: "bytes=2-", code: StatusPartialContent, ranges: []wantRange{{2, testFileLen}}},
{r: "bytes=-5", code: StatusPartialContent, ranges: []wantRange{{testFileLen - 5, testFileLen}}},
{r: "bytes=3-7", code: StatusPartialContent, ranges: []wantRange{{3, 8}}},
{r: "bytes=0-0,-2", code: StatusPartialContent, ranges: []wantRange{{0, 1}, {testFileLen - 2, testFileLen}}},
{r: "bytes=0-1,5-8", code: StatusPartialContent, ranges: []wantRange{{0, 2}, {5, 9}}},
{r: "bytes=0-1,5-", code: StatusPartialContent, ranges: []wantRange{{0, 2}, {5, testFileLen}}},
{r: "bytes=5-1000", code: StatusPartialContent, ranges: []wantRange{{5, testFileLen}}},
{r: "bytes=0-,1-,2-,3-,4-", code: StatusOK}, // ignore wasteful range request
{r: "bytes=0-9", code: StatusPartialContent, ranges: []wantRange{{0, testFileLen - 1}}},
{r: "bytes=0-10", code: StatusPartialContent, ranges: []wantRange{{0, testFileLen}}},
{r: "bytes=0-11", code: StatusPartialContent, ranges: []wantRange{{0, testFileLen}}},
{r: "bytes=10-11", code: StatusPartialContent, ranges: []wantRange{{testFileLen - 1, testFileLen}}},
{r: "bytes=10-", code: StatusPartialContent, ranges: []wantRange{{testFileLen - 1, testFileLen}}},
{r: "bytes=11-", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=11-12", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=12-12", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=11-100", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=12-100", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=100-", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=100-1000", code: StatusRequestedRangeNotSatisfiable},
}
func TestServeFile(t *testing.T) {
setParallel(t)
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, "testdata/file")
}))
defer ts.Close()
var err error
file, err := ioutil.ReadFile(testFile)
if err != nil {
t.Fatal("reading file:", err)
}
// set up the Request (re-used for all tests)
var req Request
req.Header = make(Header)
if req.URL, err = url.Parse(ts.URL); err != nil {
t.Fatal("ParseURL:", err)
}
req.Method = "GET"
// straight GET
_, body := getBody(t, "straight get", req)
if !bytes.Equal(body, file) {
t.Fatalf("body mismatch: got %q, want %q", body, file)
}
// Range tests
Cases:
for _, rt := range ServeFileRangeTests {
if rt.r != "" {
req.Header.Set("Range", rt.r)
}
resp, body := getBody(t, fmt.Sprintf("range test %q", rt.r), req)
if resp.StatusCode != rt.code {
t.Errorf("range=%q: StatusCode=%d, want %d", rt.r, resp.StatusCode, rt.code)
}
if rt.code == StatusRequestedRangeNotSatisfiable {
continue
}
wantContentRange := ""
if len(rt.ranges) == 1 {
rng := rt.ranges[0]
wantContentRange = fmt.Sprintf("bytes %d-%d/%d", rng.start, rng.end-1, testFileLen)
}
cr := resp.Header.Get("Content-Range")
if cr != wantContentRange {
t.Errorf("range=%q: Content-Range = %q, want %q", rt.r, cr, wantContentRange)
}
ct := resp.Header.Get("Content-Type")
if len(rt.ranges) == 1 {
rng := rt.ranges[0]
wantBody := file[rng.start:rng.end]
if !bytes.Equal(body, wantBody) {
t.Errorf("range=%q: body = %q, want %q", rt.r, body, wantBody)
}
if strings.HasPrefix(ct, "multipart/byteranges") {
t.Errorf("range=%q content-type = %q; unexpected multipart/byteranges", rt.r, ct)
}
}
if len(rt.ranges) > 1 {
typ, params, err := mime.ParseMediaType(ct)
if err != nil {
t.Errorf("range=%q content-type = %q; %v", rt.r, ct, err)
continue
}
if typ != "multipart/byteranges" {
t.Errorf("range=%q content-type = %q; want multipart/byteranges", rt.r, typ)
continue
}
if params["boundary"] == "" {
t.Errorf("range=%q content-type = %q; lacks boundary", rt.r, ct)
continue
}
if g, w := resp.ContentLength, int64(len(body)); g != w {
t.Errorf("range=%q Content-Length = %d; want %d", rt.r, g, w)
continue
}
mr := multipart.NewReader(bytes.NewReader(body), params["boundary"])
for ri, rng := range rt.ranges {
part, err := mr.NextPart()
if err != nil {
t.Errorf("range=%q, reading part index %d: %v", rt.r, ri, err)
continue Cases
}
wantContentRange = fmt.Sprintf("bytes %d-%d/%d", rng.start, rng.end-1, testFileLen)
if g, w := part.Header.Get("Content-Range"), wantContentRange; g != w {
t.Errorf("range=%q: part Content-Range = %q; want %q", rt.r, g, w)
}
body, err := ioutil.ReadAll(part)
if err != nil {
t.Errorf("range=%q, reading part index %d body: %v", rt.r, ri, err)
continue Cases
}
wantBody := file[rng.start:rng.end]
if !bytes.Equal(body, wantBody) {
t.Errorf("range=%q: body = %q, want %q", rt.r, body, wantBody)
}
}
_, err = mr.NextPart()
if err != io.EOF {
t.Errorf("range=%q; expected final error io.EOF; got %v", rt.r, err)
}
}
}
}
func TestServeFile_DotDot(t *testing.T) {
tests := []struct {
req string
wantStatus int
}{
{"/testdata/file", 200},
{"/../file", 400},
{"/..", 400},
{"/../", 400},
{"/../foo", 400},
{"/..\\foo", 400},
{"/file/a", 200},
{"/file/a..", 200},
{"/file/a/..", 400},
{"/file/a\\..", 400},
}
for _, tt := range tests {
req, err := ReadRequest(bufio.NewReader(strings.NewReader("GET " + tt.req + " HTTP/1.1\r\nHost: foo\r\n\r\n")))
if err != nil {
t.Errorf("bad request %q: %v", tt.req, err)
continue
}
rec := httptest.NewRecorder()
ServeFile(rec, req, "testdata/file")
if rec.Code != tt.wantStatus {
t.Errorf("for request %q, status = %d; want %d", tt.req, rec.Code, tt.wantStatus)
}
}
}
var fsRedirectTestData = []struct {
original, redirect string
}{
{"/test/index.html", "/test/"},
{"/test/testdata", "/test/testdata/"},
{"/test/testdata/file/", "/test/testdata/file"},
}
func TestFSRedirect(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(StripPrefix("/test", FileServer(Dir("."))))
defer ts.Close()
for _, data := range fsRedirectTestData {
res, err := Get(ts.URL + data.original)
if err != nil {
t.Fatal(err)
}
res.Body.Close()
if g, e := res.Request.URL.Path, data.redirect; g != e {
t.Errorf("redirect from %s: got %s, want %s", data.original, g, e)
}
}
}
type testFileSystem struct {
open func(name string) (File, error)
}
func (fs *testFileSystem) Open(name string) (File, error) {
return fs.open(name)
}
func TestFileServerCleans(t *testing.T) {
defer afterTest(t)
ch := make(chan string, 1)
fs := FileServer(&testFileSystem{func(name string) (File, error) {
ch <- name
return nil, errors.New("file does not exist")
}})
tests := []struct {
reqPath, openArg string
}{
{"/foo.txt", "/foo.txt"},
{"//foo.txt", "/foo.txt"},
{"/../foo.txt", "/foo.txt"},
}
req, _ := NewRequest("GET", "http://example.com", nil)
for n, test := range tests {
rec := httptest.NewRecorder()
req.URL.Path = test.reqPath
fs.ServeHTTP(rec, req)
if got := <-ch; got != test.openArg {
t.Errorf("test %d: got %q, want %q", n, got, test.openArg)
}
}
}
func TestFileServerEscapesNames(t *testing.T) {
defer afterTest(t)
const dirListPrefix = "<pre>\n"
const dirListSuffix = "\n</pre>\n"
tests := []struct {
name, escaped string
}{
{`simple_name`, `<a href="simple_name">simple_name</a>`},
{`"'<>&`, `<a href="%22%27%3C%3E&">"'<>&</a>`},
{`?foo=bar#baz`, `<a href="%3Ffoo=bar%23baz">?foo=bar#baz</a>`},
{`<combo>?foo`, `<a href="%3Ccombo%3E%3Ffoo"><combo>?foo</a>`},
{`foo:bar`, `<a href="./foo:bar">foo:bar</a>`},
}
// We put each test file in its own directory in the fakeFS so we can look at it in isolation.
fs := make(fakeFS)
for i, test := range tests {
testFile := &fakeFileInfo{basename: test.name}
fs[fmt.Sprintf("/%d", i)] = &fakeFileInfo{
dir: true,
modtime: time.Unix(1000000000, 0).UTC(),
ents: []*fakeFileInfo{testFile},
}
fs[fmt.Sprintf("/%d/%s", i, test.name)] = testFile
}
ts := httptest.NewServer(FileServer(&fs))
defer ts.Close()
for i, test := range tests {
url := fmt.Sprintf("%s/%d", ts.URL, i)
res, err := Get(url)
if err != nil {
t.Fatalf("test %q: Get: %v", test.name, err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatalf("test %q: read Body: %v", test.name, err)
}
s := string(b)
if !strings.HasPrefix(s, dirListPrefix) || !strings.HasSuffix(s, dirListSuffix) {
t.Errorf("test %q: listing dir, full output is %q, want prefix %q and suffix %q", test.name, s, dirListPrefix, dirListSuffix)
}
if trimmed := strings.TrimSuffix(strings.TrimPrefix(s, dirListPrefix), dirListSuffix); trimmed != test.escaped {
t.Errorf("test %q: listing dir, filename escaped to %q, want %q", test.name, trimmed, test.escaped)
}
res.Body.Close()
}
}
func TestFileServerSortsNames(t *testing.T) {
defer afterTest(t)
const contents = "I am a fake file"
dirMod := time.Unix(123, 0).UTC()
fileMod := time.Unix(1000000000, 0).UTC()
fs := fakeFS{
"/": &fakeFileInfo{
dir: true,
modtime: dirMod,
ents: []*fakeFileInfo{
{
basename: "b",
modtime: fileMod,
contents: contents,
},
{
basename: "a",
modtime: fileMod,
contents: contents,
},
},
},
}
ts := httptest.NewServer(FileServer(&fs))
defer ts.Close()
res, err := Get(ts.URL)
if err != nil {
t.Fatalf("Get: %v", err)
}
defer res.Body.Close()
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatalf("read Body: %v", err)
}
s := string(b)
if !strings.Contains(s, "<a href=\"a\">a</a>\n<a href=\"b\">b</a>") {
t.Errorf("output appears to be unsorted:\n%s", s)
}
}
func mustRemoveAll(dir string) {
err := os.RemoveAll(dir)
if err != nil {
panic(err)
}
}
func TestFileServerImplicitLeadingSlash(t *testing.T) {
defer afterTest(t)
tempDir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("TempDir: %v", err)
}
defer mustRemoveAll(tempDir)
if err := ioutil.WriteFile(filepath.Join(tempDir, "foo.txt"), []byte("Hello world"), 0644); err != nil {
t.Fatalf("WriteFile: %v", err)
}
ts := httptest.NewServer(StripPrefix("/bar/", FileServer(Dir(tempDir))))
defer ts.Close()
get := func(suffix string) string {
res, err := Get(ts.URL + suffix)
if err != nil {
t.Fatalf("Get %s: %v", suffix, err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatalf("ReadAll %s: %v", suffix, err)
}
res.Body.Close()
return string(b)
}
if s := get("/bar/"); !strings.Contains(s, ">foo.txt<") {
t.Logf("expected a directory listing with foo.txt, got %q", s)
}
if s := get("/bar/foo.txt"); s != "Hello world" {
t.Logf("expected %q, got %q", "Hello world", s)
}
}
func TestDirJoin(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("skipping test on windows")
}
wfi, err := os.Stat("/etc/hosts")
if err != nil {
t.Skip("skipping test; no /etc/hosts file")
}
test := func(d Dir, name string) {
f, err := d.Open(name)
if err != nil {
t.Fatalf("open of %s: %v", name, err)
}
defer f.Close()
gfi, err := f.Stat()
if err != nil {
t.Fatalf("stat of %s: %v", name, err)
}
if !os.SameFile(gfi, wfi) {
t.Errorf("%s got different file", name)
}
}
test(Dir("/etc/"), "/hosts")
test(Dir("/etc/"), "hosts")
test(Dir("/etc/"), "../../../../hosts")
test(Dir("/etc"), "/hosts")
test(Dir("/etc"), "hosts")
test(Dir("/etc"), "../../../../hosts")
// Not really directories, but since we use this trick in
// ServeFile, test it:
test(Dir("/etc/hosts"), "")
test(Dir("/etc/hosts"), "/")
test(Dir("/etc/hosts"), "../")
}
func TestEmptyDirOpenCWD(t *testing.T) {
test := func(d Dir) {
name := "fs_test.go"
f, err := d.Open(name)
if err != nil {
t.Fatalf("open of %s: %v", name, err)
}
defer f.Close()
}
test(Dir(""))
test(Dir("."))
test(Dir("./"))
}
func TestServeFileContentType(t *testing.T) {
defer afterTest(t)
const ctype = "icecream/chocolate"
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
switch r.FormValue("override") {
case "1":
w.Header().Set("Content-Type", ctype)
case "2":
// Explicitly inhibit sniffing.
w.Header()["Content-Type"] = []string{}
}
ServeFile(w, r, "testdata/file")
}))
defer ts.Close()
get := func(override string, want []string) {
resp, err := Get(ts.URL + "?override=" + override)
if err != nil {
t.Fatal(err)
}
if h := resp.Header["Content-Type"]; !reflect.DeepEqual(h, want) {
t.Errorf("Content-Type mismatch: got %v, want %v", h, want)
}
resp.Body.Close()
}
get("0", []string{"text/plain; charset=utf-8"})
get("1", []string{ctype})
get("2", nil)
}
func TestServeFileMimeType(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, "testdata/style.css")
}))
defer ts.Close()
resp, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
want := "text/css; charset=utf-8"
if h := resp.Header.Get("Content-Type"); h != want {
t.Errorf("Content-Type mismatch: got %q, want %q", h, want)
}
}
func TestServeFileFromCWD(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, "fs_test.go")
}))
defer ts.Close()
r, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
r.Body.Close()
if r.StatusCode != 200 {
t.Fatalf("expected 200 OK, got %s", r.Status)
}
}
// Issue 13996
func TestServeDirWithoutTrailingSlash(t *testing.T) {
e := "/testdata/"
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, ".")
}))
defer ts.Close()
r, err := Get(ts.URL + "/testdata")
if err != nil {
t.Fatal(err)
}
r.Body.Close()
if g := r.Request.URL.Path; g != e {
t.Errorf("got %s, want %s", g, e)
}
}
// Tests that ServeFile doesn't add a Content-Length if a Content-Encoding is
// specified.
func TestServeFileWithContentEncoding_h1(t *testing.T) { testServeFileWithContentEncoding(t, h1Mode) }
func TestServeFileWithContentEncoding_h2(t *testing.T) { testServeFileWithContentEncoding(t, h2Mode) }
func testServeFileWithContentEncoding(t *testing.T, h2 bool) {
defer afterTest(t)
cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) {
w.Header().Set("Content-Encoding", "foo")
ServeFile(w, r, "testdata/file")
// Because the testdata is so small, it would fit in
// both the h1 and h2 Server's write buffers. For h1,
// sendfile is used, though, forcing a header flush at
// the io.Copy. http2 doesn't do a header flush so
// buffers all 11 bytes and then adds its own
// Content-Length. To prevent the Server's
// Content-Length and test ServeFile only, flush here.
w.(Flusher).Flush()
}))
defer cst.close()
resp, err := cst.c.Get(cst.ts.URL)
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
if g, e := resp.ContentLength, int64(-1); g != e {
t.Errorf("Content-Length mismatch: got %d, want %d", g, e)
}
}
func TestServeIndexHtml(t *testing.T) {
defer afterTest(t)
const want = "index.html says hello\n"
ts := httptest.NewServer(FileServer(Dir(".")))
defer ts.Close()
for _, path := range []string{"/testdata/", "/testdata/index.html"} {
res, err := Get(ts.URL + path)
if err != nil {
t.Fatal(err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal("reading Body:", err)
}
if s := string(b); s != want {
t.Errorf("for path %q got %q, want %q", path, s, want)
}
res.Body.Close()
}
}
func TestFileServerZeroByte(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(FileServer(Dir(".")))
defer ts.Close()
res, err := Get(ts.URL + "/..\x00")
if err != nil {
t.Fatal(err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal("reading Body:", err)
}
if res.StatusCode == 200 {
t.Errorf("got status 200; want an error. Body is:\n%s", string(b))
}
}
type fakeFileInfo struct {
dir bool
basename string
modtime time.Time
ents []*fakeFileInfo
contents string
err error
}
func (f *fakeFileInfo) Name() string { return f.basename }
func (f *fakeFileInfo) Sys() interface{} { return nil }
func (f *fakeFileInfo) ModTime() time.Time { return f.modtime }
func (f *fakeFileInfo) IsDir() bool { return f.dir }
func (f *fakeFileInfo) Size() int64 { return int64(len(f.contents)) }
func (f *fakeFileInfo) Mode() os.FileMode {
if f.dir {
return 0755 | os.ModeDir
}
return 0644
}
type fakeFile struct {
io.ReadSeeker
fi *fakeFileInfo
path string // as opened
entpos int
}
func (f *fakeFile) Close() error { return nil }
func (f *fakeFile) Stat() (os.FileInfo, error) { return f.fi, nil }
func (f *fakeFile) Readdir(count int) ([]os.FileInfo, error) {
if !f.fi.dir {
return nil, os.ErrInvalid
}
var fis []os.FileInfo
limit := f.entpos + count
if count <= 0 || limit > len(f.fi.ents) {
limit = len(f.fi.ents)
}
for ; f.entpos < limit; f.entpos++ {
fis = append(fis, f.fi.ents[f.entpos])
}
if len(fis) == 0 && count > 0 {
return fis, io.EOF
} else {
return fis, nil
}
}
type fakeFS map[string]*fakeFileInfo
func (fs fakeFS) Open(name string) (File, error) {
name = path.Clean(name)
f, ok := fs[name]
if !ok {
return nil, os.ErrNotExist
}
if f.err != nil {
return nil, f.err
}
return &fakeFile{ReadSeeker: strings.NewReader(f.contents), fi: f, path: name}, nil
}
func TestDirectoryIfNotModified(t *testing.T) {
defer afterTest(t)
const indexContents = "I am a fake index.html file"
fileMod := time.Unix(1000000000, 0).UTC()
fileModStr := fileMod.Format(TimeFormat)
dirMod := time.Unix(123, 0).UTC()
indexFile := &fakeFileInfo{
basename: "index.html",
modtime: fileMod,
contents: indexContents,
}
fs := fakeFS{
"/": &fakeFileInfo{
dir: true,
modtime: dirMod,
ents: []*fakeFileInfo{indexFile},
},
"/index.html": indexFile,
}
ts := httptest.NewServer(FileServer(fs))
defer ts.Close()
res, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal(err)
}
if string(b) != indexContents {
t.Fatalf("Got body %q; want %q", b, indexContents)
}
res.Body.Close()
lastMod := res.Header.Get("Last-Modified")
if lastMod != fileModStr {
t.Fatalf("initial Last-Modified = %q; want %q", lastMod, fileModStr)
}
req, _ := NewRequest("GET", ts.URL, nil)
req.Header.Set("If-Modified-Since", lastMod)
res, err = DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 304 {
t.Fatalf("Code after If-Modified-Since request = %v; want 304", res.StatusCode)
}
res.Body.Close()
// Advance the index.html file's modtime, but not the directory's.
indexFile.modtime = indexFile.modtime.Add(1 * time.Hour)
res, err = DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 200 {
t.Fatalf("Code after second If-Modified-Since request = %v; want 200; res is %#v", res.StatusCode, res)
}
res.Body.Close()
}
func mustStat(t *testing.T, fileName string) os.FileInfo {
fi, err := os.Stat(fileName)
if err != nil {
t.Fatal(err)
}
return fi
}
func TestServeContent(t *testing.T) {
defer afterTest(t)
type serveParam struct {
name string
modtime time.Time
content io.ReadSeeker
contentType string
etag string
}
servec := make(chan serveParam, 1)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
p := <-servec
if p.etag != "" {
w.Header().Set("ETag", p.etag)
}
if p.contentType != "" {
w.Header().Set("Content-Type", p.contentType)
}
ServeContent(w, r, p.name, p.modtime, p.content)
}))
defer ts.Close()
type testCase struct {
// One of file or content must be set:
file string
content io.ReadSeeker
modtime time.Time
serveETag string // optional
serveContentType string // optional
reqHeader map[string]string
wantLastMod string
wantContentType string
wantContentRange string
wantStatus int
}
htmlModTime := mustStat(t, "testdata/index.html").ModTime()
tests := map[string]testCase{
"no_last_modified": {
file: "testdata/style.css",
wantContentType: "text/css; charset=utf-8",
wantStatus: 200,
},
"with_last_modified": {
file: "testdata/index.html",
wantContentType: "text/html; charset=utf-8",
modtime: htmlModTime,
wantLastMod: htmlModTime.UTC().Format(TimeFormat),
wantStatus: 200,
},
"not_modified_modtime": {
file: "testdata/style.css",
serveETag: `"foo"`, // Last-Modified sent only when no ETag
modtime: htmlModTime,
reqHeader: map[string]string{
"If-Modified-Since": htmlModTime.UTC().Format(TimeFormat),
},
wantStatus: 304,
},
"not_modified_modtime_with_contenttype": {
file: "testdata/style.css",
serveContentType: "text/css", // explicit content type
serveETag: `"foo"`, // Last-Modified sent only when no ETag
modtime: htmlModTime,
reqHeader: map[string]string{
"If-Modified-Since": htmlModTime.UTC().Format(TimeFormat),
},
wantStatus: 304,
},
"not_modified_etag": {
file: "testdata/style.css",
serveETag: `"foo"`,
reqHeader: map[string]string{
"If-None-Match": `"foo"`,
},
wantStatus: 304,
},
"not_modified_etag_no_seek": {
content: panicOnSeek{nil}, // should never be called
serveETag: `W/"foo"`, // If-None-Match uses weak ETag comparison
reqHeader: map[string]string{
"If-None-Match": `"baz", W/"foo"`,
},
wantStatus: 304,
},
"if_none_match_mismatch": {
file: "testdata/style.css",
serveETag: `"foo"`,
reqHeader: map[string]string{
"If-None-Match": `"Foo"`,
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
"range_good": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"Range": "bytes=0-4",
},
wantStatus: StatusPartialContent,
wantContentType: "text/css; charset=utf-8",
wantContentRange: "bytes 0-4/8",
},
"range_match": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": `"A"`,
},
wantStatus: StatusPartialContent,
wantContentType: "text/css; charset=utf-8",
wantContentRange: "bytes 0-4/8",
},
"range_match_weak_etag": {
file: "testdata/style.css",
serveETag: `W/"A"`,
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": `W/"A"`,
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
"range_no_overlap": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"Range": "bytes=10-20",
},
wantStatus: StatusRequestedRangeNotSatisfiable,
wantContentType: "text/plain; charset=utf-8",
wantContentRange: "bytes */8",
},
// An If-Range resource for entity "A", but entity "B" is now current.
// The Range request should be ignored.
"range_no_match": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": `"B"`,
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
"range_with_modtime": {
file: "testdata/style.css",
modtime: time.Date(2014, 6, 25, 17, 12, 18, 0 /* nanos */, time.UTC),
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": "Wed, 25 Jun 2014 17:12:18 GMT",
},
wantStatus: StatusPartialContent,
wantContentType: "text/css; charset=utf-8",
wantContentRange: "bytes 0-4/8",
wantLastMod: "Wed, 25 Jun 2014 17:12:18 GMT",
},
"range_with_modtime_nanos": {
file: "testdata/style.css",
modtime: time.Date(2014, 6, 25, 17, 12, 18, 123 /* nanos */, time.UTC),
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": "Wed, 25 Jun 2014 17:12:18 GMT",
},
wantStatus: StatusPartialContent,
wantContentType: "text/css; charset=utf-8",
wantContentRange: "bytes 0-4/8",
wantLastMod: "Wed, 25 Jun 2014 17:12:18 GMT",
},
"unix_zero_modtime": {
content: strings.NewReader("<html>foo"),
modtime: time.Unix(0, 0),
wantStatus: StatusOK,
wantContentType: "text/html; charset=utf-8",
},
"ifmatch_matches": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"If-Match": `"Z", "A"`,
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
"ifmatch_star": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"If-Match": `*`,
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
"ifmatch_failed": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"If-Match": `"B"`,
},
wantStatus: 412,
wantContentType: "text/plain; charset=utf-8",
},
"ifmatch_fails_on_weak_etag": {
file: "testdata/style.css",
serveETag: `W/"A"`,
reqHeader: map[string]string{
"If-Match": `W/"A"`,
},
wantStatus: 412,
wantContentType: "text/plain; charset=utf-8",
},
"if_unmodified_since_true": {
file: "testdata/style.css",
modtime: htmlModTime,
reqHeader: map[string]string{
"If-Unmodified-Since": htmlModTime.UTC().Format(TimeFormat),
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
wantLastMod: htmlModTime.UTC().Format(TimeFormat),
},
"if_unmodified_since_false": {
file: "testdata/style.css",
modtime: htmlModTime,
reqHeader: map[string]string{
"If-Unmodified-Since": htmlModTime.Add(-2 * time.Second).UTC().Format(TimeFormat),
},
wantStatus: 412,
wantContentType: "text/plain; charset=utf-8",
wantLastMod: htmlModTime.UTC().Format(TimeFormat),
},
}
for testName, tt := range tests {
var content io.ReadSeeker
if tt.file != "" {
f, err := os.Open(tt.file)
if err != nil {
t.Fatalf("test %q: %v", testName, err)
}
defer f.Close()
content = f
} else {
content = tt.content
}
servec <- serveParam{
name: filepath.Base(tt.file),
content: content,
modtime: tt.modtime,
etag: tt.serveETag,
contentType: tt.serveContentType,
}
req, err := NewRequest("GET", ts.URL, nil)
if err != nil {
t.Fatal(err)
}
for k, v := range tt.reqHeader {
req.Header.Set(k, v)
}
res, err := DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
io.Copy(ioutil.Discard, res.Body)
res.Body.Close()
if res.StatusCode != tt.wantStatus {
t.Errorf("test %q: status = %d; want %d", testName, res.StatusCode, tt.wantStatus)
}
if g, e := res.Header.Get("Content-Type"), tt.wantContentType; g != e {
t.Errorf("test %q: content-type = %q, want %q", testName, g, e)
}
if g, e := res.Header.Get("Content-Range"), tt.wantContentRange; g != e {
t.Errorf("test %q: content-range = %q, want %q", testName, g, e)
}
if g, e := res.Header.Get("Last-Modified"), tt.wantLastMod; g != e {
t.Errorf("test %q: last-modified = %q, want %q", testName, g, e)
}
}
}
// Issue 12991
func TestServerFileStatError(t *testing.T) {
rec := httptest.NewRecorder()
r, _ := NewRequest("GET", "http://foo/", nil)
redirect := false
name := "file.txt"
fs := issue12991FS{}
ExportServeFile(rec, r, fs, name, redirect)
if body := rec.Body.String(); !strings.Contains(body, "403") || !strings.Contains(body, "Forbidden") {
t.Errorf("wanted 403 forbidden message; got: %s", body)
}
}
type issue12991FS struct{}
func (issue12991FS) Open(string) (File, error) { return issue12991File{}, nil }
type issue12991File struct{ File }
func (issue12991File) Stat() (os.FileInfo, error) { return nil, os.ErrPermission }
func (issue12991File) Close() error { return nil }
func TestServeContentErrorMessages(t *testing.T) {
defer afterTest(t)
fs := fakeFS{
"/500": &fakeFileInfo{
err: errors.New("random error"),
},
"/403": &fakeFileInfo{
err: &os.PathError{Err: os.ErrPermission},
},
}
ts := httptest.NewServer(FileServer(fs))
defer ts.Close()
for _, code := range []int{403, 404, 500} {
res, err := DefaultClient.Get(fmt.Sprintf("%s/%d", ts.URL, code))
if err != nil {
t.Errorf("Error fetching /%d: %v", code, err)
continue
}
if res.StatusCode != code {
t.Errorf("For /%d, status code = %d; want %d", code, res.StatusCode, code)
}
res.Body.Close()
}
}
// verifies that sendfile is being used on Linux
func TestLinuxSendfile(t *testing.T) {
setParallel(t)
defer afterTest(t)
if runtime.GOOS != "linux" {
t.Skip("skipping; linux-only test")
}
if _, err := exec.LookPath("strace"); err != nil {
t.Skip("skipping; strace not found in path")
}
ln, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}
lnf, err := ln.(*net.TCPListener).File()
if err != nil {
t.Fatal(err)
}
defer ln.Close()
syscalls := "sendfile,sendfile64"
switch runtime.GOARCH {
case "mips64le", "s390x":
// strace on the above platforms doesn't support sendfile64
// and will error out if we specify that with `-e trace='.
syscalls = "sendfile"
case "mips64":
t.Skip("TODO: update this test to be robust against various versions of strace on mips64. See golang.org/issue/33430")
}
var buf bytes.Buffer
child := exec.Command("strace", "-f", "-q", "-e", "trace="+syscalls, os.Args[0], "-test.run=TestLinuxSendfileChild")
child.ExtraFiles = append(child.ExtraFiles, lnf)
child.Env = append([]string{"GO_WANT_HELPER_PROCESS=1"}, os.Environ()...)
child.Stdout = &buf
child.Stderr = &buf
if err := child.Start(); err != nil {
t.Skipf("skipping; failed to start straced child: %v", err)
}
res, err := Get(fmt.Sprintf("http://%s/", ln.Addr()))
if err != nil {
t.Fatalf("http client error: %v", err)
}
_, err = io.Copy(ioutil.Discard, res.Body)
if err != nil {
t.Fatalf("client body read error: %v", err)
}
res.Body.Close()
// Force child to exit cleanly.
Post(fmt.Sprintf("http://%s/quit", ln.Addr()), "", nil)
child.Wait()
rx := regexp.MustCompile(`sendfile(64)?\(\d+,\s*\d+,\s*NULL,\s*\d+`)
out := buf.String()
if !rx.MatchString(out) {
t.Errorf("no sendfile system call found in:\n%s", out)
}
}
func getBody(t *testing.T, testName string, req Request) (*Response, []byte) {
r, err := DefaultClient.Do(&req)
if err != nil {
t.Fatalf("%s: for URL %q, send error: %v", testName, req.URL.String(), err)
}
b, err := ioutil.ReadAll(r.Body)
if err != nil {
t.Fatalf("%s: for URL %q, reading body: %v", testName, req.URL.String(), err)
}
return r, b
}
// TestLinuxSendfileChild isn't a real test. It's used as a helper process
// for TestLinuxSendfile.
func TestLinuxSendfileChild(*testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
defer os.Exit(0)
fd3 := os.NewFile(3, "ephemeral-port-listener")
ln, err := net.FileListener(fd3)
if err != nil {
panic(err)
}
mux := NewServeMux()
mux.Handle("/", FileServer(Dir("testdata")))
mux.HandleFunc("/quit", func(ResponseWriter, *Request) {
os.Exit(0)
})
s := &Server{Handler: mux}
err = s.Serve(ln)
if err != nil {
panic(err)
}
}
// Issue 18984: tests that requests for paths beyond files return not-found errors
func TestFileServerNotDirError(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(FileServer(Dir("testdata")))
defer ts.Close()
res, err := Get(ts.URL + "/index.html/not-a-file")
if err != nil {
t.Fatal(err)
}
res.Body.Close()
if res.StatusCode != 404 {
t.Errorf("StatusCode = %v; want 404", res.StatusCode)
}
test := func(name string, dir Dir) {
t.Run(name, func(t *testing.T) {
_, err = dir.Open("/index.html/not-a-file")
if err == nil {
t.Fatal("err == nil; want != nil")
}
if !os.IsNotExist(err) {
t.Errorf("err = %v; os.IsNotExist(err) = %v; want true", err, os.IsNotExist(err))
}
_, err = dir.Open("/index.html/not-a-dir/not-a-file")
if err == nil {
t.Fatal("err == nil; want != nil")
}
if !os.IsNotExist(err) {
t.Errorf("err = %v; os.IsNotExist(err) = %v; want true", err, os.IsNotExist(err))
}
})
}
absPath, err := filepath.Abs("testdata")
if err != nil {
t.Fatal("get abs path:", err)
}
test("RelativePath", Dir("testdata"))
test("AbsolutePath", Dir(absPath))
}
func TestFileServerCleanPath(t *testing.T) {
tests := []struct {
path string
wantCode int
wantOpen []string
}{
{"/", 200, []string{"/", "/index.html"}},
{"/dir", 301, []string{"/dir"}},
{"/dir/", 200, []string{"/dir", "/dir/index.html"}},
}
for _, tt := range tests {
var log []string
rr := httptest.NewRecorder()
req, _ := NewRequest("GET", "http://foo.localhost"+tt.path, nil)
FileServer(fileServerCleanPathDir{&log}).ServeHTTP(rr, req)
if !reflect.DeepEqual(log, tt.wantOpen) {
t.Logf("For %s: Opens = %q; want %q", tt.path, log, tt.wantOpen)
}
if rr.Code != tt.wantCode {
t.Logf("For %s: Response code = %d; want %d", tt.path, rr.Code, tt.wantCode)
}
}
}
type fileServerCleanPathDir struct {
log *[]string
}
func (d fileServerCleanPathDir) Open(path string) (File, error) {
*(d.log) = append(*(d.log), path)
if path == "/" || path == "/dir" || path == "/dir/" {
// Just return back something that's a directory.
return Dir(".").Open(".")
}
return nil, os.ErrNotExist
}
type panicOnSeek struct{ io.ReadSeeker }
func Test_scanETag(t *testing.T) {
tests := []struct {
in string
wantETag string
wantRemain string
}{
{`W/"etag-1"`, `W/"etag-1"`, ""},
{`"etag-2"`, `"etag-2"`, ""},
{`"etag-1", "etag-2"`, `"etag-1"`, `, "etag-2"`},
{"", "", ""},
{"", "", ""},
{"W/", "", ""},
{`W/"truc`, "", ""},
{`w/"case-sensitive"`, "", ""},
}
for _, test := range tests {
etag, remain := ExportScanETag(test.in)
if etag != test.wantETag || remain != test.wantRemain {
t.Errorf("scanETag(%q)=%q %q, want %q %q", test.in, etag, remain, test.wantETag, test.wantRemain)
}
}
}
|
[
"\"GO_WANT_HELPER_PROCESS\""
] |
[] |
[
"GO_WANT_HELPER_PROCESS"
] |
[]
|
["GO_WANT_HELPER_PROCESS"]
|
go
| 1 | 0 | |
test/deployframework/helpers.go
|
package deployframework
import (
"bufio"
"fmt"
"net/http"
"net/url"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
metering "github.com/kube-reporting/metering-operator/pkg/apis/metering/v1"
meteringclient "github.com/kube-reporting/metering-operator/pkg/generated/clientset/versioned/typed/metering/v1"
olmv1alpha1 "github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1"
olmclientv1alpha1 "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1alpha1"
)
func checkPodStatus(pod v1.Pod) (bool, int) {
if pod.Status.Phase != v1.PodRunning {
return false, 0
}
var unreadyContainers int
for _, status := range pod.Status.ContainerStatuses {
if !status.Ready {
unreadyContainers++
}
}
return unreadyContainers == 0, len(pod.Status.ContainerStatuses) - unreadyContainers
}
func createResourceDirs(namespace, path string) ([]string, error) {
envVarArr := []string{
"METERING_TEST_NAMESPACE=" + namespace,
}
testDirsMap := map[string]string{
logDir: "LOG_DIR",
reportsDir: "REPORTS_DIR",
meteringconfigDir: "METERINGCONFIGS_DIR",
datasourcesDir: "DATASOURCES_DIR",
reportqueriesDir: "REPORTQUERIES_DIR",
hivetablesDir: "HIVETABLES_DIR",
prestotablesDir: "PRESTOTABLES_DIR",
storagelocationsDir: "STORAGELOCATIONS_DIR",
}
for dirname, env := range testDirsMap {
dirPath := filepath.Join(path, dirname)
err := os.MkdirAll(dirPath, 0777)
if err != nil {
return nil, fmt.Errorf("failed to create the directory %s: %v", dirPath, err)
}
envVarArr = append(envVarArr, env+"="+dirPath)
}
return envVarArr, nil
}
func logPollingSummary(logger logrus.FieldLogger, targetPods int, readyPods []string, unreadyPods []podStat) {
logger.Infof("Poll Summary")
logger.Infof("Current ratio of ready to target pods: %d/%d", len(readyPods), targetPods)
for _, unreadyPod := range unreadyPods {
if unreadyPod.Total == 0 {
logger.Infof("Pod %s is pending", unreadyPod.PodName)
continue
}
logger.Infof("Pod %s has %d/%d ready containers", unreadyPod.PodName, unreadyPod.Ready, unreadyPod.Total)
}
}
func validateImageConfig(image metering.ImageConfig) error {
var errArr []string
if image.Repository == "" {
errArr = append(errArr, "the image repository is empty")
}
if image.Tag == "" {
errArr = append(errArr, "the image tag is empty")
}
if len(errArr) != 0 {
return fmt.Errorf(strings.Join(errArr, "\n"))
}
return nil
}
type PodWaiter struct {
InitialDelay time.Duration
TimeoutPeriod time.Duration
Logger logrus.FieldLogger
Client kubernetes.Interface
}
type podStat struct {
PodName string
Ready int
Total int
}
// WaitForPods periodically polls the list of pods in the namespace
// and ensures the metering pods created are considered ready. In order to exit
// the polling loop, the number of pods listed must match the expected number
// of targetPodsCount, and all pod containers listed must report a ready status.
func (pw *PodWaiter) WaitForPods(namespace string, targetPodsCount int) error {
// TODO: generalize this more and pass a meta.ListOptions parameter
err := wait.Poll(pw.InitialDelay, pw.TimeoutPeriod, func() (done bool, err error) {
var readyPods []string
var unreadyPods []podStat
pods, err := pw.Client.CoreV1().Pods(namespace).List(meta.ListOptions{})
if err != nil {
return false, err
}
for _, pod := range pods.Items {
podIsReady, readyContainers := checkPodStatus(pod)
if podIsReady {
readyPods = append(readyPods, pod.Name)
continue
}
unreadyPods = append(unreadyPods, podStat{
PodName: pod.Name,
Ready: readyContainers,
Total: len(pod.Status.ContainerStatuses),
})
}
if pw.Logger != nil {
logPollingSummary(pw.Logger, targetPodsCount, readyPods, unreadyPods)
}
return len(pods.Items) == targetPodsCount && len(unreadyPods) == 0, nil
})
if err != nil {
return fmt.Errorf("the pods failed to report a ready status before the timeout period occurred: %v", err)
}
return nil
}
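// waitForPodsExample is a hypothetical usage sketch (not part of the original
// helpers) showing how the PodWaiter above is intended to be driven: configure
// the poll interval and timeout, then block until the expected number of pods
// in the namespace report a ready status. The namespace and pod count are
// illustrative values only.
func waitForPodsExample(logger logrus.FieldLogger, client kubernetes.Interface) error {
	pw := &PodWaiter{
		InitialDelay:  5 * time.Second,  // poll interval passed to wait.Poll
		TimeoutPeriod: 10 * time.Minute, // give up after this long
		Logger:        logger,
		Client:        client,
	}
	// Block until 7 metering pods are running and all of their containers are ready.
	return pw.WaitForPods("metering-ci", 7)
}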
// GetServiceAccountToken queries the namespace for the service account and attempts
// to find the secret that contains the serviceAccount token and return it.
func GetServiceAccountToken(client kubernetes.Interface, initialDelay, timeoutPeriod time.Duration, namespace, serviceAccountName string) (string, error) {
var (
sa *v1.ServiceAccount
err error
)
err = wait.Poll(initialDelay, timeoutPeriod, func() (done bool, err error) {
sa, err = client.CoreV1().ServiceAccounts(namespace).Get(serviceAccountName, meta.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
return false, nil
}
return false, err
}
return true, nil
})
if err != nil {
return "", fmt.Errorf("error getting service account %s: %v", reportingOperatorServiceAccountName, err)
}
if len(sa.Secrets) == 0 {
return "", fmt.Errorf("service account %s has no secrets", serviceAccountName)
}
var secretName string
for _, secret := range sa.Secrets {
if strings.Contains(secret.Name, "token") {
secretName = secret.Name
break
}
}
if secretName == "" {
return "", fmt.Errorf("%s service account has no token", serviceAccountName)
}
secret, err := client.CoreV1().Secrets(namespace).Get(secretName, meta.GetOptions{})
if err != nil {
return "", fmt.Errorf("failed getting %s service account token secret: %v", serviceAccountName, err)
}
return string(secret.Data["token"]), nil
}
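// serviceAccountTokenExample is a hypothetical sketch (not part of the original
// helpers) showing a typical call to GetServiceAccountToken: poll briefly for
// the service account, then extract the bearer token from its token secret.
// The namespace and service account name are illustrative values only.
func serviceAccountTokenExample(client kubernetes.Interface) (string, error) {
	return GetServiceAccountToken(client, 2*time.Second, 2*time.Minute, "metering-ci", "reporting-operator")
}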
func waitForURLToReportStatusOK(logger logrus.FieldLogger, targetURL string, timeout time.Duration) error {
u, err := url.Parse(targetURL)
if err != nil {
return fmt.Errorf("failed to parse the %s URL: %v", targetURL, err)
}
logger.Debugf("Waiting for the %s url to report a 200 status", u)
err = wait.Poll(10*time.Second, timeout, func() (done bool, err error) {
resp, err := http.Get(u.String())
if err != nil {
return false, nil
}
defer resp.Body.Close()
return resp.StatusCode == http.StatusOK, nil
})
if err != nil {
return fmt.Errorf("timed-out while waiting for the %s url to report a 200 status code: %v", u, err)
}
logger.Infof("The %s url reported a 200 status code", u)
return nil
}
func runCleanupScript(logger logrus.FieldLogger, namespace, outputPath, scriptPath string) error {
var errArr []string
envVarArr, err := createResourceDirs(namespace, outputPath)
if err != nil {
errArr = append(errArr, fmt.Sprintf("failed to create the resource output directories: %v", err))
}
cleanupCmd := exec.Command(scriptPath)
cleanupStdout, err := cleanupCmd.StdoutPipe()
if err != nil {
errArr = append(errArr, fmt.Sprintf("failed to create a pipe from command output to stdout: %v", err))
}
scanner := bufio.NewScanner(cleanupStdout)
go func() {
for scanner.Scan() {
logger.Infof(scanner.Text())
}
}()
cleanupCmd.Env = append(os.Environ(), envVarArr...)
err = cleanupCmd.Run()
if err != nil {
// TODO(tflannag): we need to add more flexibility to this
// function, especially in the case where we expect that a
// test case will fail, and it did fail, but the gather test
// install artifacts scripts will return a non-zero exit code
// as it cannot successfully log any resources. The workaround
// for now is to log the error, but don't return an error.
logger.Infof("%v", err)
}
if len(errArr) != 0 {
return fmt.Errorf(strings.Join(errArr, "\n"))
}
return nil
}
func cleanupLocalCmds(logger logrus.FieldLogger, commands ...exec.Cmd) error {
var errArr []string
for _, cmd := range commands {
logger.Infof("Sending an interrupt to the %s command (pid %d)", cmd.Path, cmd.Process.Pid)
err := cmd.Process.Signal(os.Interrupt)
if err != nil {
errArr = append(errArr, fmt.Sprintf("failed to interrupt pid %d: %v", cmd.Process.Pid, err))
}
err = cmd.Wait()
if err != nil {
_, ok := err.(*exec.ExitError)
if !ok {
logger.Infof("There was an error while waiting for the %s command to finish running: %v", cmd.Path, err)
errArr = append(errArr, fmt.Sprintf("failed to wait for the %s command to finish running: %v", cmd.Path, err))
}
}
}
if len(errArr) != 0 {
return fmt.Errorf(strings.Join(errArr, "\n"))
}
return nil
}
func CreateCatalogSource(logger logrus.FieldLogger, name, namespace, configMapName string, client olmclientv1alpha1.OperatorsV1alpha1Interface) error {
// check if the @name CatalogSource already exists and if true, exit early.
// If no CatalogSource exists by that name, start building up that object
// and attempt to create it through the OLM v1alpha1 client.
_, err := client.CatalogSources(namespace).Get(name, meta.GetOptions{})
if apierrors.IsNotFound(err) {
catsrc := &olmv1alpha1.CatalogSource{
ObjectMeta: meta.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: olmv1alpha1.CatalogSourceSpec{
SourceType: olmv1alpha1.SourceTypeConfigmap,
ConfigMap: configMapName,
DisplayName: configMapName,
Publisher: "Red Hat",
},
}
_, err := client.CatalogSources(namespace).Create(catsrc)
if err != nil {
return fmt.Errorf("failed to create the %s CatalogSource for metering: %v", name, err)
}
logger.Infof("Created the %s CatalogSource", name)
} else if err != nil {
return err
}
return nil
}
// VerifyCatalogSourcePod is a deployframework helper function that checks the @namespace
// and verifies that there's a ready Pod that was created by an OLM CatalogSource resource.
func VerifyCatalogSourcePod(logger logrus.FieldLogger, client kubernetes.Interface, packageName, namespace string) error {
// polling every three seconds, list all of the Pods in the @namespace, checking
// if any of those Pods match the `olm.catalogSource=@packageName` label selector.
// Continue polling until a single Pod is returned by that label selector query
// and that Pod is reporting a Ready status, or stop when the timeout period is reached.
err := wait.Poll(3*time.Second, 1*time.Minute, func() (done bool, err error) {
pods, err := client.CoreV1().Pods(namespace).List(meta.ListOptions{
LabelSelector: fmt.Sprintf("olm.catalogSource=%s", packageName),
})
if err != nil {
return false, err
}
if len(pods.Items) != 1 {
return false, nil
}
for _, pod := range pods.Items {
podIsReady, _ := checkPodStatus(pod)
if !podIsReady {
logger.Infof("Waiting for the %s Pod to become Ready", pod.Name)
return false, nil
}
}
return true, nil
})
if err != nil {
return fmt.Errorf("failed to wait for the %s catalogsource Pod to become ready: %v", packageName, err)
}
return nil
}
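// catalogSourceExample is a hypothetical sketch (not part of the original
// helpers) tying the two functions above together: create a ConfigMap-backed
// CatalogSource, then wait until OLM has spun up a ready catalog Pod for it.
// The name, namespace, and ConfigMap name are illustrative values only.
func catalogSourceExample(logger logrus.FieldLogger, k8s kubernetes.Interface, olm olmclientv1alpha1.OperatorsV1alpha1Interface) error {
	const name, namespace, configMapName = "metering-upgrade", "metering-ci", "metering-upgrade-manifests"
	if err := CreateCatalogSource(logger, name, namespace, configMapName, olm); err != nil {
		return err
	}
	// OLM labels the catalog Pod with olm.catalogSource=<name>, which is the
	// selector VerifyCatalogSourcePod polls for.
	return VerifyCatalogSourcePod(logger, k8s, name, namespace)
}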
// CreateUpgradeConfigMap is a helper function responsible for creating a ConfigMap
// that contains the current version of the repositories' CRDs, CSV and metering-ocp package
// which OLM can then consume through a CatalogSource. In order to create this ConfigMap,
// we execute a bash script that handles the heavy-lifting, overriding any of the environment
// variables that the script uses, to match our current deployment context.
func CreateUpgradeConfigMap(logger logrus.FieldLogger, name, namespace, scriptPath string) error {
/*
Check if we are running in CI by getting the value of the
IMAGE_FORMAT environment variable that CI builds and exposes
for our job. If this value is non-empty, then the "update
configmap" script will override the containerImage field in the CSV.
Else, the containerImage will use the default origin images.
More information:
https://github.com/openshift/ci-tools/blob/master/TEMPLATES.md#image_format
*/
imageOverride := os.Getenv("IMAGE_FORMAT")
if imageOverride != "" {
imageOverride = strings.Replace(imageOverride, "${component}", "metering-ansible-operator", 1)
}
envVarArr := []string{
"IMAGE_OVERRIDE=" + imageOverride,
"NAMESPACE=" + namespace,
"NAME=" + name,
}
// build up the path to the ./hack/@scriptPath and stat that path,
// verifying it exists before running that bash script
relPath := filepath.Join(scriptPath, createUpgradeConfigMapScriptName)
createConfigMapScript, err := filepath.Abs(relPath)
if err != nil {
return fmt.Errorf("failed to get the absolute path for the '%s' path: %v", relPath, err)
}
_, err = os.Stat(createConfigMapScript)
if err != nil {
return fmt.Errorf("failed to stat the '%s' path: %v", createConfigMapScript, err)
}
cmd := exec.Command(createConfigMapScript)
cmd.Env = append(os.Environ(), envVarArr...)
stderr, _ := cmd.StderrPipe()
err = cmd.Start()
if err != nil {
return fmt.Errorf("failed to start running the %s script", createConfigMapScript)
}
scanner := bufio.NewScanner(stderr)
scanner.Split(bufio.ScanWords)
for scanner.Scan() {
m := scanner.Text()
fmt.Println(m)
}
// TODO(tflannag): add a timeout function that kills the cmd.Process
// https://medium.com/@vCabbage/go-timeout-commands-with-os-exec-commandcontext-ba0c861ed738
// https://github.com/golang/go/issues/9580#issuecomment-69724465
err = cmd.Wait()
if err != nil {
return fmt.Errorf("failed to wait until the %s script has finished running", createConfigMapScript)
}
return nil
}
// VerifyConfigMap is a helper function that polls until the @name ConfigMap
// has been created in the @namespace namespace.
func VerifyConfigMap(logger logrus.FieldLogger, client kubernetes.Interface, name, namespace string) error {
err := wait.Poll(1*time.Second, 45*time.Second, func() (done bool, err error) {
_, err = client.CoreV1().ConfigMaps(namespace).Get(name, meta.GetOptions{})
if apierrors.IsNotFound(err) {
return false, nil
}
if err != nil {
return false, err
}
return true, nil
})
if err != nil {
return fmt.Errorf("failed to wait for the %s configmap to be created in the %s namespace: %v", name, namespace, err)
}
logger.Infof("The %s ConfigMap has been created in the %s namespace", name, namespace)
return nil
}
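// upgradeConfigMapExample is a hypothetical sketch (not part of the original
// helpers) showing how the two functions above are typically combined: run the
// "create upgrade configmap" script, then poll until the resulting ConfigMap
// appears in the namespace. The names and script path are illustrative only.
func upgradeConfigMapExample(logger logrus.FieldLogger, client kubernetes.Interface) error {
	const name, namespace = "metering-upgrade-manifests", "metering-ci"
	if err := CreateUpgradeConfigMap(logger, name, namespace, "./hack"); err != nil {
		return err
	}
	return VerifyConfigMap(logger, client, name, namespace)
}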
// UpdateExistingSubscription is a helper function responsible for upgrading an existing metering-ocp Subscription
// to use the newest payload and verify that the Subscription object is reporting a successful upgrade status.
func UpdateExistingSubscription(logger logrus.FieldLogger, client olmclientv1alpha1.OperatorsV1alpha1Interface, name, upgradeChannel, namespace string) error {
sub, err := client.Subscriptions(namespace).Get(name, meta.GetOptions{})
if apierrors.IsNotFound(err) {
return fmt.Errorf("the %s subscription does not exist", name)
}
if err != nil {
return err
}
// update the Subscription to use the most recent channel listed in the package.yaml
// and change the Subscription source type to use the contents of a CatalogSource.
sub.Spec.CatalogSource = name
sub.Spec.CatalogSourceNamespace = namespace
sub.Spec.Channel = upgradeChannel
_, err = client.Subscriptions(namespace).Update(sub)
if err != nil {
return err
}
logger.Infof("Updated the %s Subscription to use the %s channel", name, upgradeChannel)
// after updating the metering-ocp Subscription to use a newer channel,
// wait until this object is reporting a successful upgrade state before
// transferring control back to the function call site.
err = wait.Poll(3*time.Second, 1*time.Minute, func() (done bool, err error) {
sub, err := client.Subscriptions(namespace).Get(name, meta.GetOptions{})
if apierrors.IsNotFound(err) {
return false, nil
}
if err != nil {
return false, err
}
logger.Infof("Waiting for the %s Subscription to finish upgrading", name)
if !strings.Contains(sub.Status.CurrentCSV, upgradeChannel) {
logger.Infof("Subscription status does not report metering-operator-v%s as the currentCSV", upgradeChannel)
return false, nil
}
if sub.Status.State != olmv1alpha1.SubscriptionStateAtLatest {
logger.Infof("Subscription status has not reported AtLatestKnown yet")
return false, nil
}
return true, nil
})
if err != nil {
return fmt.Errorf("failed to wait for the %s subscription to finish updating in the %s namespace: %v", name, namespace, err)
}
return nil
}
// WaitForMeteringOperatorDeployment is a helper function that will poll for the @name
// deployment every ten seconds, waiting until that deployment reports exactly one updated
// replica and one total replica in its status (UpdatedReplicas == 1 and Replicas == 1),
// which indicates a successful upgrade.
func WaitForMeteringOperatorDeployment(logger logrus.FieldLogger, client kubernetes.Interface, name, namespace string) error {
err := wait.Poll(10*time.Second, 10*time.Minute, func() (done bool, err error) {
deployment, err := client.AppsV1().Deployments(namespace).Get(name, meta.GetOptions{})
if apierrors.IsNotFound(err) {
return false, nil
}
if err != nil {
return false, err
}
logger.Infof("Waiting for the %s Deployment status to report a successful upgrade.", deployment.Name)
return deployment.Status.UpdatedReplicas == int32(1) && deployment.Status.Replicas == int32(1), nil
})
if err != nil {
return fmt.Errorf("failed to wait for the %s Deployment to finish updating in the %s namespace: %v", name, namespace, err)
}
logger.Infof("The %s Deployment has reported a successful upgrade status", name)
return nil
}
// WaitForReportingOperatorDeployment is a helper function that will poll for the @name
// deployment every twenty seconds, waiting until that deployment reports a successful
// upgrade status. Note: the reporting-operator deployment uses a RollingUpdate strategy
// which means we need to be careful about marking the deployment as "Ready" while there
// are still two reporting-operator Pods in the @namespace. Instead, we keep polling
// until only a single replica remains.
func WaitForReportingOperatorDeployment(logger logrus.FieldLogger, client kubernetes.Interface, name, namespace string) error {
err := wait.Poll(20*time.Second, 10*time.Minute, func() (done bool, err error) {
deployment, err := client.AppsV1().Deployments(namespace).Get(name, meta.GetOptions{})
if apierrors.IsNotFound(err) {
return false, nil
}
if err != nil {
return false, err
}
logger.Infof("Waiting for the %s Deployment status to report a successful upgrade.", deployment.Name)
return deployment.Status.UpdatedReplicas == int32(1) && deployment.Status.Replicas == int32(1) && deployment.Status.ObservedGeneration == int64(2), nil
})
if err != nil {
return fmt.Errorf("failed to wait for the %s Deployment to finish updating in the %s namespace: %v", name, namespace, err)
}
logger.Infof("The %s Deployment has reported a successful upgrade status", name)
return nil
}
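// upgradeFlowExample is a hypothetical sketch (not part of the original helpers)
// showing the order in which the upgrade helpers above are meant to be used:
// point the Subscription at the new channel, then wait for both operator
// Deployments to finish rolling out. All names and the channel are illustrative.
func upgradeFlowExample(logger logrus.FieldLogger, k8s kubernetes.Interface, olm olmclientv1alpha1.OperatorsV1alpha1Interface) error {
	const namespace, upgradeChannel = "metering-ci", "4.6"
	if err := UpdateExistingSubscription(logger, olm, "metering-ocp", upgradeChannel, namespace); err != nil {
		return err
	}
	if err := WaitForMeteringOperatorDeployment(logger, k8s, "metering-operator", namespace); err != nil {
		return err
	}
	return WaitForReportingOperatorDeployment(logger, k8s, "reporting-operator", namespace)
}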
func WaitForReportDataSources(logger logrus.FieldLogger, client meteringclient.MeteringV1Interface, namespace string) error {
err := wait.Poll(10*time.Second, 5*time.Minute, func() (done bool, err error) {
dataSources, err := client.ReportDataSources(namespace).List(meta.ListOptions{})
if err != nil {
return false, err
}
logger.Infof("Waiting for the ReportDataSoures to exist in the %s namespace", namespace)
return len(dataSources.Items) != 0, nil
})
if err != nil {
return fmt.Errorf("failed to wait %s namespace to existing ReportDataSources: %v", namespace, err)
}
logger.Infof("The %s namespace has ReportDataSources present", namespace)
return nil
}
func DeleteAllTestReports(logger logrus.FieldLogger, client meteringclient.MeteringV1Interface, namespace string) error {
err := client.Reports(namespace).DeleteCollection(&meta.DeleteOptions{}, meta.ListOptions{})
if err != nil {
return fmt.Errorf("failed to delete all the Reports in the %s namespace: %v", namespace, err)
}
logger.Infof("Deleted all of the Reports in the %s namespace", namespace)
return nil
}
func DeleteAllReportDataSources(logger logrus.FieldLogger, client meteringclient.MeteringV1Interface, namespace string) error {
err := client.ReportDataSources(namespace).DeleteCollection(&meta.DeleteOptions{}, meta.ListOptions{})
if err != nil {
return fmt.Errorf("failed to delete all the ReportDataSources in the %s namespace: %v", namespace, err)
}
logger.Infof("Deleted all of the ReportDataSources in the %s namespace", namespace)
return nil
}
|
[
"\"IMAGE_FORMAT\""
] |
[] |
[
"IMAGE_FORMAT"
] |
[]
|
["IMAGE_FORMAT"]
|
go
| 1 | 0 | |
src/main/java/Worker.java
|
import io.camunda.zeebe.client.ZeebeClient;
import io.camunda.zeebe.client.api.worker.JobWorker;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
/**
* Example application that connects to a cluster on Camunda Cloud, or a locally deployed cluster.
*
* <p>When connecting to a cluster in Camunda Cloud, this application assumes that the following
* environment variables are set:
*
* <ul>
* <li>ZEEBE_ADDRESS
* <li>ZEEBE_CLIENT_ID (implicitly required by {@code ZeebeClient} if authorization is enabled)
* <li>ZEEBE_CLIENT_SECRET (implicitly required by {@code ZeebeClient} if authorization is enabled)
* <li>ZEEBE_AUTHORIZATION_SERVER_URL (implicitly required by {@code ZeebeClient} if authorization is enabled)
* </ul>
*
* <p><strong>Hint:</strong> When you create client credentials in Camunda Cloud you have the option
 * to download a file with the above lines filled out for you.
*
* <p>When connecting to a local cluster, you only need to set {@code ZEEBE_ADDRESS}.
 * This application also assumes that authentication is disabled for a locally deployed cluster.
*/
public class Worker {
private static final String JOB_TYPE = "greet";
public static void main(String[] args) throws InterruptedException {
System.out.println("Starting worker...");
final String zeebeAddress = getEnvironmentVariable("ZEEBE_ADDRESS");
System.out.println("Connecting to " + zeebeAddress);
ZeebeClient client = createZeebeClient(zeebeAddress);
System.out.println("Registering worker for jobType:" + JOB_TYPE);
final JobWorker jobWorker = client.newWorker().jobType(JOB_TYPE).handler(new WorkerJobHandler()).open();
final CountDownLatch countDownLatch = new CountDownLatch(1);
Runtime.getRuntime().addShutdownHook(
new Thread(() -> {
System.out.println("Closing worker for jobType:" + JOB_TYPE);
jobWorker.close();
System.out.println("Closing client connected to " + zeebeAddress);
client.close();
System.out.println("Worker Shutdown Complete");
countDownLatch.countDown();
})
);
countDownLatch.await();
}
private static ZeebeClient createZeebeClient(String gatewayAddress) {
if (gatewayAddress.contains("zeebe.camunda.io")) {
checkEnvVars("ZEEBE_CLIENT_ID", "ZEEBE_CLIENT_SECRET", "ZEEBE_AUTHORIZATION_SERVER_URL");
/* Connect to Camunda Cloud Cluster, assumes that credentials are set in environment variables.
* See JavaDoc on class level for details
*/
return ZeebeClient.newClientBuilder().gatewayAddress(gatewayAddress).build();
} else {
// connect to local deployment; assumes that authentication is disabled
return ZeebeClient.newClientBuilder().gatewayAddress(gatewayAddress).usePlaintext().build();
}
}
private static String getEnvironmentVariable(final String key) {
checkEnvVars(key);
final Map<String, String> envVars = System.getenv();
return envVars.get(key);
}
private static void checkEnvVars(String... keys) {
final Map<String, String> envVars = System.getenv();
for (String key : keys) {
if (!envVars.containsKey(key)) {
throw new IllegalStateException("Unable to find mandatory environment variable " + key);
}
}
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
sdk/go/keepclient/support.go
|
package keepclient
import (
"crypto/md5"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"net"
"net/http"
"os"
"regexp"
"strings"
"time"
"git.curoverse.com/arvados.git/sdk/go/streamer"
)
// Function used to emit debug messages. The easiest way to enable
// keepclient debug messages in your application is to assign
// log.Printf to DebugPrintf.
var DebugPrintf = func(string, ...interface{}) {}
func init() {
var matchTrue = regexp.MustCompile("^(?i:1|yes|true)$")
if matchTrue.MatchString(os.Getenv("ARVADOS_DEBUG")) {
DebugPrintf = log.Printf
}
}
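// enableDebugLoggingExample is a hypothetical sketch (not part of the original
// SDK) showing the pattern described in the DebugPrintf comment above: an
// application that wants keepclient debug output without setting ARVADOS_DEBUG
// can assign log.Printf to DebugPrintf directly.
func enableDebugLoggingExample() {
	DebugPrintf = log.Printf // route all keepclient debug messages to the standard logger
	DebugPrintf("keepclient debug logging enabled")
}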
type keepService struct {
Uuid string `json:"uuid"`
Hostname string `json:"service_host"`
Port int `json:"service_port"`
SSL bool `json:"service_ssl_flag"`
SvcType string `json:"service_type"`
ReadOnly bool `json:"read_only"`
}
// Md5String returns md5 hash for the bytes in the given string
func Md5String(s string) string {
return fmt.Sprintf("%x", md5.Sum([]byte(s)))
}
// Set timeouts applicable when connecting to non-disk services
// (assumed to be over the Internet).
func (*KeepClient) setClientSettingsNonDisk(client *http.Client) {
// Maximum time to wait for a complete response
client.Timeout = 300 * time.Second
// TCP and TLS connection settings
client.Transport = &http.Transport{
Dial: (&net.Dialer{
// The maximum time to wait to set up
// the initial TCP connection.
Timeout: 30 * time.Second,
// The TCP keep alive heartbeat
// interval.
KeepAlive: 120 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
}
}
// Set timeouts applicable when connecting to keepstore services directly
// (assumed to be on the local network).
func (*KeepClient) setClientSettingsDisk(client *http.Client) {
// Maximum time to wait for a complete response
client.Timeout = 20 * time.Second
// TCP and TLS connection timeouts
client.Transport = &http.Transport{
Dial: (&net.Dialer{
// The maximum time to wait to set up
// the initial TCP connection.
Timeout: 2 * time.Second,
// The TCP keep alive heartbeat
// interval.
KeepAlive: 180 * time.Second,
}).Dial,
TLSHandshakeTimeout: 4 * time.Second,
}
}
type svcList struct {
Items []keepService `json:"items"`
}
type uploadStatus struct {
err error
url string
statusCode int
replicas_stored int
response string
}
func (this *KeepClient) uploadToKeepServer(host string, hash string, body io.ReadCloser,
upload_status chan<- uploadStatus, expectedLength int64, requestID int32) {
var req *http.Request
var err error
var url = fmt.Sprintf("%s/%s", host, hash)
if req, err = http.NewRequest("PUT", url, nil); err != nil {
DebugPrintf("DEBUG: [%08x] Error creating request PUT %v error: %v", requestID, url, err.Error())
upload_status <- uploadStatus{err, url, 0, 0, ""}
body.Close()
return
}
req.ContentLength = expectedLength
if expectedLength > 0 {
// http.Client.Do will close the body ReadCloser when it is
// done with it.
req.Body = body
} else {
// "For client requests, a value of 0 means unknown if Body is
// not nil." In this case we do want the body to be empty, so
// don't set req.Body. However, we still need to close the
// body ReadCloser.
body.Close()
}
req.Header.Add("Authorization", fmt.Sprintf("OAuth2 %s", this.Arvados.ApiToken))
req.Header.Add("Content-Type", "application/octet-stream")
req.Header.Add(X_Keep_Desired_Replicas, fmt.Sprint(this.Want_replicas))
var resp *http.Response
if resp, err = this.Client.Do(req); err != nil {
DebugPrintf("DEBUG: [%08x] Upload failed %v error: %v", requestID, url, err.Error())
upload_status <- uploadStatus{err, url, 0, 0, ""}
return
}
rep := 1
if xr := resp.Header.Get(X_Keep_Replicas_Stored); xr != "" {
fmt.Sscanf(xr, "%d", &rep)
}
defer resp.Body.Close()
defer io.Copy(ioutil.Discard, resp.Body)
respbody, err2 := ioutil.ReadAll(&io.LimitedReader{R: resp.Body, N: 4096})
response := strings.TrimSpace(string(respbody))
if err2 != nil && err2 != io.EOF {
DebugPrintf("DEBUG: [%08x] Upload %v error: %v response: %v", requestID, url, err2.Error(), response)
upload_status <- uploadStatus{err2, url, resp.StatusCode, rep, response}
} else if resp.StatusCode == http.StatusOK {
DebugPrintf("DEBUG: [%08x] Upload %v success", requestID, url)
upload_status <- uploadStatus{nil, url, resp.StatusCode, rep, response}
} else {
if resp.StatusCode >= 300 && response == "" {
response = resp.Status
}
DebugPrintf("DEBUG: [%08x] Upload %v error: %v response: %v", requestID, url, resp.StatusCode, response)
upload_status <- uploadStatus{errors.New(resp.Status), url, resp.StatusCode, rep, response}
}
}
func (this *KeepClient) putReplicas(
hash string,
tr *streamer.AsyncStream,
expectedLength int64) (locator string, replicas int, err error) {
// Generate an arbitrary ID to identify this specific
// transaction in debug logs.
requestID := rand.Int31()
// Calculate the ordering for uploading to servers
sv := NewRootSorter(this.WritableLocalRoots(), hash).GetSortedRoots()
// The next server to try contacting
next_server := 0
// The number of active writers
active := 0
// Used to communicate status from the upload goroutines
upload_status := make(chan uploadStatus)
defer func() {
// Wait for any abandoned uploads (e.g., we started
// two uploads and the first replied with replicas=2)
// to finish before closing the status channel.
go func() {
for active > 0 {
<-upload_status
}
close(upload_status)
}()
}()
replicasDone := 0
replicasTodo := this.Want_replicas
replicasPerThread := this.replicasPerService
if replicasPerThread < 1 {
// unlimited or unknown
replicasPerThread = replicasTodo
}
retriesRemaining := 1 + this.Retries
var retryServers []string
lastError := make(map[string]string)
for retriesRemaining > 0 {
retriesRemaining -= 1
next_server = 0
retryServers = []string{}
for replicasTodo > 0 {
for active*replicasPerThread < replicasTodo {
// Start some upload requests
if next_server < len(sv) {
DebugPrintf("DEBUG: [%08x] Begin upload %s to %s", requestID, hash, sv[next_server])
go this.uploadToKeepServer(sv[next_server], hash, tr.MakeStreamReader(), upload_status, expectedLength, requestID)
next_server += 1
active += 1
} else {
if active == 0 && retriesRemaining == 0 {
msg := "Could not write sufficient replicas: "
for _, resp := range lastError {
msg += resp + "; "
}
msg = msg[:len(msg)-2]
return locator, replicasDone, InsufficientReplicasError(errors.New(msg))
} else {
break
}
}
}
DebugPrintf("DEBUG: [%08x] Replicas remaining to write: %v active uploads: %v",
requestID, replicasTodo, active)
// Now wait for something to happen.
if active > 0 {
status := <-upload_status
active -= 1
if status.statusCode == 200 {
// good news!
replicasDone += status.replicas_stored
replicasTodo -= status.replicas_stored
locator = status.response
delete(lastError, status.url)
} else {
msg := fmt.Sprintf("[%d] %s", status.statusCode, status.response)
if len(msg) > 100 {
msg = msg[:100]
}
lastError[status.url] = msg
}
if status.statusCode == 0 || status.statusCode == 408 || status.statusCode == 429 ||
(status.statusCode >= 500 && status.statusCode != 503) {
// Timeout, too many requests, or other server side failure
// Do not retry when status code is 503, which means the keep server is full
retryServers = append(retryServers, status.url[0:strings.LastIndex(status.url, "/")])
}
} else {
break
}
}
sv = retryServers
}
return locator, replicasDone, nil
}
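// Hedged usage sketch (not part of the original file): inside this package a caller
// that has already buffered a block into an AsyncStream could drive the retry loop
// above roughly as follows; kc, hash, tr and expectedLength are illustrative names
// assumed to be in scope.
//
//   locator, replicas, err := kc.putReplicas(hash, tr, expectedLength)
//   if err != nil {
//       // fewer than kc.Want_replicas copies were written; err carries the
//       // per-server failure messages accumulated in lastError
//   }
//   _ = locator // the signed locator returned by the last successful PUT
//   _ = replicas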
|
[
"\"ARVADOS_DEBUG\""
] |
[] |
[
"ARVADOS_DEBUG"
] |
[]
|
["ARVADOS_DEBUG"]
|
go
| 1 | 0 | |
google/appengine/api/user_service_stub.py
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Trivial implementation of the UserService."""
import os
import urllib
import urlparse
from google.appengine.api import apiproxy_stub
from google.appengine.api import user_service_pb
_DEFAULT_LOGIN_URL = 'https://www.google.com/accounts/Login?continue=%s'
_DEFAULT_LOGOUT_URL = 'https://www.google.com/accounts/Logout?continue=%s'
_DEFAULT_AUTH_DOMAIN = 'gmail.com'
_OAUTH_CONSUMER_KEY = 'example.com'
_OAUTH_EMAIL = '[email protected]'
_OAUTH_USER_ID = '0'
_OAUTH_AUTH_DOMAIN = _DEFAULT_AUTH_DOMAIN
class UserServiceStub(apiproxy_stub.APIProxyStub):
"""Trivial implementation of the UserService."""
def __init__(self,
login_url=_DEFAULT_LOGIN_URL,
logout_url=_DEFAULT_LOGOUT_URL,
service_name='user',
auth_domain=_DEFAULT_AUTH_DOMAIN,
http_server_address=None,
):
"""Initializer.
Args:
login_url: String containing the URL to use for logging in.
logout_url: String containing the URL to use for logging out.
service_name: Service name expected for all calls.
auth_domain: The authentication domain for the service e.g. "gmail.com".
http_server_address: The address of the application's HTTP server e.g.
"localhost:8080". If this is not set then the SERVER_NAME and
SERVER_PORT environment variables are used.
Note: Both the login_url and logout_url arguments must contain one format
parameter, which will be replaced with the continuation URL where the user
should be redirected after log-in or log-out has been completed.
"""
super(UserServiceStub, self).__init__(service_name)
self.__num_requests = 0
self._login_url = login_url
self._logout_url = logout_url
self._http_server_address = http_server_address
os.environ['AUTH_DOMAIN'] = auth_domain
def num_requests(self):
return self.__num_requests
def _Dynamic_CreateLoginURL(self, request, response):
"""Trivial implementation of UserService.CreateLoginURL().
Args:
request: a CreateLoginURLRequest
response: a CreateLoginURLResponse
"""
self.__num_requests += 1
response.set_login_url(
self._login_url %
urllib.quote(self._AddHostToContinueURL(request.destination_url())))
def _Dynamic_CreateLogoutURL(self, request, response):
"""Trivial implementation of UserService.CreateLogoutURL().
Args:
request: a CreateLogoutURLRequest
response: a CreateLogoutURLResponse
"""
self.__num_requests += 1
response.set_logout_url(
self._logout_url %
urllib.quote(self._AddHostToContinueURL(request.destination_url())))
def _Dynamic_GetOAuthUser(self, unused_request, response):
"""Trivial implementation of UserService.GetOAuthUser().
Args:
unused_request: a GetOAuthUserRequest
response: a GetOAuthUserResponse
"""
self.__num_requests += 1
response.set_email(_OAUTH_EMAIL)
response.set_user_id(_OAUTH_USER_ID)
response.set_auth_domain(_OAUTH_AUTH_DOMAIN)
def _Dynamic_CheckOAuthSignature(self, unused_request, response):
"""Trivial implementation of UserService.CheckOAuthSignature().
Args:
unused_request: a CheckOAuthSignatureRequest
response: a CheckOAuthSignatureResponse
"""
self.__num_requests += 1
response.set_oauth_consumer_key(_OAUTH_CONSUMER_KEY)
def _AddHostToContinueURL(self, continue_url):
"""Adds the request host to the continue url if no host is specified.
Args:
continue_url: the URL which may or may not have a host specified
Returns:
string
"""
(protocol, host, path, parameters, query, fragment) = urlparse.urlparse(continue_url, 'http')
if host:
return continue_url
if self._http_server_address:
host = self._http_server_address
else:
host = os.environ['SERVER_NAME']
if os.environ['SERVER_PORT'] != '80':
host = host + ":" + os.environ['SERVER_PORT']
if path == '':
path = '/'
return urlparse.urlunparse(
(protocol, host, path, parameters, query, fragment))
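# Hedged usage sketch (not part of the original stub): in a local test harness the
# stub is typically registered on the global API proxy map under the same service
# name it was constructed with; the server address below is an illustrative assumption.
#
#   from google.appengine.api import apiproxy_stub_map
#   apiproxy_stub_map.apiproxy.RegisterStub(
#       'user', UserServiceStub(http_server_address='localhost:8080'))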
|
[] |
[] |
[
"AUTH_DOMAIN",
"SERVER_PORT",
"SERVER_NAME"
] |
[]
|
["AUTH_DOMAIN", "SERVER_PORT", "SERVER_NAME"]
|
python
| 3 | 0 | |
server/src/main/java/com/server/Area/Controller.java
|
package com.server.Area;
import com.server.Area.Actions;
import java.util.concurrent.atomic.AtomicLong;
import java.sql.*;
import java.io.*;
import java.lang.*;
import java.security.Principal;
import com.server.Area.User;
import java.util.Random;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.ListIterator;
import twitter4j.Status;
import twitter4j.Twitter;
import twitter4j.TwitterException;
import twitter4j.TwitterFactory;
import twitter4j.auth.AccessToken;
import twitter4j.auth.RequestToken;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import org.springframework.web.bind.annotation.CrossOrigin;
import org.springframework.web.servlet.config.annotation.CorsRegistry;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.web.servlet.view.RedirectView;
import io.swagger.annotations.Api;
import org.springframework.context.annotation.Configuration;
import org.springframework.scheduling.annotation.EnableScheduling;
@Configuration
@EnableScheduling
@CrossOrigin(maxAge = 3600)
@RestController
@Api(value="Authentification", description="Routes for login & register")
public class Controller {
Connection c = null;
PreparedStatement stmt = null;
boolean isLogged = false;
int id;
private static String EMPTY = "";
Twitter twitter = TwitterFactory.getSingleton();
AccessToken accessTokenTwitter = null;
RequestToken requestToken = null;
BufferedReader br = null;
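// Note: this controller keeps one shared JDBC connection, opened in the constructor
// from the POSTGRES_DB, POSTGRES_USER and POSTGRES_PASSWORD environment variables,
// and reuses the single PreparedStatement field for every query below.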
public Controller() {
id = 0;
try {
Class.forName("org.postgresql.Driver");
c = DriverManager
.getConnection("jdbc:postgresql://db:5432/" + System.getenv("POSTGRES_DB"),
System.getenv("POSTGRES_USER"), System.getenv("POSTGRES_PASSWORD"));
CreateTableDataBase(c, stmt);
} catch (Exception e) {
e.printStackTrace();
System.err.println(e.getClass().getName()+": "+e.getMessage());
System.exit(0);
}
//Actions.twitchStreamerIsOnline(1, "wisethug", c, stmt);
//Actions.youtubeGetNumberFriends(1, c, stmt);
//Actions.youtubeGetVideosLike(1, c, stmt);
//Actions.youtubeGetVideosDislike(1, c, stmt);
//Actions.githubGetRepo(34,c , stmt);
//Actions.githubGetCommentsRepo(34, "Martouche/BSQ", c, stmt);
//Actions.githubGetCommitsRepo(34, "Martouche/BSQ", c, stmt);
//Actions.githubPostComment(34,"Martouche", "BSQ","6f9d387ca6e1220fe9488180469d05084c72ca35", c , stmt);
//Actions.githubReactionComments(34, "Martouche", "BSQ", "37507663", c , stmt );
//Actions.youtubeReactionNewFriend(1,"xMrClyde", c, stmt );
//Reactions.gmailSendMail(1,"[email protected]", c, stmt );
//Actions.youtubeNewFriend()
//Actions.spotifyGetPlaylist(1, c, stmt);
}
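// Creates the schema on startup if it does not already exist and seeds the services,
// services_actions and services_reactions lookup tables; ids are random integers in
// [0, 1000), and each row is only inserted when no row with the same name exists.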
public void CreateTableDataBase(Connection c, PreparedStatement stmt) {
// Table users
try {
stmt = c.prepareStatement("CREATE TABLE IF NOT EXISTS users (id SERIAL PRIMARY KEY, name VARCHAR(250) NOT NULL, password VARCHAR(250) NULL, type VARCHAR(250) NOT NULL);");
stmt.execute();
} catch (Exception e) {
e.printStackTrace();
System.exit(0);
}
Random rand = new Random();
String GoogleId = Integer.toString(rand.nextInt(1000));
String SpotifyId = Integer.toString(rand.nextInt(1000));
String GithubId = Integer.toString(rand.nextInt(1000));
String LinkedinId = Integer.toString(rand.nextInt(1000));
String DiscordId = Integer.toString(rand.nextInt(1000));
String FacebookId = Integer.toString(rand.nextInt(1000));
String RedditId = Integer.toString(rand.nextInt(1000));
String TwitterId = Integer.toString(rand.nextInt(1000));
String TwitchId = Integer.toString(rand.nextInt(1000));
// Table Service
try {
stmt = c.prepareStatement("CREATE TABLE IF NOT EXISTS services (id INT NOT NULL, name VARCHAR(250) NOT NULL);" +
"INSERT INTO services (id, name) SELECT " + GoogleId + ", 'google' WHERE NOT EXISTS (SELECT * FROM services where name='google');" +
"INSERT INTO services (id, name) SELECT " + SpotifyId + ", 'spotify' WHERE NOT EXISTS (SELECT * FROM services where name='spotify');" +
"INSERT INTO services (id, name) SELECT " + GithubId + ", 'github' WHERE NOT EXISTS (SELECT * FROM services where name='github');" +
"INSERT INTO services (id, name) SELECT " + LinkedinId + ", 'linkdedin' WHERE NOT EXISTS (SELECT * FROM services where name='linkdedin');" +
"INSERT INTO services (id, name) SELECT " + DiscordId + ", 'discord' WHERE NOT EXISTS (SELECT * FROM services where name='discord');" +
"INSERT INTO services (id, name) SELECT " + FacebookId + ", 'facebook' WHERE NOT EXISTS (SELECT * FROM services where name='facebook');" +
"INSERT INTO services (id, name) SELECT " + RedditId + ", 'reddit' WHERE NOT EXISTS (SELECT * FROM services where name='reddit');" +
"INSERT INTO services (id, name) SELECT " + TwitchId + ", 'twitch' WHERE NOT EXISTS (SELECT * FROM services where name='twitch');" +
"INSERT INTO services (id, name) SELECT " + TwitterId + ", 'twitter' WHERE NOT EXISTS (SELECT * FROM services where name='twitter');");
stmt.execute();
} catch (Exception e) {
e.printStackTrace();
System.exit(0);
}
// Table User token Service
try {
stmt = c.prepareStatement("CREATE TABLE IF NOT EXISTS user_service_token (id_user VARCHAR(250), " +
"google_token VARCHAR(250), github_token VARCHAR(250), linkedin_token VARCHAR(250), " +
"spotify_token VARCHAR(250), discord_token VARCHAR(250), facebook_token VARCHAR(250), " +
"reddit_token VARCHAR(250), twitter_token VARCHAR(250), twitch_token VARCHAR(250));");
stmt.execute();
} catch (Exception e) {
e.printStackTrace();
System.exit(0);
}
// Table Service Action
try {
stmt = c.prepareStatement("CREATE TABLE IF NOT EXISTS services_actions (id INT NOT NULL, id_service INT NOT NULL, name VARCHAR(250) NOT NULL);" +
"INSERT INTO services_actions (id, id_service, name) SELECT " + Integer.toString(rand.nextInt(1000)) + ", " + GoogleId + ", 'gmailNewMail' WHERE NOT EXISTS (SELECT * FROM services_actions where name='gmailNewMail');" +
"INSERT INTO services_actions (id, id_service, name) SELECT " + Integer.toString(rand.nextInt(1000)) + ", 0, 'wetherTemperatureMax' WHERE NOT EXISTS (SELECT * FROM services_actions where name='wetherTemperatureMax');" +
"INSERT INTO services_actions (id, id_service, name) SELECT " + Integer.toString(rand.nextInt(1000)) + ", 0, 'wetherTemperatureMin' WHERE NOT EXISTS (SELECT * FROM services_actions where name='wetherTemperatureMin');" +
"INSERT INTO services_actions (id, id_service, name) SELECT " + Integer.toString(rand.nextInt(1000)) + ", 0, 'wetherHumidityMin' WHERE NOT EXISTS (SELECT * FROM services_actions where name='wetherHumidityMin');" +
"INSERT INTO services_actions (id, id_service, name) SELECT " + Integer.toString(rand.nextInt(1000)) + ", 0, 'wetherHumidityMax' WHERE NOT EXISTS (SELECT * FROM services_actions where name='wetherHumidityMax');" +
"INSERT INTO services_actions (id, id_service, name) SELECT " + Integer.toString(rand.nextInt(1000)) + ", " + TwitchId + ", 'twitchStreamerIsOnline' WHERE NOT EXISTS (SELECT * FROM services_actions where name='twitchStreamerIsOnline');" +
"INSERT INTO services_actions (id, id_service, name) SELECT " + Integer.toString(rand.nextInt(1000)) + ", " + GoogleId + ", 'youtubeNewFriend' WHERE NOT EXISTS (SELECT * FROM services_actions where name='youtubeNewFriend');" +
"INSERT INTO services_actions (id, id_service, name) SELECT " + Integer.toString(rand.nextInt(1000)) + ", " + GoogleId + ", 'youtubeLikingVideo' WHERE NOT EXISTS (SELECT * FROM services_actions where name='youtubeLikingVideo');" +
"INSERT INTO services_actions (id, id_service, name) SELECT " + Integer.toString(rand.nextInt(1000)) + ", " + GoogleId + ", 'youtubeDislikingVideo' WHERE NOT EXISTS (SELECT * FROM services_actions where name='youtubeDislikingVideo');" +
"INSERT INTO services_actions (id, id_service, name) SELECT " + Integer.toString(rand.nextInt(1000)) + ", " + GithubId + ", 'githubNewRepo' WHERE NOT EXISTS (SELECT * FROM services_actions where name='githubNewRepo');" +
"INSERT INTO services_actions (id, id_service, name) SELECT " + Integer.toString(rand.nextInt(1000)) + ", " + GithubId + ", 'githubNewCommitsRepo' WHERE NOT EXISTS (SELECT * FROM services_actions where name='githubNewCommitsRepo');" +
"INSERT INTO services_actions (id, id_service, name) SELECT " + Integer.toString(rand.nextInt(1000)) + ", " + SpotifyId + ", 'spotifyListenJul' WHERE NOT EXISTS (SELECT * FROM services_actions where name='spotifyListenJul');" +
"INSERT INTO services_actions (id, id_service, name) SELECT " + Integer.toString(rand.nextInt(1000)) + ", " + SpotifyId + ", 'spotifyListen' WHERE NOT EXISTS (SELECT * FROM services_actions where name='spotifyListen');" +
"INSERT INTO services_actions (id, id_service, name) SELECT " + Integer.toString(rand.nextInt(1000)) + ", " + GithubId + ", 'githubNewCommentsRepo' WHERE NOT EXISTS (SELECT * FROM services_actions where name='githubNewCommentsRepo');" +
"INSERT INTO services_actions (id, id_service, name) SELECT " + Integer.toString(rand.nextInt(1000)) + ", " + SpotifyId + ", 'spotifyNewPlaylist' WHERE NOT EXISTS (SELECT * FROM services_actions where name='spotifyNewPlaylist');");
stmt.execute();
} catch (Exception e) {
e.printStackTrace();
System.exit(0);
}
// Table Service Reaction
try {
stmt = c.prepareStatement("CREATE TABLE IF NOT EXISTS services_reactions (id INT NOT NULL, id_service INT NOT NULL, name VARCHAR(250) NOT NULL);" +
"INSERT INTO services_reactions (id, id_service, name) SELECT " + Integer.toString(rand.nextInt(1000)) + ", " + GithubId + ", 'githubPostComment' WHERE NOT EXISTS (SELECT * FROM services_reactions where name='githubPostComment');" +
"INSERT INTO services_reactions (id, id_service, name) SELECT " + Integer.toString(rand.nextInt(1000)) + ", " + GithubId + ", 'githubCreateRepo' WHERE NOT EXISTS (SELECT * FROM services_reactions where name='githubCreateRepo');" +
"INSERT INTO services_reactions (id, id_service, name) SELECT " + Integer.toString(rand.nextInt(1000)) + ", " + GithubId + ", 'githubReactionComments' WHERE NOT EXISTS (SELECT * FROM services_reactions where name='githubReactionComments');" +
"INSERT INTO services_reactions (id, id_service, name) SELECT " + Integer.toString(rand.nextInt(1000)) + ", " + TwitterId + ", 'twitterNewPost' WHERE NOT EXISTS (SELECT * FROM services_reactions where name='twitterNewPost');" +
"INSERT INTO services_reactions (id, id_service, name) SELECT " + Integer.toString(rand.nextInt(1000)) + ", " + GoogleId + ", 'gmailSendMail' WHERE NOT EXISTS (SELECT * FROM services_reactions where name='gmailSendMail');" +
"INSERT INTO services_reactions (id, id_service, name) SELECT " + Integer.toString(rand.nextInt(1000)) + ", " + GoogleId + ", 'youtubeReactionNewFriend' WHERE NOT EXISTS (SELECT * FROM services_reactions where name='youtubeReactionNewFriend');" +
"INSERT INTO services_reactions (id, id_service, name) SELECT " + Integer.toString(rand.nextInt(1000)) + ", " + SpotifyId + ", 'spotifyVolumeMax' WHERE NOT EXISTS (SELECT * FROM services_reactions where name='spotifyVolumeMax');" +
"INSERT INTO services_reactions (id, id_service, name) SELECT " + Integer.toString(rand.nextInt(1000)) + ", " + SpotifyId + ", 'spotifyPause' WHERE NOT EXISTS (SELECT * FROM services_reactions where name='spotifyPause');" +
"INSERT INTO services_reactions (id, id_service, name) SELECT " + Integer.toString(rand.nextInt(1000)) + ", " + SpotifyId + ", 'spotifyNext' WHERE NOT EXISTS (SELECT * FROM services_reactions where name='spotifyNext');");
stmt.execute();
} catch (Exception e) {
e.printStackTrace();
System.exit(0);
}
// Table User Actions Reaction
try {
stmt = c.prepareStatement("CREATE TABLE IF NOT EXISTS user_actions_reactions (id_user INT NOT NULL, id_service_action INT NOT NULL, value_service_action VARCHAR(250) NOT NULL, id_service_reaction INT NOT NULL, value_service_reaction VARCHAR(250) NOT NULL);");
stmt.execute();
} catch (Exception e) {
e.printStackTrace();
System.exit(0);
}
}
@RequestMapping(value = "/about.json", method = RequestMethod.GET)
public List<String> AboutJson() {
List<String> records = new ArrayList<String>();
System.out.println("Working Directory = " +
System.getProperty("user.dir"));
try
{
BufferedReader reader = new BufferedReader(new FileReader("/usr/app/about.json"));
String line;
while ((line = reader.readLine()) != null)
{
records.add(line);
}
reader.close();
return records;
}
catch (Exception e)
{
System.err.format("Exception occurred trying to read '%s'.", "about.json");
e.printStackTrace();
return null;
}
}
@RequestMapping(value = "/logout", method = RequestMethod.GET)
public RedirectView logoutUser() {
System.out.println("Je suis logout dans le serveur");
isLogged = false;
id = 0;
RedirectView redirectView = new RedirectView();
redirectView.setUrl("http://localhost:8081/login");
return redirectView;
}
@RequestMapping(value = "/register", method = RequestMethod.GET)
public RedirectView registerPost(@RequestParam(value = "name") String name, @RequestParam(value = "pwd") String pwd) {
RegisterController mine = new RegisterController(name, pwd, c, stmt);
if (!isLogged) {
id = mine.id;
isLogged = true;
}
RedirectView redirectView = new RedirectView();
if (mine.state == 1)
redirectView.setUrl("http://localhost:8081/signup?value=error1");
else
redirectView.setUrl("http://localhost:8081/home?id=" + id + "");
return redirectView;
}
@RequestMapping(value = "/login", method = RequestMethod.GET)
public RedirectView loginPost(@RequestParam(value = "name") String name, @RequestParam(value = "pwd") String pwd) {
LoginController mine = new LoginController(name, pwd, c, stmt);
if (!isLogged) {
id = mine.id;
isLogged = true;
}
RedirectView redirectView = new RedirectView();
redirectView.setUrl("http://localhost:8081/home?id=" + id + "");
return redirectView;
}
// Discord Routes
@RequestMapping(value = "/oauth2/autorize/discord", method = RequestMethod.GET)
public RedirectView getUrlAutorizeDiscord() {
RedirectView redirectView = new RedirectView();
redirectView.setUrl("https://discordapp.com/api/oauth2/authorize?client_id=679280369891147807&redirect_uri=http%3A%2F%2Flocalhost%3A8080%2Foauth2%2Fcallback%2Fdiscord&response_type=code&scope=identify%20email");
return redirectView;
}
@RequestMapping(value = "/oauth2/callback/discord", method = RequestMethod.GET)
public RedirectView getTokenDiscord(@RequestParam(value = "code") String code) {
System.out.println("mon code Discord = " + code);
DiscordController mine = new DiscordController(id, code, c, stmt);
RedirectView redirectView = new RedirectView();
redirectView.setUrl("http://localhost:8081/home?id=" + id + "");
return redirectView;
}
@RequestMapping(value = "/oauth2/logout/discord", method = RequestMethod.GET)
public RedirectView logoutDiscord(@RequestParam(value = "userid") String userId) {
try {
stmt = c.prepareStatement("UPDATE user_service_token SET discord_token = NULL WHERE id_user = '" + userId + "';");
stmt.execute();
} catch (Exception e) {
System.out.println(e);
}
RedirectView redirectView = new RedirectView();
redirectView.setUrl("http://localhost:8081/home?id=" + userId + "");
return redirectView;
}
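// Hedged sketch (not part of the original class): the token-clearing UPDATE used by
// the logout routes, rewritten with a bound parameter instead of string concatenation.
// "clearToken" is a hypothetical helper name; the column name is expected to come from
// a fixed whitelist, so only the user id is bound.
private void clearToken(String column, String userId) {
try {
PreparedStatement ps = c.prepareStatement(
"UPDATE user_service_token SET " + column + " = NULL WHERE id_user = ?");
ps.setString(1, userId);
ps.execute();
} catch (Exception e) {
System.out.println(e);
}
}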
// Twitch Routes
@RequestMapping(value = "/oauth2/autorize/twitch", method = RequestMethod.GET)
public RedirectView getUrlAutorizeTwitch() {
RedirectView redirectView = new RedirectView();
redirectView.setUrl("https://id.twitch.tv/oauth2/authorize?response_type=code&client_id=riddoiwsiud1uyk92zkzwrdgipurqp&redirect_uri=http://localhost:8080/oauth2/callback/twitch&scope=viewing_activity_read&state=c3ab8aa609ea11e793aa92361f002672");
return redirectView;
}
@RequestMapping(value = "/oauth2/callback/twitch", method = RequestMethod.GET)
public RedirectView getTokenTwitch(@RequestParam(value = "code") String code) {
System.out.println("mon code twitch = " + code);
TwitchController mine = new TwitchController(id, code, c, stmt);
RedirectView redirectView = new RedirectView();
redirectView.setUrl("http://localhost:8081/home?id=" + id + "");
return redirectView;
}
@RequestMapping(value = "/oauth2/logout/twitch", method = RequestMethod.GET)
public RedirectView logoutTwitch(@RequestParam(value = "userid") String userId) {
try {
stmt = c.prepareStatement("UPDATE user_service_token SET twitch_token = NULL WHERE id_user = '" + userId + "';");
stmt.execute();
} catch (Exception e) {
System.out.println(e);
}
RedirectView redirectView = new RedirectView();
redirectView.setUrl("http://localhost:8081/home?id=" + userId + "");
return redirectView;
}
// Reddit Routes
@RequestMapping(value = "/oauth2/autorize/reddit", method = RequestMethod.GET)
public RedirectView getUrlAutorizeReddit() {
RedirectView redirectView = new RedirectView();
redirectView.setUrl("https://www.reddit.com/api/v1/authorize?client_id=O8RWcER1WbCJpg&response_type=code&state=adeidhiahidlhde&redirect_uri=http://localhost:8080/oauth2/callback/reddit&duration=permanent&scope=*");
return redirectView;
}
@RequestMapping(value = "/oauth2/callback/reddit", method = RequestMethod.GET)
public RedirectView getTokenReddit(@RequestParam(value = "code") String code) {
System.out.println("mon code reddit = " + code);
RedditController mine = new RedditController(id, code, c, stmt);
RedirectView redirectView = new RedirectView();
redirectView.setUrl("http://localhost:8081/home?id=" + id + "");
return redirectView;
}
@RequestMapping(value = "/oauth2/logout/reddit", method = RequestMethod.GET)
public RedirectView logoutReddit(@RequestParam(value = "userid") String userId) {
try {
stmt = c.prepareStatement("UPDATE user_service_token SET reddit_token = NULL WHERE id_user = '" + userId + "';");
stmt.execute();
} catch (Exception e) {
System.out.println(e);
}
RedirectView redirectView = new RedirectView();
redirectView.setUrl("http://localhost:8081/home?id=" + userId + "");
return redirectView;
}
// Facebook Routes
@RequestMapping(value = "/oauth2/autorize/facebook", method = RequestMethod.GET)
public RedirectView getUrlAutorizeFacebook() {
RedirectView redirectView = new RedirectView();
redirectView.setUrl("https://www.facebook.com/v6.0/dialog/oauth?client_id=208135047001196&redirect_uri=http://localhost:8080/oauth2/callback/facebook&state=st=state123abc,ds=123456789&scope=email");
return redirectView;
}
@RequestMapping(value = "/oauth2/callback/facebook", method = RequestMethod.GET)
public RedirectView getTokenFacebook(@RequestParam(value = "code") String code) {
System.out.println("mon code Facebook = " + code);
FacebookController mine = new FacebookController(id, code, c, stmt);
RedirectView redirectView = new RedirectView();
System.out.println("mon id sorti Facebook = " + id);
redirectView.setUrl("http://localhost:8081/home?id=" + id + "");
return redirectView;
}
@RequestMapping(value = "/oauth2/logout/facebook", method = RequestMethod.GET)
public RedirectView logoutFacebook(@RequestParam(value = "userid") String userId) {
try {
stmt = c.prepareStatement("UPDATE user_service_token SET facebook_token = NULL WHERE id_user = '" + userId + "';");
stmt.execute();
} catch (Exception e) {
System.out.println(e);
}
RedirectView redirectView = new RedirectView();
redirectView.setUrl("http://localhost:8081/home?id=" + userId + "");
return redirectView;
}
// Twitter Routes
@RequestMapping(value = "/oauth2/autorize/twitter", method = RequestMethod.GET)
public RedirectView getUrlAutorizeTwitter() throws Exception {
BufferedReader br = null;
String clientId = "RyDqv5K1O7VcivZjVUY7oppsS";
String clientSecret = "kEJUgA7vzCmtpydZ13bO2WgY2FcBnAwqMl27E0jo1edBiMIHHZ";
twitter.setOAuthConsumer(clientId, clientSecret);
requestToken = twitter.getOAuthRequestToken();
br = new BufferedReader(new InputStreamReader(System.in));
RedirectView redirectView = new RedirectView(requestToken.getAuthorizationURL());
return redirectView;
}
@RequestMapping(value = "/oauth2/callback/twitter", method = RequestMethod.GET)
public RedirectView getTokenTwitter(@RequestParam(value = "oauth_token") String oauth_token, @RequestParam(value = "oauth_verifier") String oauth_verifier) {
try{
if(oauth_token.length() > 0){
this.accessTokenTwitter = this.twitter.getOAuthAccessToken(requestToken, oauth_verifier);
}else{
this.accessTokenTwitter = this.twitter.getOAuthAccessToken();
}
} catch (TwitterException te) {
if(401 == te.getStatusCode()){
System.out.println("Unable to get the access token.");
}else{
te.printStackTrace();
}
}
System.out.println("acces token twitter " + this.accessTokenTwitter);
TwitterController mine = new TwitterController(id, "fakeaccestokenpasbesoinenfaite", c, stmt);
RedirectView redirectView = new RedirectView();
redirectView.setUrl("http://localhost:8081/home?id=" + id + "");
return redirectView;
}
@RequestMapping(value = "/oauth2/logout/twitter", method = RequestMethod.GET)
public RedirectView logoutTwitter(@RequestParam(value = "userid") String userId) {
twitter = TwitterFactory.getSingleton();
try {
stmt = c.prepareStatement("UPDATE user_service_token SET twitter_token = NULL WHERE id_user = '" + userId + "';");
stmt.execute();
} catch (Exception e) {
System.out.println(e);
}
RedirectView redirectView = new RedirectView();
redirectView.setUrl("http://localhost:8081/home?id=" + userId + "");
return redirectView;
}
// Linkedin Routes
@RequestMapping(value = "/oauth2/autorize/linkedin", method = RequestMethod.GET)
public RedirectView getUrlAutorizeLinkedin() {
RedirectView redirectView = new RedirectView();
redirectView.setUrl("https://www.linkedin.com/oauth/v2/authorization?response_type=code&client_id=86yu19zq37j60p&redirect_uri=http://localhost:8080/oauth2/callback/linkedin?&scope=r_liteprofile%20r_emailaddress%20w_member_social");
return redirectView;
}
@RequestMapping(value = "/oauth2/callback/linkedin", method = RequestMethod.GET)
public RedirectView getTokenLinkedin(@RequestParam(value = "code") String code) {
System.out.println("mon code linkedin = " + code);
LinkedinController mine = new LinkedinController(id, code, c, stmt);
RedirectView redirectView = new RedirectView();
redirectView.setUrl("http://localhost:8081/home?id=" + id + "");
return redirectView;
}
@RequestMapping(value = "/oauth2/logout/linkedin", method = RequestMethod.GET)
public RedirectView logoutLinkedin(@RequestParam(value = "userid") String userId) {
try {
stmt = c.prepareStatement("UPDATE user_service_token SET linkedin_token = NULL WHERE id_user = '" + userId + "';");
stmt.execute();
} catch (Exception e) {
System.out.println(e);
}
RedirectView redirectView = new RedirectView();
redirectView.setUrl("http://localhost:8081/home?id=" + userId + "");
return redirectView;
}
// Spotify Routes
@RequestMapping(value = "/oauth2/autorize/spotify", method = RequestMethod.GET)
public RedirectView getUrlAutorizeSpotify() {
RedirectView redirectView = new RedirectView();
redirectView.setUrl("https://accounts.spotify.com/authorize?client_id=b348a012872f4fe78567e7cea9e20c7c&response_type=code&redirect_uri=http://localhost:8080/oauth2/callback/spotify&scope=user-read-private+user-read-currently-playing+user-read-playback-state+user-modify-playback-state+user-library-read+user-follow-read+playlist-read-private+playlist-read-collaborative");
return redirectView;
}
@RequestMapping(value = "/oauth2/callback/spotify", method = RequestMethod.GET)
public RedirectView getTokenSpotify(@RequestParam(value = "code") String code) {
SpotifyController mine = new SpotifyController(id, code, c, stmt);
RedirectView redirectView = new RedirectView();
redirectView.setUrl("http://localhost:8081/home?id=" + id + "");
return redirectView;
}
@RequestMapping(value = "/oauth2/logout/spotify", method = RequestMethod.GET)
public RedirectView logoutSpotify(@RequestParam(value = "userid") String userId) {
try {
stmt = c.prepareStatement("UPDATE user_service_token SET spotify_token = NULL WHERE id_user = '" + userId + "';");
stmt.execute();
} catch (Exception e) {
System.out.println(e);
}
RedirectView redirectView = new RedirectView();
redirectView.setUrl("http://localhost:8081/home?id=" + userId + "");
return redirectView;
}
// Github Routes
@RequestMapping(value = "/oauth2/autorize/github", method = RequestMethod.GET)
public RedirectView getUrlAutorizeGithub() {
RedirectView redirectView = new RedirectView();
redirectView.setUrl("https://github.com/login/oauth/authorize?scope=user:email,repo&client_id=1b8ddffb28f26996c08f");
return redirectView;
}
@RequestMapping(value = "/oauth2/callback/github", method = RequestMethod.GET)
public RedirectView getTokenGitHub(@RequestParam(value = "code") String code) {
GitHubController mine = new GitHubController(id, code, c, stmt);
if (!isLogged) {
id = mine.id;
isLogged = true;
}
RedirectView redirectView = new RedirectView();
redirectView.setUrl("http://localhost:8081/home?id=" + id + "");
return redirectView;
}
@RequestMapping(value = "/oauth2/logout/github", method = RequestMethod.GET)
public RedirectView logoutGithub(@RequestParam(value = "userid") String userId) {
try {
stmt = c.prepareStatement("UPDATE user_service_token SET github_token = NULL WHERE id_user = '" + userId + "';");
stmt.execute();
} catch (Exception e) {
System.out.println(e);
}
RedirectView redirectView = new RedirectView();
redirectView.setUrl("http://localhost:8081/home?id=" + userId + "");
return redirectView;
}
// Google Routes
@RequestMapping(value = "/oauth2/autorize/google", method = RequestMethod.GET)
public RedirectView getUrlAutorizeGoogle() {
RedirectView redirectView = new RedirectView();
redirectView.setUrl("https://accounts.google.com/o/oauth2/v2/auth?access_type=offline&scope=https%3A%2F%2Fmail.google.com+https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fgmail.send+https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fuserinfo.email+https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fyoutube&response_type=code&client_id=377968007025-013sa07vehs51n1rau6qfmplp7esq964.apps.googleusercontent.com&redirect_uri=http%3A%2F%2Flocalhost%3A8080%2Foauth2%2Fcallback%2Fgoogle");
return redirectView;
}
@RequestMapping(value = "/oauth2/callback/google", method = RequestMethod.GET)
public RedirectView getTokenGoogle(@RequestParam(value = "code") String code) {
GoogleController mine = new GoogleController(id, code, c, stmt);
if (!isLogged) {
id = mine.id;
isLogged = true;
}
System.out.println("mon putain d'id = " + mine.getId());
RedirectView redirectView = new RedirectView();
redirectView.setUrl("http://localhost:8081/home?id=" + id + "");
return redirectView;
}
@RequestMapping(value = "/oauth2/logout/google", method = RequestMethod.GET)
public RedirectView logoutGoogle(@RequestParam(value = "userid") String userId) {
try {
stmt = c.prepareStatement("UPDATE user_service_token SET google_token = NULL WHERE id_user = '" + userId + "';");
stmt.execute();
} catch (Exception e) {
System.out.println(e);
}
RedirectView redirectView = new RedirectView();
redirectView.setUrl("http://localhost:8081/home?id=" + userId + "");
return redirectView;
}
@RequestMapping(value = "/getId", method = RequestMethod.GET)
public String GetId(@RequestParam(value = "email") String email) {
String id = null;
try {
stmt = c.prepareStatement("SELECT id FROM users WHERE email = '" + email + "' ;");
ResultSet rs = stmt.executeQuery();
while (rs.next()) {
id = rs.getString("text");
}
rs.close();
} catch (Exception e) {
System.out.println(e);
}
return id;
}
public List<String> getServiceIdByName(String[] nameService) {
List<String> data = new ArrayList<String>();
int size = nameService.length;
for (int i=0; i<size; i++) {
try {
stmt = c.prepareStatement("SELECT id FROM services where name='" + nameService[i] + "'");
ResultSet rs = stmt.executeQuery();
while (rs.next()) {
data.add(Integer.toString(rs.getInt(1)));
}
rs.close();
}catch (Exception e) {
System.out.println(e);
}
}
return data;
}
public List<String> getNameReactionByServiceId(List<String> idServices) {
List<String> data = new ArrayList<String>();
for(String idservice : idServices) {
System.out.println(idservice);
try {
stmt = c.prepareStatement("SELECT name FROM services_reactions WHERE id_service = '" + idservice + "'");
ResultSet rs = stmt.executeQuery();
while (rs.next()) {
data.add(rs.getString("name"));
}
rs.close();
} catch (Exception e) {
System.out.println(e);
}
}
System.out.println("mes reactions dispo");
for (String namereaction : data)
System.out.println("-> " + namereaction);
return data;
}
public List<String> getNameActionByServiceId(List<String> idServices) {
List<String> data = new ArrayList<String>();
try {
stmt = c.prepareStatement("SELECT name FROM services_actions WHERE id_service = '0';");
ResultSet rs = stmt.executeQuery();
while (rs.next()) {
data.add(rs.getString("name"));
}
rs.close();
} catch (Exception e) {
System.out.println(e);
}
for(String idservice : idServices) {
System.out.println(idservice);
try {
stmt = c.prepareStatement("SELECT name FROM services_actions WHERE id_service = '" + idservice + "';");
ResultSet rs = stmt.executeQuery();
while (rs.next()) {
System.out.println("caca " + rs.getString("name"));
data.add(rs.getString("name"));
}
rs.close();
} catch (Exception e) {
System.out.println(e);
}
}
System.out.println("mes actions dispo");
for (String nameaction : data)
System.out.println("-> " + nameaction);
return data;
}
public String[] getServiceByUser(String userId) {
String[] data = new String[9];
try {
stmt = c.prepareStatement("SELECT * FROM user_service_token WHERE id_user = '" + userId + "'");
ResultSet rs = stmt.executeQuery();
while (rs.next()) {
data[0] = (rs.getString(2) == null) ? null : "google";
data[1] = (rs.getString(3) == null) ? null : "github";
data[2] = (rs.getString(4) == null) ? null : "linkedin";
data[3] = (rs.getString(5) == null) ? null : "spotify";
data[4] = (rs.getString(6) == null) ? null : "discord";
data[5] = (rs.getString(7) == null) ? null : "facebook";
data[6] = (rs.getString(8) == null) ? null : "reddit";
data[7] = (rs.getString(9) == null) ? null : "twitter";
data[8] = (rs.getString(10) == null) ? null : "twitch";
}
rs.close();
} catch (Exception e) {
System.out.println(e);
}
int size = data.length;
for (int i=0; i<size; i++) {
System.out.println(data[i]);
}
System.out.println("mes column");
return data;
}
@CrossOrigin
@RequestMapping(value = "/getActionForUser", method = RequestMethod.GET)
public String GetAction(@RequestParam(value = "userid") String userId) {
System.out.println("monuid user dans ma req getaction " + userId);
String[] serviceName = getServiceByUser(userId);
List<String> serviceId = getServiceIdByName(serviceName);
List<String> actionName = getNameActionByServiceId(serviceId);
String json = new Gson().toJson(actionName);
System.out.println("mon JSON : " + json);
return json;
}
@CrossOrigin
@RequestMapping(value = "/getReactionForUser", method = RequestMethod.GET)
public String GetReaction(@RequestParam(value = "userid") String userId) {
System.out.println("monuid user dans ma req getreaction" + userId);
String[] serviceName = getServiceByUser(userId);
List<String> serviceId = getServiceIdByName(serviceName);
List<String> actionName = getNameReactionByServiceId(serviceId);
String json = new Gson().toJson(actionName);
System.out.println("mon JSON : " + json);
return json;
}
@CrossOrigin
@RequestMapping(value = "/getServiceForUser", method = RequestMethod.GET)
public String GetService(@RequestParam(value = "userid") String userId) {
System.out.println("monuid user dans ma req getServiceForUser" + userId);
List<String> newservicename = new ArrayList<String>();
String[] serviceName = getServiceByUser(userId);
int size = serviceName.length;
for (int i=0; i<size; i++) {
if (serviceName[i] != null)
newservicename.add(serviceName[i]);
System.out.println(serviceName[i]);
}
String json = new Gson().toJson(newservicename);
System.out.println("mon JSON : " + json);
return json;
}
@CrossOrigin
@RequestMapping(value = "/getActionReactionByUser", method = RequestMethod.GET)
public String GetActionReaction(@RequestParam(value = "userid") String userId) {
System.out.println("monuid user dans ma req getActionReactionByUser" + userId);
String valueaction = "";
String valuereaction = "";
List<String> allactionreaction = new ArrayList<String>();
try {
stmt = c.prepareStatement("SELECT * FROM user_actions_reactions WHERE id_user = " + userId + "");
ResultSet rs = stmt.executeQuery();
while (rs.next()) {
String nameaction = getActionNamebyId(rs.getInt("id_service_action"));
String namereaction = getReactionNamebyId(rs.getInt("id_service_reaction"));
if (rs.getString("value_service_action").isEmpty()) {
valueaction = "null";
} else {
valueaction = rs.getString("value_service_action");
}
if (rs.getString("value_service_reaction").isEmpty()) {
valuereaction = "null";
} else {
valuereaction = rs.getString("value_service_reaction");
}
System.out.println(valueaction);
System.out.println(valuereaction);
allactionreaction.add(nameaction + ":" + valueaction + "|" + namereaction + ":" + valuereaction);
}
rs.close();
} catch (Exception e) {
System.out.println(e);
}
String json = new Gson().toJson(allactionreaction);
System.out.println("mon JSON11 : " + json);
return json;
}
@RequestMapping(value = "/getEmail", method = RequestMethod.GET)
public String GetEmail(@RequestParam(value = "id") String id) {
String email = null;
try {
stmt = c.prepareStatement("SELECT email FROM users WHERE id = '" + id + "' ;");
ResultSet rs = stmt.executeQuery();
while (rs.next()) {
email = rs.getString("text");
}
rs.close();
} catch (Exception e) {
System.out.println(e);
}
return email;
}
public String getReactionNamebyId(int idAction) {
try {
stmt = c.prepareStatement("SELECT name FROM services_reactions WHERE id = " + idAction + ";");
ResultSet rs = stmt.executeQuery();
while (rs.next())
return rs.getString(1);
} catch (Exception e) {
System.out.println(e);
}
return null;
}
public String getActionNamebyId(int idAction) {
try {
stmt = c.prepareStatement("SELECT name FROM services_actions WHERE id = " + idAction + ";");
ResultSet rs = stmt.executeQuery();
while (rs.next())
return rs.getString(1);
} catch (Exception e) {
System.out.println(e);
}
return null;
}
public int getActionIdbyName(String nameAction) {
try {
stmt = c.prepareStatement("SELECT id FROM services_actions WHERE name = '" + nameAction + "';");
ResultSet rs = stmt.executeQuery();
while (rs.next())
return rs.getInt(1);
} catch (Exception e) {
System.out.println(e);
}
return 0;
}
public int getReactionIdbyName(String nameReaction) {
try {
stmt = c.prepareStatement("SELECT id FROM services_reactions WHERE name = '" + nameReaction + "';");
ResultSet rs = stmt.executeQuery();
while (rs.next())
return rs.getInt(1);
} catch (Exception e) {
System.out.println(e);
}
return 0;
}
@RequestMapping(value = "/deleteActionReactionForUser", method = RequestMethod.GET)
public RedirectView deleteActionReaction(@RequestParam(value = "userid") String userId,
@RequestParam(value = "actionName") String actionName,
@RequestParam(value = "actionValue") String actionValue,
@RequestParam(value = "reactionName") String reactionName,
@RequestParam(value = "reactionValue") String reactionValue) {
int id_service_action = getActionIdbyName(actionName);
int id_service_reaction = getReactionIdbyName(reactionName);
System.out.println("mon actionValue - " + actionValue + " et ma reactionValue - " + reactionValue);
if (actionValue.equals(null)) {
actionValue = "";
System.out.println("ma nouvelle action value - " + actionValue);
}
if (reactionValue.equals(null))
reactionValue = "";
int int_user_id = Integer.parseInt(userId);
RedirectView redirectView = new RedirectView();
redirectView.setUrl("http://localhost:8081/home?id=" + userId);
try {
stmt = c.prepareStatement("DELETE FROM user_actions_reactions WHERE id_user = " + userId + " AND id_service_action = " + id_service_action + " AND value_service_action = '" + actionValue + "' AND id_service_reaction = " + id_service_reaction + " AND value_service_reaction = '" + reactionValue + "';");
System.out.println("ma requete quand je delete : " + stmt);
stmt.execute();
} catch (Exception e) {
System.out.println(e);
}
return redirectView;
}
@RequestMapping(value = "/postActionReactionForUser", method = RequestMethod.GET)
public String PostActionReaction(@RequestParam(value = "userid") String userId,
@RequestParam(value = "actionName") String actionName,
@RequestParam(value = "actionValue") String actionValue,
@RequestParam(value = "reactionName") String reactionName,
@RequestParam(value = "reactionValue") String reactionValue) {
int id_service_action = getActionIdbyName(actionName);
int id_service_reaction = getReactionIdbyName(reactionName);
int int_user_id = Integer.parseInt(userId);
if (actionValue == null || EMPTY.equals(actionValue))
actionValue = "null";
if (reactionValue == null || EMPTY.equals(reactionValue))
reactionValue = "null";
System.out.println("Posting an action/reaction pair");
System.out.println(userId);
System.out.println(actionName);
System.out.println(actionValue);
System.out.println(reactionName);
System.out.println(reactionValue);
if (actionName.equals("gmailNewMail"))
actionValue = String.valueOf(Actions.getGmailCurrentValueNumberMail(int_user_id, c, stmt));
if (actionName.equals("spotifyNewPlaylist"))
actionValue = String.valueOf(Actions.spotifyGetPlaylist(int_user_id, c, stmt));
if (actionName.equals("youtubeNewFriend"))
actionValue = String.valueOf(Actions.youtubeGetNumberFriends(int_user_id, c, stmt));
if (actionName.equals("youtubeLikingVideo"))
actionValue = String.valueOf(Actions.youtubeGetVideosLike(int_user_id, c, stmt));
if (actionName.equals("youtubeDislikingVideo"))
actionValue = String.valueOf(Actions.youtubeGetVideosDislike(int_user_id, c, stmt));
if (actionName.equals("githubNewRepo"))
actionValue = String.valueOf(Actions.githubGetRepo(int_user_id, c, stmt));
if (actionName.equals("githubNewCommitsRepo"))
actionValue = actionValue + ":" + Actions.githubGetCommitsRepo(int_user_id, actionValue, c, stmt);
if (actionName.equals("githubNewCommentsRepo"))
actionValue = actionValue + ":" + Actions.githubGetCommentsRepo(int_user_id, actionValue, c, stmt);
try {
stmt = c.prepareStatement("INSERT INTO user_actions_reactions " +
"(id_user, id_service_action, value_service_action, id_service_reaction, value_service_reaction) " +
"SELECT " + userId + ", " + id_service_action + ", '" + actionValue + "'," + id_service_reaction + ", '" + reactionValue + "';");
stmt.execute();
System.out.println("mon post marche");
return "work";
} catch (Exception e) {
System.out.println(e);
}
return null;
}
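// Polls user_actions_reactions on the five-second cron schedule below: each stored
// pair is re-checked against the provider APIs and, when the action reports a change,
// the matching reaction is fired.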
@Scheduled(cron = "*/5 * * * * *")
public void updateDataBase() {
try {
stmt = c.prepareStatement("SELECT * FROM user_actions_reactions");
ResultSet rs = stmt.executeQuery();
while (rs.next()) {
int user_id = rs.getInt("id_user");
int action_id = rs.getInt("id_service_action");
String action_value = rs.getString("value_service_action");
int reaction_id = rs.getInt("id_service_reaction");
String reaction_value = rs.getString("value_service_reaction");
boolean resultaction = false;
String nameAction = getActionNamebyId(action_id);
System.out.println("mon action :" + nameAction + " -> value : " + action_value);
if (nameAction.equals("gmailNewMail"))
resultaction = Actions.gmailNewMail(user_id, action_value, c, stmt);
if (nameAction.equals("youtubeNewFriend"))
resultaction = Actions.youtubeNewFriend(user_id, action_value, c, stmt);
if (nameAction.equals("youtubeLikingVideo"))
resultaction = Actions.youtubeLikingVideo(user_id, action_value, c, stmt);
if (nameAction.equals("youtubeDislikingVideo"))
resultaction = Actions.youtubeDislikingVideo(user_id, action_value, c, stmt);
if (nameAction.equals("githubNewRepo"))
resultaction = Actions.githubNewRepo(user_id, action_value, c, stmt);
if (nameAction.equals("githubNewCommitsRepo"))
resultaction = Actions.githubNewCommitsRepo(user_id, action_value, c, stmt);
if (nameAction.equals("githubNewCommentsRepo"))
resultaction = Actions.githubNewCommentsRepo(user_id, action_value, c, stmt);
if (nameAction.equals("wetherTemperatureMax"))
resultaction = Actions.wetherTemperatureMax(user_id, action_value, c, stmt);
if (nameAction.equals("wetherTemperatureMin"))
resultaction = Actions.wetherTemperatureMin(user_id, action_value, c, stmt);
if (nameAction.equals("wetherHumidityMax"))
resultaction = Actions.wetherHumidityMax(user_id, action_value, c, stmt);
if (nameAction.equals("wetherHumidityMin"))
resultaction = Actions.wetherHumidityMin(user_id, action_value, c, stmt);
if (nameAction.equals("twitchStreamerIsOnline"))
resultaction = Actions.twitchStreamerIsOnline(user_id, action_value, c, stmt);
if (nameAction.equals("spotifyListen"))
resultaction = Actions.spotifyListen(user_id, c, stmt);
if (nameAction.equals("spotifyListenJul"))
resultaction = Actions.spotifyListenJul(user_id, c, stmt);
if (nameAction.equals("spotifyNewPlaylist"))
resultaction = Actions.spotifyNewPlaylist(user_id, action_value, c, stmt);
if (resultaction) {
System.out.println("mon action :" + nameAction + " a marché");
String nameReaction = getReactionNamebyId(reaction_id);
System.out.println("ma reaction :" + nameReaction + " -> value : " + reaction_value);
if (nameReaction.equals("githubPostComment"))
Reactions.githubPostComment(user_id, reaction_value, c ,stmt);
if (nameReaction.equals("githubCreateRepo"))
Reactions.githubCreateRepo(user_id, reaction_value, c ,stmt);
if (nameReaction.equals("githubReactionComments"))
Reactions.githubReactionComments(user_id, reaction_value, c ,stmt);
if (nameReaction.equals("youtubeReactionNewFriend"))
Reactions.youtubeReactionNewFriend(user_id, reaction_value, c ,stmt);
if (nameReaction.equals("gmailSendMail"))
Reactions.gmailSendMail(user_id, reaction_value, c ,stmt);
if (nameReaction.equals("twitterNewPost"))
Reactions.twitterNewPost(twitter, reaction_value);
if (nameReaction.equals("spotifyVolumeMax"))
Reactions.spotifyVolumeMax(user_id, c, stmt);
if (nameReaction.equals("spotifyPause"))
Reactions.spotifyPause(user_id, c, stmt);
if (nameReaction.equals("spotifyNext"))
Reactions.spotifyNext(user_id, c, stmt);
}
}
} catch (Exception e) {
System.out.println(e);
}
}
}
|
[
"\"POSTGRES_DB\"",
"\"POSTGRES_USER\"",
"\"POSTGRES_PASSWORD\""
] |
[] |
[
"POSTGRES_PASSWORD",
"POSTGRES_USER",
"POSTGRES_DB"
] |
[]
|
["POSTGRES_PASSWORD", "POSTGRES_USER", "POSTGRES_DB"]
|
java
| 3 | 0 | |
vendor/gopkg.in/tumblr/go-collins.v0/collins/collins.go
|
package collins
import (
"bufio"
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"strconv"
"strings"
yaml "gopkg.in/yaml.v2"
)
const (
maxHTTPCode = 299
)
var (
VERSION = "0.1.0"
)
// Client represents a client connection to a collins server. Requests to the
// various APIs are done by calling functions on the various services.
type Client struct {
client *http.Client
BaseURL *url.URL
User string
Password string
Assets *AssetService
AssetTypes *AssetTypeService
Logs *LogService
States *StateService
Tags *TagService
Management *ManagementService
IPAM *IPAMService
Firehose *FirehoseService
}
// Error represents an error returned from collins. Collins returns
// errors in JSON format, which we marshal in to this struct.
type Error struct {
Status string `json:"status"`
Data struct {
Message string `json:"message"`
} `json:"data"`
}
// Container is used to deserialize the JSON response from the API.
type Container struct {
CollinsStatus string `json:"status"`
Data interface{} `json:"data"`
}
// Response is our custom response type. It has the HTTP response embedded for
// debugging purposes. It also has embedded the `container` that the JSON
// response gets decoded into (if the caller to `Do` passes in a struct
// to decode into). Finally it contains all necessary data for pagination.
type Response struct {
*http.Response
*Container
PreviousPage int
CurrentPage int
NextPage int
TotalResults int
}
// PageOpts allows the caller to specify pagination options. Since Collins takes
// in pagination options via URL parameters we can use google/go-querystring to
// describe our pagination opts as structs. This also allows embedding of
// pagination options directly into other request option structs.
type PageOpts struct {
Page int `url:"page,omitempty"`
Size int `url:"size,omitempty"`
Sort string `url:"sort,omitempty"`
SortField string `url:"sortField,omitempty"`
}
// PaginationResponse is used to represent the pagination information coming
// back from the collins server.
type PaginationResponse struct {
PreviousPage int `json:"PreviousPage"`
CurrentPage int `json:"CurrentPage"`
NextPage int `json:"NextPage"`
TotalResults int `json:"TotalResults"`
}
func (e *Error) Error() string {
return e.Data.Message
}
// NewClient creates a Client struct and returns a pointer to it. This client is
// then used to query the various APIs collins provides.
func NewClient(username, password, baseurl string) (*Client, error) {
u, err := url.Parse(baseurl)
if err != nil {
return nil, err
}
c := &Client{
client: &http.Client{},
User: username,
Password: password,
BaseURL: u,
}
c.Assets = &AssetService{client: c}
c.AssetTypes = &AssetTypeService{client: c}
c.Logs = &LogService{client: c}
c.States = &StateService{client: c}
c.Tags = &TagService{client: c}
c.Management = &ManagementService{client: c}
c.IPAM = &IPAMService{client: c}
c.Firehose = &FirehoseService{client: c}
return c, nil
}
// NewClientFromYaml sets up a new Client, but reads the credentials and host
// from a yaml file on disk. The following paths are searched:
//
// * Path in COLLINS_CLIENT_CONFIG environment variable
// * ~/.collins.yml
// * /etc/collins.yml
// * /var/db/collins.yml
func NewClientFromYaml() (*Client, error) {
yamlPaths := []string{
os.Getenv("COLLINS_CLIENT_CONFIG"),
path.Join(os.Getenv("HOME"), ".collins.yml"),
"/etc/collins.yml",
"/var/db/collins.yml",
}
return NewClientFromFiles(yamlPaths...)
}
// NewClientFromFiles takes an array of paths to look for credentials, and
// returns a Client based on the first config file that exists and parses
// correctly. Otherwise, it returns nil and an error.
func NewClientFromFiles(paths ...string) (*Client, error) {
f, err := openYamlFiles(paths...)
if err != nil {
return nil, err
}
data, err := ioutil.ReadAll(f)
if err != nil {
return nil, err
}
var creds struct {
Host string
Username string
Password string
}
err = yaml.Unmarshal(data, &creds)
if err != nil {
return nil, err
}
return NewClient(creds.Username, creds.Password, creds.Host)
}
func openYamlFiles(paths ...string) (io.Reader, error) {
for _, path := range paths {
f, err := os.Open(path)
if err != nil {
continue
} else {
return f, nil
}
}
errStr := fmt.Sprintf("Could not load collins credentials from file. (Searched: %s)", strings.Join(paths, ", "))
return nil, errors.New(errStr)
}
// NewRequest creates a new HTTP request which can then be performed by Do.
func (c *Client) NewRequest(method, path string) (*http.Request, error) {
rel, err := url.Parse(path)
if err != nil {
return nil, err
}
reqURL := c.BaseURL.ResolveReference(rel)
req, err := http.NewRequest(method, reqURL.String(), nil)
if err != nil {
return nil, err
}
req.SetBasicAuth(c.User, c.Password)
req.Header.Set("User-Agent", "go-collins "+VERSION)
req.Header.Set("Accept", "application/json")
return req, nil
}
// Create our custom response object that we will pass back to caller
func newResponse(r *http.Response) *Response {
resp := &Response{Response: r}
resp.populatePagination()
return resp
}
// Read in data from headers and use that to populate our response struct
func (r *Response) populatePagination() {
h := r.Header
if prev := h.Get("X-Pagination-PreviousPage"); prev != "" {
n, _ := strconv.Atoi(prev)
r.PreviousPage = n
}
if cur := h.Get("X-Pagination-CurrentPage"); cur != "" {
n, _ := strconv.Atoi(cur)
r.CurrentPage = n
}
if next := h.Get("X-Pagination-NextPage"); next != "" {
n, _ := strconv.Atoi(next)
r.NextPage = n
}
if total := h.Get("X-Pagination-TotalResults"); total != "" {
n, _ := strconv.Atoi(total)
r.TotalResults = n
}
}
// Do performs a given request that was built with `NewRequest`. We return the
// response object as well so that callers can have access to pagination info.
func (c *Client) Do(req *http.Request, v interface{}) (*Response, error) {
resp, err := c.client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
response := newResponse(resp)
if resp.StatusCode > maxHTTPCode {
collinsError := new(Error)
if strings.Contains(resp.Header.Get("Content-Type"), "application/json;") {
err = json.NewDecoder(resp.Body).Decode(collinsError)
if err != nil {
return response, err
}
} else if strings.Contains(resp.Header.Get("Content-Type"), "text/plain;") {
errbuf := &bytes.Buffer{}
bufio.NewReader(resp.Body).WriteTo(errbuf)
collinsError.Data.Message = errbuf.String()
} else {
errstr := fmt.Sprintf("Response with unexpected Content-Type - `%s' received.", resp.Header.Get("Content-Type"))
return response, errors.New(errstr)
}
collinsError.Data.Message = resp.Status + " returned from collins: " + collinsError.Data.Message
return response, collinsError
}
// This looks kind of weird but it works. This allows callers that pass in
// an interface to have response JSON decoded into the interface they pass.
// It also allows accessing `response.container.Status` etc. to get helpful
// response info from Collins.
if v != nil {
response.Container = &Container{
Data: v,
}
}
if strings.Contains(resp.Header.Get("Content-Type"), "application/json;") {
err = json.NewDecoder(resp.Body).Decode(response)
if err != nil {
return response, err
}
} else {
errstr := fmt.Sprintf("Response with unexpected Content-Type - `%s' received. Erroring out.", resp.Header.Get("Content-Type"))
return response, errors.New(errstr)
}
return response, nil
}
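// Hedged usage sketch (not part of the original file): building a client from the
// YAML search path and decoding a response into a caller-supplied value; the request
// path and target variable are illustrative assumptions.
//
//   client, err := collins.NewClientFromYaml()
//   if err != nil {
//       log.Fatal(err)
//   }
//   req, _ := client.NewRequest("GET", "/api/asset/mytag")
//   var out interface{}
//   if _, err := client.Do(req, &out); err != nil {
//       log.Fatal(err)
//   }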
|
[
"\"COLLINS_CLIENT_CONFIG\"",
"\"HOME\""
] |
[] |
[
"COLLINS_CLIENT_CONFIG",
"HOME"
] |
[]
|
["COLLINS_CLIENT_CONFIG", "HOME"]
|
go
| 2 | 0 | |
ansible/utils/gcp/ans_hosts.py
|
'''fetch external ip of each vm, output to stdout in ansible hosts file format'''
import googleapiclient.discovery
import os
def instance_list(compute, project, zone):
instance = compute.instances().list(project=project, zone=zone).execute()
return instance['items'] if 'items' in instance else None
def main():
project_id = os.environ['GOOGLE_PROJECT_ID']
zone = os.environ['GOOGLE_COMPUTE_ZONE']
compute = googleapiclient.discovery.build('compute', 'v1')
instances = instance_list(compute, project_id, zone)
ips = []
for instance in instances:
ip = instance['networkInterfaces'][0]['accessConfigs'][0]['natIP']
print('[' + instance['name'] + ']')
print(ip)
print('')
ips.append(ip)
print('[all]')
for ip in ips:
print(ip)
if __name__ == '__main__':
main()
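# Hedged usage sketch (not part of the original script): run with both required
# environment variables exported and redirect stdout into an Ansible inventory file;
# application-default credentials must also be available to googleapiclient, e.g.
#   GOOGLE_PROJECT_ID=my-project GOOGLE_COMPUTE_ZONE=us-central1-a \
#       python ans_hosts.py > hosts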
|
[] |
[] |
[
"GOOGLE_PROJECT_ID",
"GOOGLE_COMPUTE_ZONE"
] |
[]
|
["GOOGLE_PROJECT_ID", "GOOGLE_COMPUTE_ZONE"]
|
python
| 2 | 0 | |
file_contents_query_test.go
|
package hedera
import (
"github.com/stretchr/testify/assert"
"os"
"testing"
)
func TestNewFileContentsQuery(t *testing.T) {
mockTransaction, err := newMockTransaction()
assert.NoError(t, err)
query := NewFileContentsQuery().
SetFileID(FileID{File: 3}).
SetQueryPaymentTransaction(mockTransaction)
assert.Equal(t, `fileGetContents:<header:<payment:<bodyBytes:"\n\016\n\010\010\334\311\007\020\333\237\t\022\002\030\003\022\002\030\003\030\200\302\327/\"\002\010xr\024\n\022\n\007\n\002\030\002\020\307\001\n\007\n\002\030\003\020\310\001" sigMap:<sigPair:<pubKeyPrefix:"\344\361\300\353L}\315\303\347\353\021p\263\010\212=\022\242\227\364\243\353\342\362\205\003\375g5F\355\216" ed25519:"\022&5\226\373\264\034]P\273%\354P\233k\315\231\013\337\274\254)\246+\322<\227+\273\214\212f\313\332i\027T4{\367\363UYn\n\217\253ep\004\366\203\017\272FUP\243\321/\035\235\032\013" > > > > fileID:<fileNum:3 > > `, query.QueryBuilder.pb.String())
}
func TestFileContentsQuery_Execute(t *testing.T) {
operatorAccountID, err := AccountIDFromString(os.Getenv("OPERATOR_ID"))
assert.NoError(t, err)
operatorPrivateKey, err := Ed25519PrivateKeyFromString(os.Getenv("OPERATOR_KEY"))
assert.NoError(t, err)
client := ClientForTestnet().
SetOperator(operatorAccountID, operatorPrivateKey).
SetMaxTransactionFee(NewHbar(2))
var contents = []byte("Hellow world!")
txID, err := NewFileCreateTransaction().
AddKey(operatorPrivateKey.PublicKey()).
SetContents(contents).
SetTransactionMemo("go sdk e2e tests").
Execute(client)
assert.NoError(t, err)
receipt, err := txID.GetReceipt(client)
assert.NoError(t, err)
fileID := receipt.fileID
assert.NotNil(t, fileID)
_, err = txID.GetReceipt(client)
assert.NoError(t, err)
remoteContents, err := NewFileContentsQuery().
SetFileID(*fileID).
Execute(client)
assert.NoError(t, err)
assert.Equal(t, contents, remoteContents)
txID, err = NewFileDeleteTransaction().
SetFileID(*fileID).
Execute(client)
assert.NoError(t, err)
_, err = txID.GetReceipt(client)
assert.NoError(t, err)
}
|
[
"\"OPERATOR_ID\"",
"\"OPERATOR_KEY\""
] |
[] |
[
"OPERATOR_ID",
"OPERATOR_KEY"
] |
[]
|
["OPERATOR_ID", "OPERATOR_KEY"]
|
go
| 2 | 0 | |
setup.py
|
#!/usr/bin/env python
"""
setup.py file for GridDB python client
"""
from distutils.command.build import build
import os
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
try:
with open('README.rst') as f:
readme = f.read()
except IOError:
readme = ''
os.environ["CXX"] = "g++"
os.environ["CC"] = "g++"
SOURCES = [
'src/AggregationResult.cpp',
'src/Container.cpp',
'src/ContainerInfo.cpp',
'src/Field.cpp',
'src/PartitionController.cpp',
'src/Query.cpp',
'src/QueryAnalysisEntry.cpp',
'src/RowKeyPredicate.cpp',
'src/RowList.cpp',
'src/RowSet.cpp',
'src/Store.cpp',
'src/StoreFactory.cpp',
'src/TimeSeriesProperties.cpp',
'src/TimestampUtils.cpp',
'src/griddb.i',
'src/Util.cpp'
]
DEPENDENTS = [
'src/AggregationResult.h',
'src/ContainerInfo.h',
'src/Container.h',
'src/ExpirationInfo.h',
    'src/Field.h',
'src/GSException.h',
'src/PartitionController.h',
'src/Query.h',
'src/QueryAnalysisEntry.h',
'src/RowKeyPredicate.h',
'src/RowList.h',
'src/RowSet.h',
'src/Store.h',
'src/StoreFactory.h',
'src/TimeSeriesProperties.h',
'src/TimestampUtils.h',
'src/gstype_python.i',
'src/gstype.i',
'src/Util.h',
'include/gridstore.h'
]
INCLUDES = [
'include',
'src',
os.environ['HOME'] + '/.pyenv/versions/3.6.9/lib/python3.6/site-packages/numpy/core/include/'
]
COMPILE_ARGS = [
'-std=c++0x'
]
LIBRARIES = [
'rt',
'gridstore'
]
SWIG_OPTS = [
'-DSWIGWORDSIZE64',
'-c++',
'-outdir',
'.',
'-Isrc'
]
class CustomBuild(build):
sub_commands = [
('build_ext', build.has_ext_modules),
('build_py', build.has_pure_modules),
('build_clib', build.has_c_libraries),
('build_scripts', build.has_scripts)
]
griddb_module = Extension('_griddb_python',
sources=SOURCES,
include_dirs=INCLUDES,
libraries=LIBRARIES,
extra_compile_args=COMPILE_ARGS,
swig_opts=SWIG_OPTS,
depends=DEPENDENTS
)
classifiers = [
"License :: OSI Approved :: Apache Software License",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3.6"
]
setup(name='griddb_python',
version='0.8.3',
author='Katsuhiko Nonomura',
author_email='[email protected]',
description='GridDB Python Client Library built using SWIG',
long_description=readme,
ext_modules=[griddb_module],
py_modules=['griddb_python'],
url='https://github.com/griddb/python_client/',
license='Apache Software License',
cmdclass={'build': CustomBuild},
long_description_content_type = 'text/x-rst',
classifiers=classifiers
)
|
[] |
[] |
[
"CXX",
"HOME",
"CC"
] |
[]
|
["CXX", "HOME", "CC"]
|
python
| 3 | 0 | |
cmd/update-main.go
|
/*
* MinIO Cloud Storage, (C) 2015, 2016, 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"crypto"
"encoding/hex"
"fmt"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/fatih/color"
"github.com/inconshreveable/go-update"
"github.com/mattn/go-ieproxy"
isatty "github.com/mattn/go-isatty"
"github.com/minio/cli"
json "github.com/minio/mc/pkg/colorjson"
"github.com/minio/mc/pkg/probe"
_ "github.com/minio/sha256-simd" // Needed for sha256 hash verifier.
)
// Check for new software updates.
var updateCmd = cli.Command{
Name: "update",
Usage: "update mc to latest release",
Action: mainUpdate,
Flags: []cli.Flag{
cli.BoolFlag{
Name: "json",
Usage: "enable JSON formatted output",
},
},
CustomHelpTemplate: `Name:
{{.HelpName}} - {{.Usage}}
USAGE:
{{.HelpName}}{{if .VisibleFlags}} [FLAGS]{{end}}
{{if .VisibleFlags}}
FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
EXIT STATUS:
0 - you are already running the most recent version
1 - new update was applied successfully
-1 - error in getting update information
EXAMPLES:
1. Check and update mc:
{{.Prompt}} {{.HelpName}}
`,
}
const (
mcReleaseTagTimeLayout = "2006-01-02T15-04-05Z"
mcOSARCH = runtime.GOOS + "-" + runtime.GOARCH
mcReleaseURL = "https://dl.min.io/client/mc/release/" + mcOSARCH + "/"
)
var (
// Newer official download info URLs appear earlier below.
mcReleaseInfoURLs = []string{
mcReleaseURL + "mc.sha256sum",
mcReleaseURL + "mc.shasum",
}
// For windows our files have .exe additionally.
mcReleaseWindowsInfoURLs = []string{
mcReleaseURL + "mc.exe.sha256sum",
mcReleaseURL + "mc.exe.shasum",
}
)
// mcVersionToReleaseTime - parses a standard official release
// mc --version string.
//
// An official binary's version string is the release time formatted
// with RFC3339 (in UTC) - e.g. `2017-09-29T19:16:56Z`
func mcVersionToReleaseTime(version string) (releaseTime time.Time, err *probe.Error) {
var e error
releaseTime, e = time.Parse(time.RFC3339, version)
return releaseTime, probe.NewError(e)
}
// releaseTimeToReleaseTag - converts a time to a string formatted as
// an official mc release tag.
//
// An official mc release tag looks like:
// `RELEASE.2017-09-29T19-16-56Z`
func releaseTimeToReleaseTag(releaseTime time.Time) string {
return "RELEASE." + releaseTime.Format(mcReleaseTagTimeLayout)
}
// releaseTagToReleaseTime - reverse of `releaseTimeToReleaseTag()`
func releaseTagToReleaseTime(releaseTag string) (releaseTime time.Time, err *probe.Error) {
tagTimePart := strings.TrimPrefix(releaseTag, "RELEASE.")
if tagTimePart == releaseTag {
return releaseTime, probe.NewError(fmt.Errorf("%s is not a valid release tag", releaseTag))
}
var e error
releaseTime, e = time.Parse(mcReleaseTagTimeLayout, tagTimePart)
return releaseTime, probe.NewError(e)
}
// getModTime - get the file modification time of `path`
func getModTime(path string) (t time.Time, err *probe.Error) {
var e error
path, e = filepath.EvalSymlinks(path)
if e != nil {
return t, probe.NewError(fmt.Errorf("Unable to get absolute path of %s. %w", path, e))
}
// Version is mc non-standard, we will use mc binary's
// ModTime as release time.
var fi os.FileInfo
fi, e = os.Stat(path)
if e != nil {
return t, probe.NewError(fmt.Errorf("Unable to get ModTime of %s. %w", path, e))
}
// Return the ModTime
return fi.ModTime().UTC(), nil
}
// GetCurrentReleaseTime - returns this process's release time. If it
// is official mc --version, parsed version is returned else mc
// binary's mod time is returned.
func GetCurrentReleaseTime() (releaseTime time.Time, err *probe.Error) {
if releaseTime, err = mcVersionToReleaseTime(Version); err == nil {
return releaseTime, nil
}
// Looks like version is mc non-standard, we use mc
// binary's ModTime as release time:
path, e := os.Executable()
if e != nil {
return releaseTime, probe.NewError(e)
}
return getModTime(path)
}
// IsDocker - returns if the environment mc is running in docker or
// not. The check is a simple file existence check.
//
// https://github.com/moby/moby/blob/master/daemon/initlayer/setup_unix.go#L25
//
// "/.dockerenv": "file",
//
func IsDocker() bool {
_, e := os.Stat("/.dockerenv")
if os.IsNotExist(e) {
return false
}
return e == nil
}
// IsDCOS returns true if mc is running in DCOS.
func IsDCOS() bool {
// http://mesos.apache.org/documentation/latest/docker-containerizer/
// Mesos docker containerizer sets this value
return os.Getenv("MESOS_CONTAINER_NAME") != ""
}
// IsKubernetes returns true if MinIO is running in kubernetes.
func IsKubernetes() bool {
// Kubernetes env used to validate if we are
// indeed running inside a kubernetes pod
// is KUBERNETES_SERVICE_HOST but in future
// we might need to enhance this.
return os.Getenv("KUBERNETES_SERVICE_HOST") != ""
}
// IsSourceBuild - returns if this binary is a non-official build from
// source code.
func IsSourceBuild() bool {
_, err := mcVersionToReleaseTime(Version)
return err != nil
}
// DO NOT CHANGE USER AGENT STYLE.
// The style should be
//
// mc (<OS>; <ARCH>[; dcos][; kubernetes][; docker][; source]) mc/<VERSION> mc/<RELEASE-TAG> mc/<COMMIT-ID>
//
// Any change here should be discussed by opening an issue at
// https://github.com/minio/mc/issues.
func getUserAgent() string {
userAgentParts := []string{}
// Helper function to concisely append a pair of strings to a
// the user-agent slice.
uaAppend := func(p, q string) {
userAgentParts = append(userAgentParts, p, q)
}
uaAppend("mc (", runtime.GOOS)
uaAppend("; ", runtime.GOARCH)
if IsDCOS() {
uaAppend("; ", "dcos")
}
if IsKubernetes() {
uaAppend("; ", "kubernetes")
}
if IsDocker() {
uaAppend("; ", "docker")
}
if IsSourceBuild() {
uaAppend("; ", "source")
}
uaAppend(") mc/", Version)
uaAppend(" mc/", ReleaseTag)
uaAppend(" mc/", CommitID)
return strings.Join(userAgentParts, "")
}
func downloadReleaseURL(releaseChecksumURL string, timeout time.Duration) (content string, err *probe.Error) {
req, e := http.NewRequest("GET", releaseChecksumURL, nil)
if e != nil {
return content, probe.NewError(e)
}
req.Header.Set("User-Agent", getUserAgent())
client := &http.Client{
Timeout: timeout,
Transport: &http.Transport{
Proxy: ieproxy.GetProxyFunc(),
// need to close connection after usage.
DisableKeepAlives: true,
},
}
resp, e := client.Do(req)
if e != nil {
return content, probe.NewError(e)
}
if resp == nil {
return content, probe.NewError(fmt.Errorf("No response from server to download URL %s", releaseChecksumURL))
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return content, probe.NewError(fmt.Errorf("Error downloading URL %s. Response: %v", releaseChecksumURL, resp.Status))
}
contentBytes, e := ioutil.ReadAll(resp.Body)
if e != nil {
return content, probe.NewError(fmt.Errorf("Error reading response. %s", err))
}
return string(contentBytes), nil
}
// DownloadReleaseData - downloads release data from mc official server.
func DownloadReleaseData(timeout time.Duration) (data string, err *probe.Error) {
releaseURLs := mcReleaseInfoURLs
if runtime.GOOS == "windows" {
releaseURLs = mcReleaseWindowsInfoURLs
}
return func() (data string, err *probe.Error) {
for _, url := range releaseURLs {
data, err = downloadReleaseURL(url, timeout)
if err == nil {
return data, nil
}
}
return data, err.Trace(releaseURLs...)
}()
}
// parseReleaseData - parses release info file content fetched from
// official mc download server.
//
// The expected format is a single line with two words like:
//
// fbe246edbd382902db9a4035df7dce8cb441357d mc.RELEASE.2016-10-07T01-16-39Z
//
// The second word must be `mc.` appended to a standard release tag.
func parseReleaseData(data string) (sha256Hex string, releaseTime time.Time, err *probe.Error) {
fields := strings.Fields(data)
if len(fields) != 2 {
return sha256Hex, releaseTime, probe.NewError(fmt.Errorf("Unknown release data `%s`", data))
}
sha256Hex = fields[0]
releaseInfo := fields[1]
fields = strings.SplitN(releaseInfo, ".", 2)
if len(fields) != 2 {
return sha256Hex, releaseTime, probe.NewError(fmt.Errorf("Unknown release information `%s`", releaseInfo))
}
if fields[0] != "mc" {
return sha256Hex, releaseTime, probe.NewError(fmt.Errorf("Unknown release `%s`", releaseInfo))
}
releaseTime, err = releaseTagToReleaseTime(fields[1])
if err != nil {
return sha256Hex, releaseTime, err.Trace(fields...)
}
return sha256Hex, releaseTime, nil
}
func getLatestReleaseTime(timeout time.Duration) (sha256Hex string, releaseTime time.Time, err *probe.Error) {
data, err := DownloadReleaseData(timeout)
if err != nil {
return sha256Hex, releaseTime, err.Trace()
}
return parseReleaseData(data)
}
func getDownloadURL(releaseTag string) (downloadURL string) {
// Check if we are docker environment, return docker update command
if IsDocker() {
// Construct release tag name.
return fmt.Sprintf("docker pull minio/mc:%s", releaseTag)
}
// For binary only installations, we return link to the latest binary.
if runtime.GOOS == "windows" {
return mcReleaseURL + "mc.exe"
}
return mcReleaseURL + "mc"
}
func getUpdateInfo(timeout time.Duration) (updateMsg string, sha256Hex string, currentReleaseTime, latestReleaseTime time.Time, err *probe.Error) {
currentReleaseTime, err = GetCurrentReleaseTime()
if err != nil {
return updateMsg, sha256Hex, currentReleaseTime, latestReleaseTime, err.Trace()
}
sha256Hex, latestReleaseTime, err = getLatestReleaseTime(timeout)
if err != nil {
return updateMsg, sha256Hex, currentReleaseTime, latestReleaseTime, err.Trace()
}
var older time.Duration
var downloadURL string
if latestReleaseTime.After(currentReleaseTime) {
older = latestReleaseTime.Sub(currentReleaseTime)
downloadURL = getDownloadURL(releaseTimeToReleaseTag(latestReleaseTime))
}
return prepareUpdateMessage(downloadURL, older), sha256Hex, currentReleaseTime, latestReleaseTime, nil
}
var (
// Check if we stderr, stdout are dumb terminals, we do not apply
// ansi coloring on dumb terminals.
isTerminal = func() bool {
return isatty.IsTerminal(os.Stdout.Fd()) && isatty.IsTerminal(os.Stderr.Fd())
}
colorCyanBold = func() func(a ...interface{}) string {
if isTerminal() {
			return color.New(color.FgCyan, color.Bold).SprintFunc()
}
return fmt.Sprint
}()
colorYellowBold = func() func(format string, a ...interface{}) string {
if isTerminal() {
return color.New(color.FgYellow, color.Bold).SprintfFunc()
}
return fmt.Sprintf
}()
colorGreenBold = func() func(format string, a ...interface{}) string {
if isTerminal() {
return color.New(color.FgGreen, color.Bold).SprintfFunc()
}
return fmt.Sprintf
}()
)
func doUpdate(sha256Hex string, latestReleaseTime time.Time, ok bool) (updateStatusMsg string, err *probe.Error) {
if !ok {
updateStatusMsg = colorGreenBold("mc update to version RELEASE.%s canceled.",
latestReleaseTime.Format(mcReleaseTagTimeLayout))
return updateStatusMsg, nil
}
var sha256Sum []byte
var e error
sha256Sum, e = hex.DecodeString(sha256Hex)
if e != nil {
return updateStatusMsg, probe.NewError(e)
}
resp, e := http.Get(getDownloadURL(releaseTimeToReleaseTag(latestReleaseTime)))
if e != nil {
return updateStatusMsg, probe.NewError(e)
}
defer resp.Body.Close()
// FIXME: add support for gpg verification as well.
if e = update.Apply(resp.Body,
update.Options{
Hash: crypto.SHA256,
Checksum: sha256Sum,
},
); e != nil {
return updateStatusMsg, probe.NewError(e)
}
return colorGreenBold("mc updated to version RELEASE.%s successfully.",
latestReleaseTime.Format(mcReleaseTagTimeLayout)), nil
}
type updateMessage struct {
Status string `json:"status"`
Message string `json:"message"`
}
// String colorized make bucket message.
func (s updateMessage) String() string {
return s.Message
}
// JSON jsonified make bucket message.
func (s updateMessage) JSON() string {
s.Status = "success"
updateJSONBytes, e := json.MarshalIndent(s, "", " ")
fatalIf(probe.NewError(e), "Unable to marshal into JSON.")
return string(updateJSONBytes)
}
func mainUpdate(ctx *cli.Context) {
if len(ctx.Args()) != 0 {
cli.ShowCommandHelpAndExit(ctx, "update", -1)
}
globalQuiet = ctx.Bool("quiet") || ctx.GlobalBool("quiet")
globalJSON = ctx.Bool("json") || ctx.GlobalBool("json")
updateMsg, sha256Hex, _, latestReleaseTime, err := getUpdateInfo(10 * time.Second)
if err != nil {
errorIf(err, "Unable to update ‘mc’.")
os.Exit(-1)
}
// Nothing to update running the latest release.
color.New(color.FgGreen, color.Bold)
if updateMsg == "" {
printMsg(updateMessage{
Status: "success",
Message: colorGreenBold("You are already running the most recent version of ‘mc’."),
})
os.Exit(0)
}
printMsg(updateMessage{
Status: "success",
Message: updateMsg,
})
// Avoid updating mc development, source builds.
if strings.Contains(updateMsg, mcReleaseURL) {
var updateStatusMsg string
var err *probe.Error
updateStatusMsg, err = doUpdate(sha256Hex, latestReleaseTime, true)
if err != nil {
errorIf(err, "Unable to update ‘mc’.")
os.Exit(-1)
}
printMsg(updateMessage{Status: "success", Message: updateStatusMsg})
os.Exit(1)
}
}
|
[
"\"MESOS_CONTAINER_NAME\"",
"\"KUBERNETES_SERVICE_HOST\""
] |
[] |
[
"MESOS_CONTAINER_NAME",
"KUBERNETES_SERVICE_HOST"
] |
[]
|
["MESOS_CONTAINER_NAME", "KUBERNETES_SERVICE_HOST"]
|
go
| 2 | 0 | |
server/datastore/mysql/migrations/tables/20161118212613_CreateTableScheduledQueries.go
|
package tables
import (
"database/sql"
)
func init() {
MigrationClient.AddMigration(Up_20161118212613, Down_20161118212613)
}
func Up_20161118212613(tx *sql.Tx) error {
_, err := tx.Exec(
"CREATE TABLE IF NOT EXISTS `scheduled_queries` (" +
"`id` int(10) unsigned NOT NULL AUTO_INCREMENT," +
"`created_at` timestamp DEFAULT CURRENT_TIMESTAMP," +
"`updated_at` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP," +
"`deleted_at` timestamp NULL DEFAULT NULL," +
"`deleted` tinyint(1) NOT NULL DEFAULT FALSE," +
"`pack_id` int(10) unsigned DEFAULT NULL," +
"`query_id` int(10) unsigned DEFAULT NULL," +
"`interval` int(10) unsigned DEFAULT NULL," +
"`snapshot` tinyint(1) DEFAULT NULL," +
"`removed` tinyint(1) DEFAULT NULL," +
"`platform` varchar(255) DEFAULT NULL," +
"`version` varchar(255) DEFAULT NULL," +
"`shard` int(10) unsigned DEFAULT NULL," +
"PRIMARY KEY (`id`)" +
") ENGINE=InnoDB DEFAULT CHARSET=utf8;",
)
return err
}
func Down_20161118212613(tx *sql.Tx) error {
_, err := tx.Exec("DROP TABLE IF EXISTS `scheduled_queries`;")
return err
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
src/footProno.go
|
package main
import (
"encoding/gob"
"log"
"net/http"
"os"
"text/template"
"time"
"github.com/gorilla/mux"
"github.com/gorilla/sessions"
"github.com/joho/godotenv"
)
var Version = "Development"
var stat Stats
var config Config
var cookieName = "footProno-secure-cookie"
// store will hold all session data
var store *sessions.CookieStore
// tpl holds all parsed templates
var tpl *template.Template
func init() {
authKeyOne := []byte("whatwedointheshadows")
encryptionKeyOne := []byte("themandalorian22")
store = sessions.NewCookieStore(
authKeyOne,
encryptionKeyOne,
)
store.Options = &sessions.Options{
Path: "/",
MaxAge: 60 * 15,
HttpOnly: true,
}
gob.Register(User{})
tpl = template.Must(template.ParseGlob("templates/*.gohtml"))
}
func main() {
err := godotenv.Load()
if err != nil {
log.Fatal("Error loading .env file")
}
serverPort := os.Getenv("SERVER_PORT")
if serverPort == "" {
serverPort = "4000" //localhost
}
serverHost := os.Getenv("SERVER_NAME")
if serverHost == "" {
serverHost = "localhost" //localhost
}
//updateJavaScript(serverPort, serverHost)
stat.Version = Version
log.Println("Version:\t", stat.Version)
log.Println("Running Web Server Api on " + serverHost + " " + serverPort)
router := mux.NewRouter()
log.Println("Preparing to Serve Api")
router.HandleFunc("/", index)
router.HandleFunc("/gom", getOfficialMatches)
router.HandleFunc("/gt", getTeams)
router.HandleFunc("/health", health)
router.HandleFunc("/login", login)
router.HandleFunc("/logout", logout)
router.HandleFunc("/about", about)
fileServer := http.FileServer(http.Dir("static"))
router.PathPrefix("/js").Handler(http.StripPrefix("/", fileServer))
router.PathPrefix("/css").Handler(http.StripPrefix("/", fileServer))
router.PathPrefix("/img").Handler(http.StripPrefix("/", fileServer))
srv := &http.Server{
Handler: router,
Addr: ":" + serverPort,
// Good practice: enforce timeouts for servers you create!
WriteTimeout: 15 * time.Second,
ReadTimeout: 15 * time.Second,
}
log.Println("Ready to receive calls")
log.Fatal(srv.ListenAndServe())
}
|
[
"\"SERVER_PORT\"",
"\"SERVER_NAME\""
] |
[] |
[
"SERVER_PORT",
"SERVER_NAME"
] |
[]
|
["SERVER_PORT", "SERVER_NAME"]
|
go
| 2 | 0 | |