filename (string, 4-198 chars) | content (string, 25-939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, 2-3.9k chars) | lang (string, 3 classes) | constargcount (float64, 0-129, may be null) | variableargcount (float64, 0, may be null) | sentence (string, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
pdf.go
|
package stopgo
import (
"code.google.com/p/go-qrcode"
"code.google.com/p/gofpdf"
"fmt"
"os"
)
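// Descriptive note (added for clarity, not in the original source): pdfWriter
// adapts the output file to the io.WriteCloser that gofpdf's OutputAndClose
// expects. It writes only while the Fpdf instance is error-free and reports
// the outcome when closed.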
type pdfWriter struct {
pdf *gofpdf.Fpdf
fl *os.File
}
func (pw *pdfWriter) Write(p []byte) (n int, err error) {
if pw.pdf.Ok() {
return pw.fl.Write(p)
}
return
}
func (pw *pdfWriter) Close() (err error) {
if pw.fl != nil {
pw.fl.Close()
pw.fl = nil
}
if pw.pdf.Ok() {
fmt.Printf("Successfully generated resume.pdf\n")
} else {
fmt.Printf("%s\n", pw.pdf.Error())
}
return
}
func docWriter(pdf *gofpdf.Fpdf, fileStr string) *pdfWriter {
pw := new(pdfWriter)
pw.pdf = pdf
if pdf.Ok() {
var err error
pw.fl, err = os.Create(fileStr)
if err != nil {
pdf.SetErrorf("Error opening output file %s", fileStr)
}
}
return pw
}
func Write(outputName string, resume *Resume) {
var y0 float64
pdf := gofpdf.New("P", "mm", "letter", "")
const (
pageWd = 216.0 // letter 216 mm x 279 mm
pageHeight = 279.0
margin = 10.0
gutter = 4
colNum = 2
mainColumnWidth float64 = (pageWd - 2*margin - gutter) * 3 / 4
supplementColumnWidth float64 = (pageWd - 2*margin - gutter) * 1 / 4
colWd = (pageWd - 2*margin - (colNum-1)*gutter) / colNum
fontSize = 9.75
fontFamily = "Roboto"
)
lineHeight := pdf.PointConvert(fontSize) * 1.25
columnWidths := [2]float64{mainColumnWidth, supplementColumnWidth}
// set up font
gofpdf.MakeFont("font/Roboto/Roboto-Light.ttf", "font/cp1252.map", "font", nil, true)
gofpdf.MakeFont("font/Roboto/Roboto-Regular.ttf", "font/cp1252.map", "font", nil, true)
gofpdf.MakeFont("font/Droid_Serif/DroidSerif.ttf", "font/cp1252.map", "font", nil, true)
// gofpdf.MakeFont("font/Droid_Serif/DroidSerif-Bold.ttf", "font/cp1252.map", "font", nil, true)
// gofpdf.MakeFont("font/Playfair_Display/PlayfairDisplay-Regular.ttf", "font/cp1252.map", "font", nil, true)
gofpdf.MakeFont("font/Playfair_Display/PlayfairDisplay-Bold.ttf", "font/cp1252.map", "font", nil, true)
gofpdf.MakeFont("font/glyphicons-halflings-regular.ttf", "font/glyphicons.map", "font", nil, true)
pdf.SetFontLocation("font")
pdf.SetTitle("", true)
pdf.SetAuthor("John Tunison", true)
pdf.SetSubject("Resume", true)
pdf.SetCreator("John Tunison", true)
pdf.SetKeywords("rockstar", true)
pdf.AddFont("Roboto", "", "Roboto-Light.json")
pdf.AddFont("Roboto", "B", "Roboto-Regular.json")
pdf.AddFont("halflings", "", "glyphicons-halflings-regular.json")
pdf.AddFont("DroidSerif", "", "DroidSerif.json")
// pdf.AddFont("DroidSerif", "B", "DroidSerif-Bold.json")
// pdf.AddFont("Playfair", "", "PlayfairDisplay-Regular.json")
pdf.AddFont("PlayfairDisplay", "B", "PlayfairDisplay-Bold.json")
setCol := func(col int) {
x := margin
for j := 0; j < col; j++ {
x += columnWidths[j] + gutter
}
// log.Printf("setCol(%d) -> x = %f (%s)", col, x, columnWidths)
pdf.SetLeftMargin(x)
pdf.SetX(x)
}
bullet := func(column int, text string) {
// see http://www.fpdf.org/~~V/en/script/script38.php
// see http://www.ascii-code.com for bullet character list
bulletString := "\x95"
bulletWidth := pdf.GetStringWidth(bulletString) + gutter/2
columnWidth := columnWidths[column]
pdf.Cell(bulletWidth, lineHeight, bulletString)
pdf.MultiCell(columnWidth-bulletWidth, lineHeight, text, "", "L", false)
}
mainExperience := func(role string, start int, end int) {
x := pdf.GetX()
y := pdf.GetY()
pdf.SetFont(fontFamily, "B", fontSize)
pdf.MultiCell(mainColumnWidth, lineHeight, role, "", "L", false)
pdf.SetXY(x, y)
pdf.MultiCell(mainColumnWidth, lineHeight, fmt.Sprintf("%d - %d", start, end), "", "R", false)
pdf.SetFont(fontFamily, "", fontSize)
}
horizontalRule := func(width, thickness float64) {
x := pdf.GetX()
y := pdf.GetY()
pdf.SetLineWidth(thickness)
pdf.SetDrawColor(191, 191, 191)
pdf.Line(x, y, x+width, y)
pdf.Ln(2)
}
heading := func(column int, text string) {
x := pdf.GetX()
y := pdf.GetY()
height := pdf.PointConvert(fontSize * 1.25)
columnWidth := columnWidths[column]
// draw line first, then text (so text overlays line)
switch column {
case 0:
pdf.SetXY(margin, y+height)
case 1:
pdf.SetXY(margin+mainColumnWidth+gutter, y+height)
}
horizontalRule(columnWidth, 0.2)
// now heading text
pdf.SetXY(x, y)
pdf.SetFont("DroidSerif", "", fontSize*1.25)
pdf.MultiCell(columnWidth, height, text, "", "L", false)
pdf.Ln(2)
}
packageDir := fmt.Sprintf("%s/src/github.com/jtunison/stopgo", os.Getenv("GOPATH"))
pdf.SetHeaderFunc(func() {
titleStr := resume.Name
x := margin
y := pdf.GetY()
lineHeight := pdf.PointConvert(fontSize * 3)
// then the qr code
// var png []byte
// png, err := qrcode.Encode("https://example.org", qrcode.Medium, 256)
qrCodeSize := lineHeight * 1.25
qrcode.WriteFile(resume.Links.Website, qrcode.Medium, 256, "tmp/qr.png")
pdf.Image("tmp/qr.png", pageWd-margin-qrCodeSize+2, y-2, qrCodeSize, qrCodeSize, false, "", 0, resume.Links.Website)
// write horizontal rule first
pdf.SetXY(margin, y+lineHeight)
horizontalRule(mainColumnWidth+gutter+supplementColumnWidth, 0.4)
// then write the name
pdf.SetFont("PlayfairDisplay", "B", fontSize*3)
pdf.SetXY(x, y)
pdf.SetTextColor(0, 0, 0)
pdf.Write(lineHeight, titleStr)
pdf.Ln(-1)
// then the location
pdf.Ln(1)
lineHeight = pdf.PointConvert(fontSize) * 1.25
pdf.SetFont(fontFamily, "", fontSize)
// svg("svg/map25.svg")
x = pdf.GetX()
y = pdf.GetY()
width := lineHeight*11/16
pdf.Image(fmt.Sprintf("%s/icons/map25.png", packageDir), x, y+.5, lineHeight, width, false, "", 0, "")
pdf.SetXY(x + width + 1, y)
pdf.Write(lineHeight, fmt.Sprintf("%s ", resume.Location))
x = pdf.GetX()
y = pdf.GetY()
pdf.Image(fmt.Sprintf("%s/icons/link15.png", packageDir), x, y+.5, lineHeight, width, false, "", 0, "")
pdf.SetXY(x + width + 1, y)
// pdf.SetTextColor(80, 139, 200)
pdf.WriteLinkString(lineHeight, resume.Links.Website, resume.Links.Website)
pdf.Write(lineHeight, " ")
// pdf.Write(lineHeight, fmt.Sprintf("%s ", resume.Links.Website))
x = pdf.GetX()
y = pdf.GetY()
pdf.Image(fmt.Sprintf("%s/icons/envelope5.png", packageDir), x, y+.5, lineHeight, width, false, "", 0, "")
pdf.SetXY(x + width + 1, y)
pdf.Write(lineHeight, fmt.Sprintf("%s ", resume.Email))
pdf.Ln(10)
y0 = pdf.GetY()
})
pdf.SetFooterFunc(func() {
footerStr := "Resume generated by http://github.com/jtunison/stopgo."
pdf.SetFont(fontFamily, "", fontSize*3/4)
width := pdf.GetStringWidth(footerStr)
lineHeight = pdf.PointConvert(fontSize * 3 / 4)
x := (pageWd - width) / 2
y := pageHeight - lineHeight - margin
pdf.SetXY(x, y)
pdf.SetTextColor(128, 128, 160)
pdf.Write(lineHeight, footerStr)
})
pdf.AddPage()
pdf.SetFont(fontFamily, "", fontSize)
setCol(0)
// Summary
heading(0, "Summary")
pdf.SetFont(fontFamily, "", fontSize)
pdf.MultiCell(mainColumnWidth, lineHeight, resume.Summary, "", "L", false)
pdf.Ln(-1)
// Work History
heading(0, "Work History")
for _, experience := range resume.History {
mainExperience(experience.Role, experience.StartYear, experience.EndYear)
//Put a hyperlink
pdf.SetTextColor(80, 139, 200)
pdf.WriteLinkString(lineHeight, experience.Company, experience.CompanyUrl)
pdf.SetTextColor(0, 0, 0)
pdf.Ln(-1)
pdf.Ln(1)
for _, bulletContent := range experience.Bullets {
bullet(0, bulletContent)
}
pdf.Ln(-1)
}
// Education
heading(0, "Education")
mainExperience(resume.Education[0].Institution, resume.Education[0].StartYear, resume.Education[0].EndYear)
pdf.MultiCell(mainColumnWidth, lineHeight, resume.Education[0].Degree, "", "L", false)
// right hand side
pdf.SetY(y0)
setCol(1)
lineHeight = pdf.PointConvert(fontSize) * 1.4
for _, supplement := range resume.Supplements {
heading(1, supplement.Heading)
for _, bulletContent := range supplement.Bullets {
pdf.SetFont(fontFamily, "", fontSize)
bullet(1, bulletContent)
}
pdf.Ln(-1)
}
pdf.OutputAndClose(docWriter(pdf, outputName))
}
| ["\"GOPATH\""] | [] | ["GOPATH"] | [] | ["GOPATH"] | go | 1 | 0 | |
utils/peurifoy_batch_predict.py
|
from utils.helper_functions import simulator
from multiprocessing import Pool
from utils.evaluation_helper import plotMSELossDistrib
import numpy as np
import os
import pandas as pd
# This is the script for doing batch evaluation
num_cpu = 10
def eval_peurifoy_for_file(filename):
# Read the Xpred file
Xpred = pd.read_csv(filename, sep=' ', header=None).values
# Run the simulator
Ypred = simulator('Peurifoy', Xpred)
# Save the Ypred into the same folder with name change
with open(filename.replace('Xpred','Ypred'), 'a') as fyp:
np.savetxt(fyp, Ypred)
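# Illustrative naming convention this script relies on (example file names, not
# taken from the original repository): an input such as
#   <folder>/test_Xpred_Peurifoy_best_model.csv
# produces
#   <folder>/test_Ypred_Peurifoy_best_model.csv
# because the output path is derived by replacing 'Xpred' with 'Ypred'.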
def eval_whole_folder(folder):
"""
Run eval_peurifoy_for_file over every matching Xpred file in the folder, in parallel.
"""
try:
pool = Pool(num_cpu)
args_list = []
for filename in os.listdir(folder):
if not ('Peurifoy' in filename and 'Xpred' in filename):
continue
args_list.append((os.path.join(folder, filename),))
print((args_list))
print(len(args_list))
pool.starmap(eval_peurifoy_for_file, args_list)
finally:
pool.close()
pool.join()
def plot_MSE(folder):
"""
Plot the MSE loss distribution for each simulated Ypred/Ytruth pair in the folder.
"""
for file in os.listdir(folder):
if not ('Peurifoy' in file and 'Xpred' in file):
continue
Xpred_file = os.path.join(folder, file)
Ypred_file = Xpred_file.replace('Xpred','Ypred')
Ytruth_file = Ypred_file.replace('Ypred','Ytruth')
save_dir = '/'.join(Xpred_file.split('/')[:-1])
print(save_dir)
plotMSELossDistrib(Ypred_file, Ytruth_file, save_dir=save_dir)
if __name__ == '__main__':
# Tandem and inverse only need to do the evaluation once
#eval_whole_folder('../inverse/data')
#plot_MSE('../inverse/data')
#eval_whole_folder('../Tandem/data')
#plot_MSE('../Tandem/data')
#quit()
# For the multi_T evaluation
folder_list = ['VAE','Tandem','NN','NA','GA','INN','MDN','cINN']
#folder_list = ['GA']
# folder_list = ['MDN','INN','VAE','cINN']
for folders in folder_list:
folder = '../mm_bench_multi_eval/{}/Peurifoy'.format(folders)
# Run simulator for the whole folder
eval_whole_folder(folder)
#plot_MSE(folder)
| [] | [] | [] | [] | [] | python | null | null | null |
script popolamento DB/env/lib/python3.7/site-packages/pip/_internal/utils/misc.py
|
from __future__ import absolute_import
import contextlib
import errno
import io
import locale
# we have a submodule named 'logging' which would shadow this if we used the
# regular name:
import logging as std_logging
import os
import posixpath
import re
import shutil
import stat
import subprocess
import sys
import tarfile
import zipfile
from collections import deque
from pip._internal.exceptions import CommandError, InstallationError
from pip._internal.locations import (
running_under_virtualenv, site_packages, user_site, virtualenv_no_global,
write_delete_marker_file,
)
from pip._internal.utils.compat import (
WINDOWS, console_to_str, expanduser, stdlib_pkgs,
)
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._vendor import pkg_resources
# NOTE: retrying is not annotated in typeshed as on 2017-07-17, which is
# why we ignore the type on this import.
from pip._vendor.retrying import retry # type: ignore
from pip._vendor.six import PY2
from pip._vendor.six.moves import input
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib.parse import unquote as urllib_unquote
if PY2:
from io import BytesIO as StringIO
else:
from io import StringIO
if MYPY_CHECK_RUNNING:
from typing import ( # noqa: F401
Optional, Tuple, Iterable, List, Match, Union, Any, Mapping, Text,
AnyStr, Container
)
from pip._vendor.pkg_resources import Distribution # noqa: F401
from pip._internal.models.link import Link # noqa: F401
from pip._internal.utils.ui import SpinnerInterface # noqa: F401
__all__ = ['rmtree', 'display_path', 'backup_dir',
'ask', 'splitext',
'format_size', 'is_installable_dir',
'is_svn_page', 'file_contents',
'split_leading_dir', 'has_leading_dir',
'normalize_path',
'renames', 'get_prog',
'unzip_file', 'untar_file', 'unpack_file', 'call_subprocess',
'captured_stdout', 'ensure_dir',
'ARCHIVE_EXTENSIONS', 'SUPPORTED_EXTENSIONS', 'WHEEL_EXTENSION',
'get_installed_version', 'remove_auth_from_url']
logger = std_logging.getLogger(__name__)
WHEEL_EXTENSION = '.whl'
BZ2_EXTENSIONS = ('.tar.bz2', '.tbz')
XZ_EXTENSIONS = ('.tar.xz', '.txz', '.tlz', '.tar.lz', '.tar.lzma')
ZIP_EXTENSIONS = ('.zip', WHEEL_EXTENSION)
TAR_EXTENSIONS = ('.tar.gz', '.tgz', '.tar')
ARCHIVE_EXTENSIONS = (
ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS)
SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS
try:
import bz2 # noqa
SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS
except ImportError:
logger.debug('bz2 module is not available')
try:
# Only for Python 3.3+
import lzma # noqa
SUPPORTED_EXTENSIONS += XZ_EXTENSIONS
except ImportError:
logger.debug('lzma module is not available')
def ensure_dir(path):
# type: (AnyStr) -> None
"""os.path.makedirs without EEXIST."""
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def get_prog():
# type: () -> str
try:
prog = os.path.basename(sys.argv[0])
if prog in ('__main__.py', '-c'):
return "%s -m pip" % sys.executable
else:
return prog
except (AttributeError, TypeError, IndexError):
pass
return 'pip'
# Retry every half second for up to 3 seconds
@retry(stop_max_delay=3000, wait_fixed=500)
def rmtree(dir, ignore_errors=False):
# type: (str, bool) -> None
shutil.rmtree(dir, ignore_errors=ignore_errors,
onerror=rmtree_errorhandler)
def rmtree_errorhandler(func, path, exc_info):
"""On Windows, the files in .svn are read-only, so when rmtree() tries to
remove them, an exception is thrown. We catch that here, remove the
read-only attribute, and hopefully continue without problems."""
# if file type currently read only
if os.stat(path).st_mode & stat.S_IREAD:
# convert to read/write
os.chmod(path, stat.S_IWRITE)
# use the original function to repeat the operation
func(path)
return
else:
raise
def display_path(path):
# type: (Union[str, Text]) -> str
"""Gives the display value for a given path, making it relative to cwd
if possible."""
path = os.path.normcase(os.path.abspath(path))
if sys.version_info[0] == 2:
path = path.decode(sys.getfilesystemencoding(), 'replace')
path = path.encode(sys.getdefaultencoding(), 'replace')
if path.startswith(os.getcwd() + os.path.sep):
path = '.' + path[len(os.getcwd()):]
return path
def backup_dir(dir, ext='.bak'):
# type: (str, str) -> str
"""Figure out the name of a directory to back up the given dir to
(adding .bak, .bak2, etc)"""
n = 1
extension = ext
while os.path.exists(dir + extension):
n += 1
extension = ext + str(n)
return dir + extension
def ask_path_exists(message, options):
# type: (str, Iterable[str]) -> str
for action in os.environ.get('PIP_EXISTS_ACTION', '').split():
if action in options:
return action
return ask(message, options)
def ask(message, options):
# type: (str, Iterable[str]) -> str
"""Ask the message interactively, with the given possible responses"""
while 1:
if os.environ.get('PIP_NO_INPUT'):
raise Exception(
'No input was expected ($PIP_NO_INPUT set); question: %s' %
message
)
response = input(message)
response = response.strip().lower()
if response not in options:
print(
'Your response (%r) was not one of the expected responses: '
'%s' % (response, ', '.join(options))
)
else:
return response
def format_size(bytes):
# type: (float) -> str
if bytes > 1000 * 1000:
return '%.1fMB' % (bytes / 1000.0 / 1000)
elif bytes > 10 * 1000:
return '%ikB' % (bytes / 1000)
elif bytes > 1000:
return '%.1fkB' % (bytes / 1000.0)
else:
return '%ibytes' % bytes
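# Illustrative outputs of format_size (example values, decimal units as coded above):
#   format_size(1234567) -> '1.2MB'
#   format_size(45678)   -> '45kB'
#   format_size(512)     -> '512bytes'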
def is_installable_dir(path):
# type: (str) -> bool
"""Is path is a directory containing setup.py or pyproject.toml?
"""
if not os.path.isdir(path):
return False
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
return True
pyproject_toml = os.path.join(path, 'pyproject.toml')
if os.path.isfile(pyproject_toml):
return True
return False
def is_svn_page(html):
# type: (Union[str, Text]) -> Optional[Match[Union[str, Text]]]
"""
Returns true if the page appears to be the index page of an svn repository
"""
return (re.search(r'<title>[^<]*Revision \d+:', html) and
re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I))
def file_contents(filename):
# type: (str) -> Text
with open(filename, 'rb') as fp:
return fp.read().decode('utf-8')
def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE):
"""Yield pieces of data from a file-like object until EOF."""
while True:
chunk = file.read(size)
if not chunk:
break
yield chunk
def split_leading_dir(path):
# type: (Union[str, Text]) -> List[Union[str, Text]]
path = path.lstrip('/').lstrip('\\')
if '/' in path and (('\\' in path and path.find('/') < path.find('\\')) or
'\\' not in path):
return path.split('/', 1)
elif '\\' in path:
return path.split('\\', 1)
else:
return [path, '']
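# Illustrative behaviour of split_leading_dir (example paths, not from the original source):
#   split_leading_dir('pkg/sub/file.py') -> ['pkg', 'sub/file.py']
#   split_leading_dir('file.py')         -> ['file.py', '']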
def has_leading_dir(paths):
# type: (Iterable[Union[str, Text]]) -> bool
"""Returns true if all the paths have the same leading path name
(i.e., everything is in one subdirectory in an archive)"""
common_prefix = None
for path in paths:
prefix, rest = split_leading_dir(path)
if not prefix:
return False
elif common_prefix is None:
common_prefix = prefix
elif prefix != common_prefix:
return False
return True
def normalize_path(path, resolve_symlinks=True):
# type: (str, bool) -> str
"""
Convert a path to its canonical, case-normalized, absolute version.
"""
path = expanduser(path)
if resolve_symlinks:
path = os.path.realpath(path)
else:
path = os.path.abspath(path)
return os.path.normcase(path)
def splitext(path):
# type: (str) -> Tuple[str, str]
"""Like os.path.splitext, but take off .tar too"""
base, ext = posixpath.splitext(path)
if base.lower().endswith('.tar'):
ext = base[-4:] + ext
base = base[:-4]
return base, ext
def renames(old, new):
# type: (str, str) -> None
"""Like os.renames(), but handles renaming across devices."""
# Implementation borrowed from os.renames().
head, tail = os.path.split(new)
if head and tail and not os.path.exists(head):
os.makedirs(head)
shutil.move(old, new)
head, tail = os.path.split(old)
if head and tail:
try:
os.removedirs(head)
except OSError:
pass
def is_local(path):
# type: (str) -> bool
"""
Return True if path is within sys.prefix, if we're running in a virtualenv.
If we're not in a virtualenv, all paths are considered "local."
"""
if not running_under_virtualenv():
return True
return normalize_path(path).startswith(normalize_path(sys.prefix))
def dist_is_local(dist):
# type: (Distribution) -> bool
"""
Return True if given Distribution object is installed locally
(i.e. within current virtualenv).
Always True if we're not in a virtualenv.
"""
return is_local(dist_location(dist))
def dist_in_usersite(dist):
# type: (Distribution) -> bool
"""
Return True if given Distribution is installed in user site.
"""
norm_path = normalize_path(dist_location(dist))
return norm_path.startswith(normalize_path(user_site))
def dist_in_site_packages(dist):
# type: (Distribution) -> bool
"""
Return True if given Distribution is installed in
sysconfig.get_python_lib().
"""
return normalize_path(
dist_location(dist)
).startswith(normalize_path(site_packages))
def dist_is_editable(dist):
# type: (Distribution) -> bool
"""
Return True if given Distribution is an editable install.
"""
for path_item in sys.path:
egg_link = os.path.join(path_item, dist.project_name + '.egg-link')
if os.path.isfile(egg_link):
return True
return False
def get_installed_distributions(local_only=True,
skip=stdlib_pkgs,
include_editables=True,
editables_only=False,
user_only=False):
# type: (bool, Container[str], bool, bool, bool) -> List[Distribution]
"""
Return a list of installed Distribution objects.
If ``local_only`` is True (default), only return installations
local to the current virtualenv, if in a virtualenv.
``skip`` argument is an iterable of lower-case project names to
ignore; defaults to stdlib_pkgs
If ``include_editables`` is False, don't report editables.
If ``editables_only`` is True , only report editables.
If ``user_only`` is True , only report installations in the user
site directory.
"""
if local_only:
local_test = dist_is_local
else:
def local_test(d):
return True
if include_editables:
def editable_test(d):
return True
else:
def editable_test(d):
return not dist_is_editable(d)
if editables_only:
def editables_only_test(d):
return dist_is_editable(d)
else:
def editables_only_test(d):
return True
if user_only:
user_test = dist_in_usersite
else:
def user_test(d):
return True
# because of pkg_resources vendoring, mypy cannot find stub in typeshed
return [d for d in pkg_resources.working_set # type: ignore
if local_test(d) and
d.key not in skip and
editable_test(d) and
editables_only_test(d) and
user_test(d)
]
def egg_link_path(dist):
# type: (Distribution) -> Optional[str]
"""
Return the path for the .egg-link file if it exists, otherwise, None.
There's 3 scenarios:
1) not in a virtualenv
try to find in site.USER_SITE, then site_packages
2) in a no-global virtualenv
try to find in site_packages
3) in a yes-global virtualenv
try to find in site_packages, then site.USER_SITE
(don't look in global location)
For #1 and #3, there could be odd cases, where there's an egg-link in 2
locations.
This method will just return the first one found.
"""
sites = []
if running_under_virtualenv():
if virtualenv_no_global():
sites.append(site_packages)
else:
sites.append(site_packages)
if user_site:
sites.append(user_site)
else:
if user_site:
sites.append(user_site)
sites.append(site_packages)
for site in sites:
egglink = os.path.join(site, dist.project_name) + '.egg-link'
if os.path.isfile(egglink):
return egglink
return None
def dist_location(dist):
# type: (Distribution) -> str
"""
Get the site-packages location of this distribution. Generally
this is dist.location, except in the case of develop-installed
packages, where dist.location is the source code location, and we
want to know where the egg-link file is.
"""
egg_link = egg_link_path(dist)
if egg_link:
return egg_link
return dist.location
def current_umask():
"""Get the current umask which involves having to set it temporarily."""
mask = os.umask(0)
os.umask(mask)
return mask
def unzip_file(filename, location, flatten=True):
# type: (str, str, bool) -> None
"""
Unzip the file (with path `filename`) to the destination `location`. All
files are written based on system defaults and umask (i.e. permissions are
not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
zipfp = open(filename, 'rb')
try:
zip = zipfile.ZipFile(zipfp, allowZip64=True)
leading = has_leading_dir(zip.namelist()) and flatten
for info in zip.infolist():
name = info.filename
fn = name
if leading:
fn = split_leading_dir(name)[1]
fn = os.path.join(location, fn)
dir = os.path.dirname(fn)
if fn.endswith('/') or fn.endswith('\\'):
# A directory
ensure_dir(fn)
else:
ensure_dir(dir)
# Don't use read() to avoid allocating an arbitrarily large
# chunk of memory for the file's content
fp = zip.open(name)
try:
with open(fn, 'wb') as destfp:
shutil.copyfileobj(fp, destfp)
finally:
fp.close()
mode = info.external_attr >> 16
# if mode and regular file and any execute permissions for
# user/group/world?
if mode and stat.S_ISREG(mode) and mode & 0o111:
# make dest file have execute for user/group/world
# (chmod +x) no-op on windows per python docs
os.chmod(fn, (0o777 - current_umask() | 0o111))
finally:
zipfp.close()
def untar_file(filename, location):
# type: (str, str) -> None
"""
Untar the file (with path `filename`) to the destination `location`.
All files are written based on system defaults and umask (i.e. permissions
are not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
mode = 'r:gz'
elif filename.lower().endswith(BZ2_EXTENSIONS):
mode = 'r:bz2'
elif filename.lower().endswith(XZ_EXTENSIONS):
mode = 'r:xz'
elif filename.lower().endswith('.tar'):
mode = 'r'
else:
logger.warning(
'Cannot determine compression type for file %s', filename,
)
mode = 'r:*'
tar = tarfile.open(filename, mode)
try:
leading = has_leading_dir([
member.name for member in tar.getmembers()
])
for member in tar.getmembers():
fn = member.name
if leading:
# https://github.com/python/mypy/issues/1174
fn = split_leading_dir(fn)[1] # type: ignore
path = os.path.join(location, fn)
if member.isdir():
ensure_dir(path)
elif member.issym():
try:
# https://github.com/python/typeshed/issues/2673
tar._extract_member(member, path) # type: ignore
except Exception as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
'In the tar file %s the member %s is invalid: %s',
filename, member.name, exc,
)
continue
else:
try:
fp = tar.extractfile(member)
except (KeyError, AttributeError) as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
'In the tar file %s the member %s is invalid: %s',
filename, member.name, exc,
)
continue
ensure_dir(os.path.dirname(path))
with open(path, 'wb') as destfp:
shutil.copyfileobj(fp, destfp)
fp.close()
# Update the timestamp (useful for cython compiled files)
# https://github.com/python/typeshed/issues/2673
tar.utime(member, path) # type: ignore
# member have any execute permissions for user/group/world?
if member.mode & 0o111:
# make dest file have execute for user/group/world
# no-op on windows per python docs
os.chmod(path, (0o777 - current_umask() | 0o111))
finally:
tar.close()
def unpack_file(
filename, # type: str
location, # type: str
content_type, # type: Optional[str]
link # type: Optional[Link]
):
# type: (...) -> None
filename = os.path.realpath(filename)
if (content_type == 'application/zip' or
filename.lower().endswith(ZIP_EXTENSIONS) or
zipfile.is_zipfile(filename)):
unzip_file(
filename,
location,
flatten=not filename.endswith('.whl')
)
elif (content_type == 'application/x-gzip' or
tarfile.is_tarfile(filename) or
filename.lower().endswith(
TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS)):
untar_file(filename, location)
elif (content_type and content_type.startswith('text/html') and
is_svn_page(file_contents(filename))):
# We don't really care about this
from pip._internal.vcs.subversion import Subversion
Subversion('svn+' + link.url).unpack(location)
else:
# FIXME: handle?
# FIXME: magic signatures?
logger.critical(
'Cannot unpack file %s (downloaded from %s, content-type: %s); '
'cannot detect archive format',
filename, location, content_type,
)
raise InstallationError(
'Cannot determine archive format of %s' % location
)
def call_subprocess(
cmd, # type: List[str]
show_stdout=True, # type: bool
cwd=None, # type: Optional[str]
on_returncode='raise', # type: str
extra_ok_returncodes=None, # type: Optional[Iterable[int]]
command_desc=None, # type: Optional[str]
extra_environ=None, # type: Optional[Mapping[str, Any]]
unset_environ=None, # type: Optional[Iterable[str]]
spinner=None # type: Optional[SpinnerInterface]
):
# type: (...) -> Optional[Text]
"""
Args:
extra_ok_returncodes: an iterable of integer return codes that are
acceptable, in addition to 0. Defaults to None, which means [].
unset_environ: an iterable of environment variable names to unset
prior to calling subprocess.Popen().
"""
if extra_ok_returncodes is None:
extra_ok_returncodes = []
if unset_environ is None:
unset_environ = []
# This function's handling of subprocess output is confusing and I
# previously broke it terribly, so as penance I will write a long comment
# explaining things.
#
# The obvious thing that affects output is the show_stdout=
# kwarg. show_stdout=True means, let the subprocess write directly to our
# stdout. Even though it is nominally the default, it is almost never used
# inside pip (and should not be used in new code without a very good
# reason); as of 2016-02-22 it is only used in a few places inside the VCS
# wrapper code. Ideally we should get rid of it entirely, because it
# creates a lot of complexity here for a rarely used feature.
#
# Most places in pip set show_stdout=False. What this means is:
# - We connect the child stdout to a pipe, which we read.
# - By default, we hide the output but show a spinner -- unless the
# subprocess exits with an error, in which case we show the output.
# - If the --verbose option was passed (= loglevel is DEBUG), then we show
# the output unconditionally. (But in this case we don't want to show
# the output a second time if it turns out that there was an error.)
#
# stderr is always merged with stdout (even if show_stdout=True).
if show_stdout:
stdout = None
else:
stdout = subprocess.PIPE
if command_desc is None:
cmd_parts = []
for part in cmd:
if ' ' in part or '\n' in part or '"' in part or "'" in part:
part = '"%s"' % part.replace('"', '\\"')
cmd_parts.append(part)
command_desc = ' '.join(cmd_parts)
logger.debug("Running command %s", command_desc)
env = os.environ.copy()
if extra_environ:
env.update(extra_environ)
for name in unset_environ:
env.pop(name, None)
try:
proc = subprocess.Popen(
cmd, stderr=subprocess.STDOUT, stdin=subprocess.PIPE,
stdout=stdout, cwd=cwd, env=env,
)
proc.stdin.close()
except Exception as exc:
logger.critical(
"Error %s while executing command %s", exc, command_desc,
)
raise
all_output = []
if stdout is not None:
while True:
line = console_to_str(proc.stdout.readline())
if not line:
break
line = line.rstrip()
all_output.append(line + '\n')
if logger.getEffectiveLevel() <= std_logging.DEBUG:
# Show the line immediately
logger.debug(line)
else:
# Update the spinner
if spinner is not None:
spinner.spin()
try:
proc.wait()
finally:
if proc.stdout:
proc.stdout.close()
if spinner is not None:
if proc.returncode:
spinner.finish("error")
else:
spinner.finish("done")
if proc.returncode and proc.returncode not in extra_ok_returncodes:
if on_returncode == 'raise':
if (logger.getEffectiveLevel() > std_logging.DEBUG and
not show_stdout):
logger.info(
'Complete output from command %s:', command_desc,
)
logger.info(
''.join(all_output) +
'\n----------------------------------------'
)
raise InstallationError(
'Command "%s" failed with error code %s in %s'
% (command_desc, proc.returncode, cwd))
elif on_returncode == 'warn':
logger.warning(
'Command "%s" had error code %s in %s',
command_desc, proc.returncode, cwd,
)
elif on_returncode == 'ignore':
pass
else:
raise ValueError('Invalid value: on_returncode=%s' %
repr(on_returncode))
if not show_stdout:
return ''.join(all_output)
return None
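# Illustrative use of call_subprocess (hypothetical command; with show_stdout=False
# the captured, merged stdout/stderr text is returned):
#   output = call_subprocess(['git', '--version'], show_stdout=False)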
def read_text_file(filename):
# type: (str) -> str
"""Return the contents of *filename*.
Try to decode the file contents with utf-8, the preferred system encoding
(e.g., cp1252 on some Windows machines), and latin1, in that order.
Decoding a byte string with latin1 will never raise an error. In the worst
case, the returned string will contain some garbage characters.
"""
with open(filename, 'rb') as fp:
data = fp.read()
encodings = ['utf-8', locale.getpreferredencoding(False), 'latin1']
for enc in encodings:
try:
# https://github.com/python/mypy/issues/1174
data = data.decode(enc) # type: ignore
except UnicodeDecodeError:
continue
break
assert not isinstance(data, bytes) # Latin1 should have worked.
return data
def _make_build_dir(build_dir):
os.makedirs(build_dir)
write_delete_marker_file(build_dir)
class FakeFile(object):
"""Wrap a list of lines in an object with readline() to make
ConfigParser happy."""
def __init__(self, lines):
self._gen = (l for l in lines)
def readline(self):
try:
try:
return next(self._gen)
except NameError:
return self._gen.next()
except StopIteration:
return ''
def __iter__(self):
return self._gen
class StreamWrapper(StringIO):
@classmethod
def from_stream(cls, orig_stream):
cls.orig_stream = orig_stream
return cls()
# compileall.compile_dir() needs stdout.encoding to print to stdout
@property
def encoding(self):
return self.orig_stream.encoding
@contextlib.contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO.
Taken from Lib/support/__init__.py in the CPython repo.
"""
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout))
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print('hello')
self.assertEqual(stdout.getvalue(), 'hello\n')
Taken from Lib/support/__init__.py in the CPython repo.
"""
return captured_output('stdout')
def captured_stderr():
"""
See captured_stdout().
"""
return captured_output('stderr')
class cached_property(object):
"""A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property.
Source: https://github.com/bottlepy/bottle/blob/0.11.5/bottle.py#L175
"""
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
# We're being accessed from the class itself, not from an object
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
def get_installed_version(dist_name, working_set=None):
"""Get the installed version of dist_name avoiding pkg_resources cache"""
# Create a requirement that we'll look for inside of setuptools.
req = pkg_resources.Requirement.parse(dist_name)
if working_set is None:
# We want to avoid having this cached, so we need to construct a new
# working set each time.
working_set = pkg_resources.WorkingSet()
# Get the installed distribution from our working set
dist = working_set.find(req)
# Check to see if we got an installed distribution or not, if we did
# we want to return it's version.
return dist.version if dist else None
def consume(iterator):
"""Consume an iterable at C speed."""
deque(iterator, maxlen=0)
# Simulates an enum
def enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
reverse = {value: key for key, value in enums.items()}
enums['reverse_mapping'] = reverse
return type('Enum', (), enums)
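# Illustrative use of enum (hypothetical names):
#   Colors = enum('RED', 'GREEN', BLUE=10)
#   Colors.RED == 0, Colors.GREEN == 1, Colors.BLUE == 10
#   Colors.reverse_mapping[10] == 'BLUE'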
def make_vcs_requirement_url(repo_url, rev, project_name, subdir=None):
"""
Return the URL for a VCS requirement.
Args:
repo_url: the remote VCS url, with any needed VCS prefix (e.g. "git+").
project_name: the (unescaped) project name.
"""
egg_project_name = pkg_resources.to_filename(project_name)
req = '{}@{}#egg={}'.format(repo_url, rev, egg_project_name)
if subdir:
req += '&subdirectory={}'.format(subdir)
return req
def split_auth_from_netloc(netloc):
"""
Parse out and remove the auth information from a netloc.
Returns: (netloc, (username, password)).
"""
if '@' not in netloc:
return netloc, (None, None)
# Split from the right because that's how urllib.parse.urlsplit()
# behaves if more than one @ is present (which can be checked using
# the password attribute of urlsplit()'s return value).
auth, netloc = netloc.rsplit('@', 1)
if ':' in auth:
# Split from the left because that's how urllib.parse.urlsplit()
# behaves if more than one : is present (which again can be checked
# using the password attribute of the return value)
user_pass = auth.split(':', 1)
else:
user_pass = auth, None
user_pass = tuple(
None if x is None else urllib_unquote(x) for x in user_pass
)
return netloc, user_pass
def redact_netloc(netloc):
# type: (str) -> str
"""
Replace the password in a netloc with "****", if it exists.
For example, "user:[email protected]" returns "user:****@example.com".
"""
netloc, (user, password) = split_auth_from_netloc(netloc)
if user is None:
return netloc
password = '' if password is None else ':****'
return '{user}{password}@{netloc}'.format(user=urllib_parse.quote(user),
password=password,
netloc=netloc)
def _transform_url(url, transform_netloc):
purl = urllib_parse.urlsplit(url)
netloc = transform_netloc(purl.netloc)
# stripped url
url_pieces = (
purl.scheme, netloc, purl.path, purl.query, purl.fragment
)
surl = urllib_parse.urlunsplit(url_pieces)
return surl
def _get_netloc(netloc):
return split_auth_from_netloc(netloc)[0]
def remove_auth_from_url(url):
# type: (str) -> str
# Return a copy of url with 'username:password@' removed.
# username/pass params are passed to subversion through flags
# and are not recognized in the url.
return _transform_url(url, _get_netloc)
def redact_password_from_url(url):
# type: (str) -> str
"""Replace the password in a given url with ****."""
return _transform_url(url, redact_netloc)
def protect_pip_from_modification_on_windows(modifying_pip):
"""Protection of pip.exe from modification on Windows
On Windows, any operation modifying pip should be run as:
python -m pip ...
"""
pip_names = [
"pip.exe",
"pip{}.exe".format(sys.version_info[0]),
"pip{}.{}.exe".format(*sys.version_info[:2])
]
# See https://github.com/pypa/pip/issues/1299 for more discussion
should_show_use_python_msg = (
modifying_pip and
WINDOWS and
os.path.basename(sys.argv[0]) in pip_names
)
if should_show_use_python_msg:
new_command = [
sys.executable, "-m", "pip"
] + sys.argv[1:]
raise CommandError(
'To modify pip, please run the following command:\n{}'
.format(" ".join(new_command))
)
| [] | [] | ["PIP_NO_INPUT", "PIP_EXISTS_ACTION"] | [] | ["PIP_NO_INPUT", "PIP_EXISTS_ACTION"] | python | 2 | 0 | |
tools/test.py
|
import argparse
import os
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
from tools.fuse_conv_bn import fuse_module
from mmdet.apis import multi_gpu_test, single_gpu_test
from mmdet.core import wrap_fp16_model
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.models import build_detector
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--out', help='output result file in pickle format')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
help='Whether to fuse conv and bn, this will slightly increase '
'the inference speed')
parser.add_argument(
'--format-only',
action='store_true',
help='Format the output results without performing evaluation. It is '
'useful when you want to format the result to a specific format and '
'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--show-dir', help='directory where painted images will be saved')
parser.add_argument(
'--show-score-thr',
type=float,
default=0.3,
help='score threshold (default: 0.3)')
parser.add_argument(
'--gpu-collect',
action='store_true',
help='whether to use gpu to collect results.')
parser.add_argument(
'--tmpdir',
help='tmp directory used for collecting results from multiple '
'workers, available when gpu-collect is not specified')
parser.add_argument(
'--options', nargs='+', action=DictAction, help='arguments in dict')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
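# Illustrative invocation (hypothetical config and checkpoint paths):
#   python tools/test.py configs/faster_rcnn_r50_fpn_1x_coco.py work_dirs/latest.pth --eval bbox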
def main():
args = parse_args()
assert args.out or args.eval or args.format_only or args.show \
or args.show_dir, \
('Please specify at least one operation (save/eval/format/show the '
'results / save the results) with the argument "--out", "--eval"'
', "--format-only", "--show" or "--show-dir"')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = Config.fromfile(args.config)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
if cfg.model.get('neck'):
if cfg.model.neck.get('rfp_backbone'):
if cfg.model.neck.rfp_backbone.get('pretrained'):
cfg.model.neck.rfp_backbone.pretrained = None
cfg.data.test.test_mode = True
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# build the dataloader
# TODO: support multiple images per gpu (only minor changes are needed)
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
samples_per_gpu=1,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
# build the model and load checkpoint
model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
if args.fuse_conv_bn:
model = fuse_module(model)
# old versions did not save class info in checkpoints, this workaround is
# for backward compatibility
if 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
if not distributed:
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
args.show_score_thr)
else:
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
outputs = multi_gpu_test(model, data_loader, args.tmpdir,
args.gpu_collect)
rank, _ = get_dist_info()
if rank == 0:
if args.out:
print(f'\nwriting results to {args.out}')
mmcv.dump(outputs, args.out)
kwargs = {} if args.options is None else args.options
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
dataset.evaluate(outputs, args.eval, classwise=True, **kwargs)
if __name__ == '__main__':
main()
| [] | [] | ["LOCAL_RANK"] | [] | ["LOCAL_RANK"] | python | 1 | 0 | |
github_linter/__init__.py
|
""" goes through your repos and checks for things """
from collections import deque
from datetime import datetime
import itertools
import os
import time
from types import ModuleType
from typing import Any, Dict, Optional, List, Tuple
import json5 as json
from loguru import logger
from github import Github
from github.ContentFile import ContentFile
from github.GithubException import GithubException
from github.Repository import Repository
import pydantic
import pytz
import wildcard_matcher
from .repolinter import RepoLinter
from .utils import load_config
__version__ = "0.0.1"
RATELIMIT_TYPES = {
"core": {
"minlimit": 50,
},
"graphql": {
"minlimit": 5,
},
"search": {
"minlimit": 1,
},
}
class GithubLinter:
""" does things """
def __init__(self) -> None:
""" setup """
self.config = load_config()
if not self.config:
self.config = {}
self.github = self.do_login()
self.current_repo: Optional[Repository] = None
self.report: Dict[str, Any] = {}
self.modules: Dict[str, ModuleType] = {}
self.filecache: Dict[str, Dict[str, Optional[ContentFile]]] = {}
def do_login(self) -> Github:
""" does the login/auth bit """
if "github" not in self.config:
if os.getenv("GITHUB_TOKEN"):
logger.debug("Using GITHUB_TOKEN environment variable for login.")
self.github = Github(os.getenv("GITHUB_TOKEN"))
else:
if "github" not in self.config:
raise ValueError(
"No 'github' key in config, and no GITHUB_TOKEN auth - cannot start up."
)
if (
"ignore_auth" in self.config["github"]
and self.config["github"]["ignore_auth"]
):
self.github = Github()
elif (
"username" not in self.config["github"]
or "password" not in self.config["github"]
):
raise ValueError(
"No authentication details available - cannot start up."
)
else:
self.github = Github(
login_or_token=self.config["github"]["username"],
password=self.config["github"]["password"],
)
return self.github
@pydantic.validate_arguments(config=dict(arbitrary_types_allowed=True))
def add_module(self, module_name: str, module: ModuleType) -> None:
""" adds a module to modules """
self.modules[module_name] = module
def check_rate_limits(self) -> int:
""" checks the rate limits and returns a number of seconds to wait """
rate_limits = self.github.get_rate_limit()
logger.debug(json.dumps(rate_limits, indent=4, default=str, ensure_ascii=False))
sleep_time = 0
for rate_type in RATELIMIT_TYPES:
if hasattr(rate_limits, rate_type):
remaining = getattr(rate_limits, rate_type).remaining
reset = getattr(rate_limits, rate_type).reset.astimezone(pytz.utc)
if RATELIMIT_TYPES[rate_type]["minlimit"] >= remaining:
logger.debug("Need to wait until {}", reset)
now = datetime.now(tz=pytz.utc)
wait_time = reset - now
logger.debug(wait_time)
if wait_time.seconds > 300:
logger.error(
"You're going to need to wait a long time for the {} rate limit to reset... {} seconds.",
reset,
now,
wait_time.seconds,
)
if wait_time.seconds > sleep_time:
sleep_time = wait_time.seconds
else:
logger.debug(
"Rate limit for {} is {}, {} remaining - resets {}",
rate_type,
getattr(rate_limits, rate_type).limit,
remaining,
reset,
)
return sleep_time
def display_report(self) -> None:
""" displays a report """
for repo_name in self.report:
repo = self.report[repo_name]
if not repo:
logger.warning("Empty report for {}, skipping", repo_name)
errors: List[str] = []
warnings: List[str] = []
fixes: List[str] = []
if "errors" in repo and repo["errors"]:
for category in repo["errors"]:
deque(
map(
errors.append,
[
f"{category} - {error}"
for error in repo["errors"].get(category)
],
)
)
if "warnings" in repo and repo["warnings"]:
for category in repo["warnings"]:
deque(
map(
warnings.append,
[
f"{category} - {warning}"
for warning in repo["warnings"].get(category)
],
)
)
if "fixes" in repo and repo["fixes"]:
for category in repo["fixes"]:
deque(
map(
fixes.append,
[
f"{category} - {fix}"
for fix in repo["fixes"].get(category)
],
)
)
if errors or warnings or fixes:
logger.info("Report for {}", repo_name)
# deque forces map to just run
deque(map(logger.error, errors))
deque(map(logger.warning, warnings))
deque(map(logger.success, fixes))
else:
logger.info("Repository {} checks out OK", repo_name)
# @pydantic.validate_arguments(config=dict(arbitrary_types_allowed=True))
def handle_repo(
self,
repo: Repository,
check: Optional[Tuple[str]],
fix: bool,
) -> None:
""" Runs modules against the given repo """
repolinter = RepoLinter(repo)
self.current_repo = repolinter.repository
logger.debug("Current repo: {}", repo.full_name)
if repolinter.repository.archived:
logger.warning(
"Repository {} is archived!", repolinter.repository.full_name
)
if repolinter.repository.parent:
logger.warning("Parent: {}", repolinter.repository.parent.full_name)
logger.debug("Enabled modules: {}", self.modules)
for module in self.modules:
repolinter.run_module(
module=self.modules[module],
check_filter=check,
do_fixes=fix,
)
if not (repolinter.errors or repolinter.warnings):
logger.debug("{} all good", repolinter.repository.full_name)
self.report[repolinter.repository.full_name] = {
"errors": repolinter.errors,
"warnings": repolinter.warnings,
"fixes": repolinter.fixes,
}
time.sleep(self.check_rate_limits())
@pydantic.validate_arguments(config=dict(arbitrary_types_allowed=True))
def get_all_user_repos(github: GithubLinter) -> List[Repository]:
""" simpler filtered listing """
config = load_config()
logger.debug("Pulling all repositories accessible to user.")
repolist = list(github.github.get_user().get_repos())
if config["linter"]["owner_list"]:
logger.debug(
"Filtering by owner list in linter config: {}",
",".join(config["linter"]["owner_list"]),
)
return [
repo
for repo in repolist
if repo.owner.login in config["linter"]["owner_list"]
]
return repolist
@pydantic.validate_arguments(config=dict(arbitrary_types_allowed=True))
def filter_by_repo(
repo_list: List[Repository],
repo_filters: List[str]
) -> List[Repository]:
""" filter repositories by name """
retval = []
for repository in repo_list:
if repository.name in repo_filters:
if repository not in retval:
retval.append(repository)
logger.debug("Adding {} based on name match", repository.name)
continue
for repo_filter in repo_filters:
if "*" in repo_filter:
if wildcard_matcher.match(repository.name, repo_filter):
if repository not in retval:
retval.append(repository)
logger.debug("Adding {} based on wildcard match", repository.name)
continue
return retval
class RepoSearchString(pydantic.BaseModel): #pylint: disable=no-member
""" Result of running generate_repo_search_string"""
needs_post_filtering: bool
search_string: str
@pydantic.validate_arguments
def generate_repo_search_string(
repo_filter: List[str],
owner_filter: List[str],
) -> RepoSearchString:
""" generates the search string,
if there's wildcards in repo_filter, then you
have to search for *everything* then filter it later
"""
has_repo_wildcard = False
for filterstring in repo_filter:
if "*" in filterstring:
has_repo_wildcard = True
logger.debug("Falling back to owner-only search because of a wildcard in the repo_filter ({})", filterstring)
break
if has_repo_wildcard or not repo_filter:
search_string = ""
logger.debug("Adding owner filter")
search_string += " ".join([f"user:{owner.strip()}" for owner in owner_filter])
logger.debug("Search string: {}", search_string)
return RepoSearchString(needs_post_filtering=has_repo_wildcard, search_string=search_string)
search_chunks = []
for owner, repo in itertools.product(owner_filter, repo_filter):
combo = f"repo:{owner.strip()}/{repo.strip()}"
# logger.debug(combo)
search_chunks.append(combo)
search_string = " ".join(search_chunks)
logger.debug("Search string: {}", search_string)
return RepoSearchString(needs_post_filtering=False, search_string=search_string)
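# Illustrative results (hypothetical owner/repo names):
#   generate_repo_search_string(['tools'], ['octocat'])
#     -> search_string 'repo:octocat/tools', needs_post_filtering False
#   generate_repo_search_string(['tool*'], ['octocat'])
#     -> search_string 'user:octocat', needs_post_filtering True (wildcard forces owner-only search)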
@pydantic.validate_arguments(config=dict(arbitrary_types_allowed=True))
def search_repos(
github: GithubLinter,
repo_filter: List[str],
owner_filter: List[str],
) -> List[Repository]:
""" search repos based on cli input """
if not owner_filter:
if "owner_list" in github.config["linter"] and len(github.config["linter"]["owner_list"]) != 0:
owner_filter = github.config["linter"]["owner_list"]
else:
owner_filter = [github.github.get_user().login]
# if it's just the user, then we can query easier
if set(owner_filter) == {github.github.get_user().login} and not repo_filter:
repos = list(get_all_user_repos(github))
else:
search_string = generate_repo_search_string(repo_filter, owner_filter)
try:
repos = list(github.github.search_repositories(search_string.search_string))
except GithubException as error_message:
logger.error("Failed to query repositories.")
if "errors" in error_message.data and len(error_message.data["errors"]) > 0:
errors = error_message.data["errors"]
for error_msg in errors:
logger.error(json.loads(error_msg)["message"])
else:
logger.error(error_message)
return []
if search_string.needs_post_filtering:
repos = filter_by_repo(repos, repo_filter)
if not github.config["linter"].get("check_forks", None):
logger.debug("Filtering out forks")
filtered_by_forks = [repo for repo in repos if repo.fork is False]
repos = filtered_by_forks
logger.debug("Search result: {}", repos)
return repos
| [] | [] | ["GITHUB_TOKEN"] | [] | ["GITHUB_TOKEN"] | python | 1 | 0 | |
sagemaker/05_spot_instances/scripts/train.py
|
from transformers import AutoModelForSequenceClassification, Trainer, TrainingArguments
from transformers.trainer_utils import get_last_checkpoint
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from datasets import load_from_disk
import logging
import sys
import argparse
import os
# Set up logging
logger = logging.getLogger(__name__)
logging.basicConfig(
level=logging.getLevelName("INFO"),
handlers=[logging.StreamHandler(sys.stdout)],
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
if __name__ == "__main__":
logger.info(sys.argv)
parser = argparse.ArgumentParser()
# hyperparameters sent by the client are passed as command-line arguments to the script.
parser.add_argument("--epochs", type=int, default=3)
parser.add_argument("--train-batch-size", type=int, default=32)
parser.add_argument("--eval-batch-size", type=int, default=64)
parser.add_argument("--warmup_steps", type=int, default=500)
parser.add_argument("--model_name", type=str)
parser.add_argument("--learning_rate", type=str, default=5e-5)
parser.add_argument("--output_dir", type=str)
# Data, model, and output directories
parser.add_argument("--output-data-dir", type=str, default=os.environ["SM_OUTPUT_DATA_DIR"])
parser.add_argument("--model-dir", type=str, default=os.environ["SM_MODEL_DIR"])
parser.add_argument("--n_gpus", type=str, default=os.environ["SM_NUM_GPUS"])
parser.add_argument("--training_dir", type=str, default=os.environ["SM_CHANNEL_TRAIN"])
parser.add_argument("--test_dir", type=str, default=os.environ["SM_CHANNEL_TEST"])
args, _ = parser.parse_known_args()
# load datasets
train_dataset = load_from_disk(args.training_dir)
test_dataset = load_from_disk(args.test_dir)
logger.info(f" loaded train_dataset length is: {len(train_dataset)}")
logger.info(f" loaded test_dataset length is: {len(test_dataset)}")
# compute metrics function for binary classification
def compute_metrics(pred):
labels = pred.label_ids
preds = pred.predictions.argmax(-1)
precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average="binary")
acc = accuracy_score(labels, preds)
return {"accuracy": acc, "f1": f1, "precision": precision, "recall": recall}
# download model from model hub
model = AutoModelForSequenceClassification.from_pretrained(args.model_name)
# define training args
training_args = TrainingArguments(
output_dir=args.output_dir,
num_train_epochs=args.epochs,
per_device_train_batch_size=args.train_batch_size,
per_device_eval_batch_size=args.eval_batch_size,
warmup_steps=args.warmup_steps,
evaluation_strategy="epoch",
logging_dir=f"{args.output_data_dir}/logs",
learning_rate=float(args.learning_rate),
)
# create Trainer instance
trainer = Trainer(
model=model,
args=training_args,
compute_metrics=compute_metrics,
train_dataset=train_dataset,
eval_dataset=test_dataset,
)
# train model
if get_last_checkpoint(args.output_dir) is not None:
logger.info("***** continue training *****")
last_checkpoint = get_last_checkpoint(args.output_dir)
trainer.train(resume_from_checkpoint=last_checkpoint)
else:
trainer.train()
# evaluate model
eval_result = trainer.evaluate(eval_dataset=test_dataset)
# write eval results to a file which can be accessed later in the S3 output
with open(os.path.join(args.output_data_dir, "eval_results.txt"), "w") as writer:
print(f"***** Eval results *****")
for key, value in sorted(eval_result.items()):
writer.write(f"{key} = {value}\n")
# Saves the model to s3
trainer.save_model(args.model_dir)
| [] | [] | ["SM_MODEL_DIR", "SM_NUM_GPUS", "SM_CHANNEL_TEST", "SM_OUTPUT_DATA_DIR", "SM_CHANNEL_TRAIN"] | [] | ["SM_MODEL_DIR", "SM_NUM_GPUS", "SM_CHANNEL_TEST", "SM_OUTPUT_DATA_DIR", "SM_CHANNEL_TRAIN"] | python | 5 | 0 | |
benchmarking/lab_driver.py
|
#!/usr/bin/env python
##############################################################################
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import json
import os
from download_benchmarks.download_benchmarks import DownloadBenchmarks
from run_remote import RunRemote
from run_lab import RunLab
from harness import BenchmarkDriver
from repo_driver import RepoDriver as OSS_RepoDriver
from utils.custom_logger import getLogger, setLoggerLevel
parser = argparse.ArgumentParser(description="Download models from dewey")
parser.add_argument("--app_id",
help="The app id you use to upload/download your file for everstore")
parser.add_argument("-b", "--benchmark_file",
help="Specify the json file for the benchmark or a number of benchmarks")
parser.add_argument("--lab", action="store_true",
help="Indicate whether the run is lab run.")
parser.add_argument("--logger_level", default="warning",
choices=["info", "warning", "error"],
help="Specify the logger level")
parser.add_argument("--remote", action="store_true",
help="Submit the job to remote devices to run the benchmark.")
parser.add_argument("--root_model_dir", required=True,
help="The root model directory if the meta data of the model uses "
"relative directory, i.e. the location field starts with //")
parser.add_argument("--token",
help="The token you use to upload/download your file for everstore")
parser.add_argument("-c", "--custom_binary",
help="Specify the custom binary that you want to run.")
parser.add_argument("--pre_built_binary",
help="Specify the pre_built_binary to bypass the building process.")
parser.add_argument("--user_string",
help="If set, use this instead of the $USER env variable as the user string.")
class LabDriver(object):
def __init__(self, raw_args=None):
self.args, self.unknowns = parser.parse_known_args(raw_args)
setLoggerLevel(self.args.logger_level)
def run(self):
if not self.args.lab and not self.args.remote:
assert self.args.benchmark_file, \
"--benchmark_file (-b) must be specified"
if self.args.benchmark_file:
getLogger().info("Checking benchmark files to download")
dbench = DownloadBenchmarks(self.args,
getLogger())
dbench.run(self.args.benchmark_file)
if self.args.remote:
unique_args = [
"--app_id", self.args.app_id,
"--token", self.args.token,
]
if self.args.benchmark_file:
unique_args.extend([
"--benchmark_file", self.args.benchmark_file,
])
if self.args.pre_built_binary:
unique_args.extend([
"--pre_built_binary", self.args.pre_built_binary,
])
if self.args.user_string:
unique_args.extend([
"--user_string", self.args.user_string,
])
# hack to remove --repo from the argument list since python2
# argparse doesn't support allow_abbrev to be False, and it is
# the prefix of --repo_dir
if '--repo' in self.unknowns:
index = self.unknowns.index('--repo')
new_unknowns = self.unknowns[:index]
new_unknowns.extend(self.unknowns[index + 2:])
self.unknowns = new_unknowns
app_class = RunRemote
elif self.args.lab:
unique_args = [
"--app_id", self.args.app_id,
"--token", self.args.token,
]
app_class = RunLab
elif self.args.custom_binary or self.args.pre_built_binary:
if self.args.custom_binary:
binary = self.args.custom_binary
else:
binary = self.args.pre_built_binary
repo_info = {
"treatment": {
"program": binary, "commit": "-1", "commit_time": 0
}
}
unique_args = [
"--info \'", json.dumps(repo_info) + '\'',
"--benchmark_file", self.args.benchmark_file,
]
app_class = BenchmarkDriver
else:
if self.args.user_string:
usr_string = self.args.user_string
else:
usr_string = os.environ["USER"]
unique_args = [
"--benchmark_file", self.args.benchmark_file,
"--user_string", usr_string,
]
app_class = OSS_RepoDriver
raw_args = []
raw_args.extend(unique_args)
raw_args.extend(["--root_model_dir", self.args.root_model_dir])
raw_args.extend(["--logger_level", self.args.logger_level])
raw_args.extend(self.unknowns)
getLogger().info("Running {} with raw_args {}".format(app_class, raw_args))
app = app_class(raw_args=raw_args)
app.run()
if __name__ == "__main__":
raw_args = None
app = LabDriver(raw_args=raw_args)
app.run()
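    # Illustrative only: hypothetical command-line and programmatic invocations
    # of this driver; the benchmark file and model directory paths below are
    # placeholders, not paths from this repository.
    #
    #   python lab_driver.py \
    #       --benchmark_file specs/example_benchmark.json \
    #       --root_model_dir /tmp/models \
    #       --logger_level info
    #
    #   LabDriver(raw_args=["--benchmark_file", "specs/example_benchmark.json",
    #                       "--root_model_dir", "/tmp/models"]).run()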
|
[] |
[] |
[
"USER"
] |
[]
|
["USER"]
|
python
| 1 | 0 | |
upload.py
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import ConfigParser
import cookielib
import fnmatch
import getpass
import logging
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
# The md5 module was deprecated in Python 2.5.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import readline
except ImportError:
pass
try:
import keyring
except ImportError:
keyring = None
# The logging verbosity:
# 0: Errors only.
# 1: Status messages.
# 2: Info logs.
# 3: Debug logs.
verbosity = 1
# The account type used for authentication.
# This line could be changed by the review server (see handler for
# upload.py).
AUTH_ACCOUNT_TYPE = "GOOGLE"
# URL of the default review server. As for AUTH_ACCOUNT_TYPE, this line could be
# changed by the review server (see handler for upload.py).
DEFAULT_REVIEW_SERVER = "codereview.arcbees.com"
# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024
# Constants for version control names. Used by GuessVCSName.
VCS_GIT = "Git"
VCS_MERCURIAL = "Mercurial"
VCS_SUBVERSION = "Subversion"
VCS_UNKNOWN = "Unknown"
# whitelist for non-binary filetypes which do not start with "text/"
# .mm (Objective-C) shows up as application/x-freemind on my Linux box.
TEXT_MIMETYPES = ['application/javascript', 'application/x-javascript',
'application/xml', 'application/x-freemind',
'application/x-sh']
VCS_ABBREVIATIONS = {
VCS_MERCURIAL.lower(): VCS_MERCURIAL,
"hg": VCS_MERCURIAL,
VCS_SUBVERSION.lower(): VCS_SUBVERSION,
"svn": VCS_SUBVERSION,
VCS_GIT.lower(): VCS_GIT,
}
# The result of parsing Subversion's [auto-props] setting.
svn_auto_props_map = None
def GetEmail(prompt):
"""Prompts the user for their email address and returns it.
The last used email address is saved to a file and offered up as a suggestion
to the user. If the user presses enter without typing in anything the last
used email address is used. If the user enters a new address, it is saved
for next time we prompt.
"""
last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
last_email = ""
if os.path.exists(last_email_file_name):
try:
last_email_file = open(last_email_file_name, "r")
last_email = last_email_file.readline().strip("\n")
last_email_file.close()
prompt += " [%s]" % last_email
except IOError, e:
pass
email = raw_input(prompt + ": ").strip()
if email:
try:
last_email_file = open(last_email_file_name, "w")
last_email_file.write(email)
last_email_file.close()
except IOError, e:
pass
else:
email = last_email
return email
def StatusUpdate(msg):
"""Print a status message to stdout.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print msg
def ErrorExit(msg):
"""Print an error message to stderr and exit."""
print >>sys.stderr, msg
sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
"""Raised to indicate there was an error authenticating with ClientLogin."""
def __init__(self, url, code, msg, headers, args):
urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
self.args = args
self.reason = args["Error"]
class AbstractRpcServer(object):
"""Provides a common interface for a simple RPC server."""
def __init__(self, host, auth_function, host_override=None, extra_headers={},
save_cookies=False, account_type=AUTH_ACCOUNT_TYPE):
"""Creates a new HttpRpcServer.
Args:
host: The host to send requests to.
auth_function: A function that takes no arguments and returns an
(email, password) tuple when called. Will be called if authentication
is required.
host_override: The host header to send to the server (defaults to host).
extra_headers: A dict of extra headers to append to every request.
save_cookies: If True, save the authentication cookies to local disk.
If False, use an in-memory cookiejar instead. Subclasses must
implement this functionality. Defaults to False.
account_type: Account type used for authentication. Defaults to
AUTH_ACCOUNT_TYPE.
"""
self.host = host
if (not self.host.startswith("http://") and
not self.host.startswith("https://")):
self.host = "http://" + self.host
self.host_override = host_override
self.auth_function = auth_function
self.authenticated = False
self.extra_headers = extra_headers
self.save_cookies = save_cookies
self.account_type = account_type
self.opener = self._GetOpener()
if self.host_override:
logging.info("Server: %s; Host: %s", self.host, self.host_override)
else:
logging.info("Server: %s", self.host)
def _GetOpener(self):
"""Returns an OpenerDirector for making HTTP requests.
Returns:
A urllib2.OpenerDirector object.
"""
raise NotImplementedError()
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
req = urllib2.Request(url, data=data)
if self.host_override:
req.add_header("Host", self.host_override)
for key, value in self.extra_headers.iteritems():
req.add_header(key, value)
return req
def _GetAuthToken(self, email, password):
"""Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The user's password
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
"""
account_type = self.account_type
if self.host.endswith(".google.com"):
# Needed for use inside Google.
account_type = "HOSTED"
req = self._CreateRequest(
url="https://www.google.com/accounts/ClientLogin",
data=urllib.urlencode({
"Email": email,
"Passwd": password,
"service": "ah",
"source": "rietveld-codereview-upload",
"accountType": account_type,
}),
)
try:
response = self.opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=")
for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg,
e.headers, response_dict)
else:
raise
def _GetAuthCookie(self, auth_token):
"""Fetches authentication cookies for an authentication token.
Args:
auth_token: The authentication token returned by ClientLogin.
Raises:
HTTPError: If there was an error fetching the authentication cookies.
"""
# This is a dummy value to allow us to identify when we're successful.
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
req = self._CreateRequest("%s/_ah/login?%s" %
(self.host, urllib.urlencode(args)))
try:
response = self.opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
response.headers, response.fp)
self.authenticated = True
def _Authenticate(self):
"""Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response (or a 302) and
directs us to authenticate ourselves with ClientLogin.
"""
for i in range(3):
credentials = self.auth_function()
try:
auth_token = self._GetAuthToken(credentials[0], credentials[1])
except ClientLoginError, e:
if e.reason == "BadAuthentication":
print >>sys.stderr, "Invalid username or password."
continue
if e.reason == "CaptchaRequired":
print >>sys.stderr, (
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.\n"
"If you are using a Google Apps account the URL is:\n"
"https://www.google.com/a/yourdomain.com/UnlockCaptcha")
break
if e.reason == "NotVerified":
print >>sys.stderr, "Account not verified."
break
if e.reason == "TermsNotAgreed":
print >>sys.stderr, "User has not agreed to TOS."
break
if e.reason == "AccountDeleted":
print >>sys.stderr, "The user account has been deleted."
break
if e.reason == "AccountDisabled":
print >>sys.stderr, "The user account has been disabled."
break
if e.reason == "ServiceDisabled":
print >>sys.stderr, ("The user's access to the service has been "
"disabled.")
break
if e.reason == "ServiceUnavailable":
print >>sys.stderr, "The service is not available; try again later."
break
raise
self._GetAuthCookie(auth_token)
return
def Send(self, request_path, payload=None,
content_type="application/octet-stream",
timeout=None,
extra_headers=None,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
extra_headers: Dict containing additional HTTP headers that should be
included in the request (string header names mapped to their values),
or None to not include any additional headers.
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
if not self.authenticated:
self._Authenticate()
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "%s%s" % (self.host, request_path)
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
if extra_headers:
for header, value in extra_headers.items():
req.add_header(header, value)
try:
f = self.opener.open(req)
response = f.read()
f.close()
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 401 or e.code == 302:
self._Authenticate()
## elif e.code >= 500 and e.code < 600:
## # Server Error - try again.
## continue
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
"""Provides a simplified RPC-style interface for HTTP requests."""
def _Authenticate(self):
"""Save the cookie jar after authentication."""
super(HttpRpcServer, self)._Authenticate()
if self.save_cookies:
StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
self.cookie_jar.save()
def _GetOpener(self):
"""Returns an OpenerDirector that supports cookies and ignores redirects.
Returns:
A urllib2.OpenerDirector object.
"""
opener = urllib2.OpenerDirector()
opener.add_handler(urllib2.ProxyHandler())
opener.add_handler(urllib2.UnknownHandler())
opener.add_handler(urllib2.HTTPHandler())
opener.add_handler(urllib2.HTTPDefaultErrorHandler())
opener.add_handler(urllib2.HTTPSHandler())
opener.add_handler(urllib2.HTTPErrorProcessor())
if self.save_cookies:
self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
if os.path.exists(self.cookie_file):
try:
self.cookie_jar.load()
self.authenticated = True
StatusUpdate("Loaded authentication cookies from %s" %
self.cookie_file)
except (cookielib.LoadError, IOError):
# Failed to load cookies - just ignore them.
pass
else:
# Create an empty cookie file with mode 600
fd = os.open(self.cookie_file, os.O_CREAT, 0600)
os.close(fd)
# Always chmod the cookie file
os.chmod(self.cookie_file, 0600)
else:
      # Don't save cookies across runs of upload.py.
self.cookie_jar = cookielib.CookieJar()
opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
return opener
parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
parser.add_option("-y", "--assume_yes", action="store_true",
dest="assume_yes", default=False,
help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
dest="verbose", default=1,
help="Print info level logs (default).")
group.add_option("--noisy", action="store_const", const=3,
dest="verbose", help="Print all logs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
default=DEFAULT_REVIEW_SERVER,
metavar="SERVER",
help=("The server to upload to. The format is host[:port]. "
"Defaults to '%default'."))
group.add_option("-e", "--email", action="store", dest="email",
metavar="EMAIL", default=None,
help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
metavar="HOST", default=None,
help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
dest="save_cookies", default=True,
help="Do not save authentication cookies to local disk.")
group.add_option("--account_type", action="store", dest="account_type",
metavar="TYPE", default=AUTH_ACCOUNT_TYPE,
choices=["GOOGLE", "HOSTED"],
help=("Override the default account type "
"(defaults to '%default', "
"valid choices are 'GOOGLE' and 'HOSTED')."))
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
metavar="DESCRIPTION", default=None,
help="Optional description when creating an issue.")
group.add_option("-f", "--description_file", action="store",
dest="description_file", metavar="DESCRIPTION_FILE",
default=None,
help="Optional path of a file that contains "
"the description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
metavar="REVIEWERS", default=None,
help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
metavar="CC", default=None,
help="Add CC (comma separated email addresses).")
group.add_option("--private", action="store_true", dest="private",
default=False,
help="Make the issue restricted to reviewers and those CCed")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
metavar="MESSAGE", default=None,
help="A message to identify the patch. "
"Will prompt if omitted.")
group.add_option("-i", "--issue", type="int", action="store",
metavar="ISSUE", default=None,
help="Issue number to which to add. Defaults to new issue.")
group.add_option("--base_url", action="store", dest="base_url", default=None,
help="Base repository URL (listed as \"Base URL\" when "
"viewing issue). If omitted, will be guessed automatically "
"for SVN repos and left blank for others.")
group.add_option("--download_base", action="store_true",
dest="download_base", default=False,
help="Base files will be downloaded by the server "
"(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
metavar="REV", default=None,
help="Base revision/branch/tree to diff against. Use "
"rev1:rev2 range to review already committed changeset.")
group.add_option("--send_mail", action="store_true",
dest="send_mail", default=False,
help="Send notification email to reviewers.")
group.add_option("--vcs", action="store", dest="vcs",
metavar="VCS", default=None,
help=("Version control system (optional, usually upload.py "
"already guesses the right VCS)."))
group.add_option("--emulate_svn_auto_props", action="store_true",
dest="emulate_svn_auto_props", default=False,
help=("Emulate Subversion's auto properties feature."))
def GetRpcServer(server, email=None, host_override=None, save_cookies=True,
account_type=AUTH_ACCOUNT_TYPE):
"""Returns an instance of an AbstractRpcServer.
Args:
server: String containing the review server URL.
email: String containing user's email address.
host_override: If not None, string containing an alternate hostname to use
in the host header.
save_cookies: Whether authentication cookies should be saved to disk.
account_type: Account type for authentication, either 'GOOGLE'
or 'HOSTED'. Defaults to AUTH_ACCOUNT_TYPE.
Returns:
A new AbstractRpcServer, on which RPC calls can be made.
"""
rpc_server_class = HttpRpcServer
# If this is the dev_appserver, use fake authentication.
host = (host_override or server).lower()
if host == "localhost" or host.startswith("localhost:"):
if email is None:
email = "[email protected]"
logging.info("Using debug user %s. Override with --email" % email)
server = rpc_server_class(
server,
lambda: (email, "password"),
host_override=host_override,
extra_headers={"Cookie":
'dev_appserver_login="%s:False"' % email},
save_cookies=save_cookies,
account_type=account_type)
# Don't try to talk to ClientLogin.
server.authenticated = True
return server
def GetUserCredentials():
"""Prompts the user for a username and password."""
# Create a local alias to the email variable to avoid Python's crazy
# scoping rules.
local_email = email
if local_email is None:
local_email = GetEmail("Email (login for uploading to %s)" % server)
password = None
if keyring:
password = keyring.get_password(host, local_email)
if password is not None:
print "Using password from system keyring."
else:
password = getpass.getpass("Password for %s: " % local_email)
if keyring:
answer = raw_input("Store password in system keyring?(y/N) ").strip()
if answer == "y":
keyring.set_password(host, local_email, password)
return (local_email, password)
return rpc_server_class(server,
GetUserCredentials,
host_override=host_override,
save_cookies=save_cookies)
def EncodeMultipartFormData(fields, files):
"""Encode form fields for multipart/form-data.
Args:
fields: A sequence of (name, value) elements for regular form fields.
files: A sequence of (name, filename, value) elements for data to be
uploaded as files.
Returns:
(content_type, body) ready for httplib.HTTP instance.
Source:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
"""
BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
CRLF = '\r\n'
lines = []
for (key, value) in fields:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"' % key)
lines.append('')
if isinstance(value, unicode):
value = value.encode('utf-8')
lines.append(value)
for (key, filename, value) in files:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename))
lines.append('Content-Type: %s' % GetContentType(filename))
lines.append('')
if isinstance(value, unicode):
value = value.encode('utf-8')
lines.append(value)
lines.append('--' + BOUNDARY + '--')
lines.append('')
body = CRLF.join(lines)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
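# Illustrative only: a hypothetical call such as
#   ctype, body = EncodeMultipartFormData(
#       [("subject", "Fix typo")],
#       [("data", "data.diff", "Index: foo.py\n...")])
# returns ctype == 'multipart/form-data; boundary=-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
# and a CRLF-joined body containing one form-data part per field and file.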
def GetContentType(filename):
"""Helper to guess the content-type from the filename."""
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
universal_newlines=True,
env=os.environ):
"""Executes a command and returns the output from stdout and the return code.
Args:
command: Command to execute.
print_output: If True, the output is printed to stdout.
If False, both stdout and stderr are ignored.
universal_newlines: Use universal_newlines flag (default: True).
Returns:
Tuple (output, return code)
"""
logging.info("Running %s", command)
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=use_shell, universal_newlines=universal_newlines,
env=env)
if print_output:
output_array = []
while True:
line = p.stdout.readline()
if not line:
break
print line.strip("\n")
output_array.append(line)
output = "".join(output_array)
else:
output = p.stdout.read()
p.wait()
errout = p.stderr.read()
if print_output and errout:
print >>sys.stderr, errout
p.stdout.close()
p.stderr.close()
return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
print_output=False, env=os.environ):
data, retcode = RunShellWithReturnCode(command, print_output,
universal_newlines, env)
if retcode:
ErrorExit("Got error status from %s:\n%s" % (command, data))
if not silent_ok and not data:
ErrorExit("No output from %s" % command)
return data
class VersionControlSystem(object):
"""Abstract base class providing an interface to the VCS."""
def __init__(self, options):
"""Constructor.
Args:
options: Command line options.
"""
self.options = options
def PostProcessDiff(self, diff):
"""Return the diff with any special post processing this VCS needs, e.g.
to include an svn-style "Index:"."""
return diff
def GenerateDiff(self, args):
"""Return the current diff as a string.
Args:
args: Extra arguments to pass to the diff command.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def CheckForUnknownFiles(self):
"""Show an "are you sure?" prompt if there are unknown files."""
unknown_files = self.GetUnknownFiles()
if unknown_files:
print "The following files are not added to version control:"
for line in unknown_files:
print line
      prompt = "Are you sure you want to continue? (y/N) "
answer = raw_input(prompt).strip()
if answer != "y":
ErrorExit("User aborted")
def GetBaseFile(self, filename):
"""Get the content of the upstream version of a file.
Returns:
A tuple (base_content, new_content, is_binary, status)
base_content: The contents of the base file.
new_content: For text files, this is empty. For binary files, this is
the contents of the new file, since the diff output won't contain
information to reconstruct the current file.
is_binary: True iff the file is binary.
status: The status of the file.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetBaseFiles(self, diff):
"""Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
"""
files = {}
for line in diff.splitlines(True):
if line.startswith('Index:') or line.startswith('Property changes on:'):
unused, filename = line.split(':', 1)
# On Windows if a file has property changes its filename uses '\'
# instead of '/'.
filename = filename.strip().replace('\\', '/')
files[filename] = self.GetBaseFile(filename)
return files
def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
files):
"""Uploads the base files (and if necessary, the current ones as well)."""
def UploadFile(filename, file_id, content, is_binary, status, is_base):
"""Uploads a file to the server."""
file_too_large = False
if is_base:
type = "base"
else:
type = "current"
if len(content) > MAX_UPLOAD_SIZE:
print ("Not uploading the %s file for %s because it's too large." %
(type, filename))
file_too_large = True
content = ""
checksum = md5(content).hexdigest()
if options.verbose > 0 and not file_too_large:
print "Uploading %s file for %s" % (type, filename)
url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
form_fields = [("filename", filename),
("status", status),
("checksum", checksum),
("is_binary", str(is_binary)),
("is_current", str(not is_base)),
]
if file_too_large:
form_fields.append(("file_too_large", "1"))
if options.email:
form_fields.append(("user", options.email))
ctype, body = EncodeMultipartFormData(form_fields,
[("data", filename, content)])
response_body = rpc_server.Send(url, body,
content_type=ctype)
if not response_body.startswith("OK"):
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
patches = dict()
[patches.setdefault(v, k) for k, v in patch_list]
for filename in patches.keys():
base_content, new_content, is_binary, status = files[filename]
file_id_str = patches.get(filename)
if file_id_str.find("nobase") != -1:
base_content = None
file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
file_id = int(file_id_str)
if base_content != None:
UploadFile(filename, file_id, base_content, is_binary, status, True)
if new_content != None:
UploadFile(filename, file_id, new_content, is_binary, status, False)
def IsImage(self, filename):
"""Returns true if the filename has an image extension."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False
return mimetype.startswith("image/")
def IsBinary(self, filename):
"""Returns true if the guessed mimetyped isnt't in text group."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False # e.g. README, "real" binaries usually have an extension
# special case for text files which don't start with text/
if mimetype in TEXT_MIMETYPES:
return False
return not mimetype.startswith("text/")
class SubversionVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Subversion."""
def __init__(self, options):
super(SubversionVCS, self).__init__(options)
if self.options.revision:
match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
if not match:
ErrorExit("Invalid Subversion revision %s." % self.options.revision)
self.rev_start = match.group(1)
self.rev_end = match.group(3)
else:
self.rev_start = self.rev_end = None
# Cache output from "svn list -r REVNO dirname".
    # Keys: dirname, Values: 2-tuple (output for start rev and end rev).
self.svnls_cache = {}
# Base URL is required to fetch files deleted in an older revision.
# Result is cached to not guess it over and over again in GetBaseFile().
required = self.options.download_base or self.options.revision is not None
self.svn_base = self._GuessBase(required)
def GuessBase(self, required):
"""Wrapper for _GuessBase."""
return self.svn_base
def _GuessBase(self, required):
"""Returns the SVN base URL.
Args:
required: If true, exits if the url can't be guessed, otherwise None is
returned.
"""
info = RunShell(["svn", "info"])
for line in info.splitlines():
words = line.split()
if len(words) == 2 and words[0] == "URL:":
url = words[1]
scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
username, netloc = urllib.splituser(netloc)
if username:
logging.info("Removed username from base URL")
if netloc.endswith("svn.python.org"):
if netloc == "svn.python.org":
if path.startswith("/projects/"):
path = path[9:]
elif netloc != "[email protected]":
ErrorExit("Unrecognized Python URL: %s" % url)
base = "http://svn.python.org/view/*checkout*%s/" % path
logging.info("Guessed Python base = %s", base)
elif netloc.endswith("svn.collab.net"):
if path.startswith("/repos/"):
path = path[6:]
base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
logging.info("Guessed CollabNet base = %s", base)
elif netloc.endswith(".googlecode.com"):
path = path + "/"
base = urlparse.urlunparse(("http", netloc, path, params,
query, fragment))
logging.info("Guessed Google Code base = %s", base)
else:
path = path + "/"
base = urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
logging.info("Guessed base = %s", base)
return base
if required:
ErrorExit("Can't find URL in output from svn info")
return None
def GenerateDiff(self, args):
cmd = ["svn", "diff"]
if self.options.revision:
cmd += ["-r", self.options.revision]
cmd.extend(args)
data = RunShell(cmd)
count = 0
for line in data.splitlines():
if line.startswith("Index:") or line.startswith("Property changes on:"):
count += 1
logging.info(line)
if not count:
ErrorExit("No valid patches found in output from svn diff")
return data
def _CollapseKeywords(self, content, keyword_str):
"""Collapses SVN keywords."""
# svn cat translates keywords but svn diff doesn't. As a result of this
# behavior patching.PatchChunks() fails with a chunk mismatch error.
# This part was originally written by the Review Board development team
# who had the same problem (http://reviews.review-board.org/r/276/).
# Mapping of keywords to known aliases
svn_keywords = {
# Standard keywords
'Date': ['Date', 'LastChangedDate'],
'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
'Author': ['Author', 'LastChangedBy'],
'HeadURL': ['HeadURL', 'URL'],
'Id': ['Id'],
# Aliases
'LastChangedDate': ['LastChangedDate', 'Date'],
'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
'LastChangedBy': ['LastChangedBy', 'Author'],
'URL': ['URL', 'HeadURL'],
}
def repl(m):
if m.group(2):
return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
return "$%s$" % m.group(1)
keywords = [keyword
for name in keyword_str.split(" ")
for keyword in svn_keywords.get(name, [])]
return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
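  # Illustrative only: with keyword_str "Date Revision", a hypothetical base
  # file line such as "$Revision: 1234 $" is collapsed to "$Revision$",
  # matching what "svn diff" emits for the working copy.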
def GetUnknownFiles(self):
status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
unknown_files = []
for line in status.split("\n"):
if line and line[0] == "?":
unknown_files.append(line)
return unknown_files
def ReadFile(self, filename):
"""Returns the contents of a file."""
file = open(filename, 'rb')
result = ""
try:
result = file.read()
finally:
file.close()
return result
def GetStatus(self, filename):
"""Returns the status of a file."""
if not self.options.revision:
status = RunShell(["svn", "status", "--ignore-externals", filename])
if not status:
ErrorExit("svn status returned no output for %s" % filename)
status_lines = status.splitlines()
# If file is in a cl, the output will begin with
# "\n--- Changelist 'cl_name':\n". See
# http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
if (len(status_lines) == 3 and
not status_lines[0] and
status_lines[1].startswith("--- Changelist")):
status = status_lines[2]
else:
status = status_lines[0]
# If we have a revision to diff against we need to run "svn list"
# for the old and the new revision and compare the results to get
# the correct status for a file.
else:
dirname, relfilename = os.path.split(filename)
if dirname not in self.svnls_cache:
cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
out, returncode = RunShellWithReturnCode(cmd)
if returncode:
ErrorExit("Failed to get status for %s." % filename)
old_files = out.splitlines()
args = ["svn", "list"]
if self.rev_end:
args += ["-r", self.rev_end]
cmd = args + [dirname or "."]
out, returncode = RunShellWithReturnCode(cmd)
if returncode:
ErrorExit("Failed to run command %s" % cmd)
self.svnls_cache[dirname] = (old_files, out.splitlines())
old_files, new_files = self.svnls_cache[dirname]
if relfilename in old_files and relfilename not in new_files:
status = "D "
elif relfilename in old_files and relfilename in new_files:
status = "M "
else:
status = "A "
return status
def GetBaseFile(self, filename):
status = self.GetStatus(filename)
base_content = None
new_content = None
# If a file is copied its status will be "A +", which signifies
# "addition-with-history". See "svn st" for more information. We need to
# upload the original file or else diff parsing will fail if the file was
# edited.
if status[0] == "A" and status[3] != "+":
# We'll need to upload the new content if we're adding a binary file
# since diff's output won't contain it.
mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
silent_ok=True)
base_content = ""
is_binary = bool(mimetype) and not mimetype.startswith("text/")
if is_binary and self.IsImage(filename):
new_content = self.ReadFile(filename)
elif (status[0] in ("M", "D", "R") or
(status[0] == "A" and status[3] == "+") or # Copied file.
(status[0] == " " and status[1] == "M")): # Property change.
args = []
if self.options.revision:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
else:
# Don't change filename, it's needed later.
url = filename
args += ["-r", "BASE"]
cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
mimetype, returncode = RunShellWithReturnCode(cmd)
if returncode:
# File does not exist in the requested revision.
# Reset mimetype, it contains an error message.
mimetype = ""
get_base = False
is_binary = bool(mimetype) and not mimetype.startswith("text/")
if status[0] == " ":
# Empty base content just to force an upload.
base_content = ""
elif is_binary:
if self.IsImage(filename):
get_base = True
if status[0] == "M":
if not self.rev_end:
new_content = self.ReadFile(filename)
else:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
new_content = RunShell(["svn", "cat", url],
universal_newlines=True, silent_ok=True)
else:
base_content = ""
else:
get_base = True
if get_base:
if is_binary:
universal_newlines = False
else:
universal_newlines = True
if self.rev_start:
# "svn cat -r REV delete_file.txt" doesn't work. cat requires
# the full URL with "@REV" appended instead of using "-r" option.
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
base_content = RunShell(["svn", "cat", url],
universal_newlines=universal_newlines,
silent_ok=True)
else:
base_content, ret_code = RunShellWithReturnCode(
["svn", "cat", filename], universal_newlines=universal_newlines)
if ret_code and status[0] == "R":
# It's a replaced file without local history (see issue208).
# The base file needs to be fetched from the server.
url = "%s/%s" % (self.svn_base, filename)
base_content = RunShell(["svn", "cat", url],
universal_newlines=universal_newlines,
silent_ok=True)
elif ret_code:
            ErrorExit("Got error status from 'svn cat %s'" % filename)
if not is_binary:
args = []
if self.rev_start:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
else:
url = filename
args += ["-r", "BASE"]
cmd = ["svn"] + args + ["propget", "svn:keywords", url]
keywords, returncode = RunShellWithReturnCode(cmd)
if keywords and not returncode:
base_content = self._CollapseKeywords(base_content, keywords)
else:
StatusUpdate("svn status returned unexpected output: %s" % status)
sys.exit(1)
return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Git."""
def __init__(self, options):
super(GitVCS, self).__init__(options)
# Map of filename -> (hash before, hash after) of base file.
# Hashes for "no such file" are represented as None.
self.hashes = {}
# Map of new filename -> old filename for renames.
self.renames = {}
def PostProcessDiff(self, gitdiff):
"""Converts the diff output to include an svn-style "Index:" line as well
as record the hashes of the files, so we can upload them along with our
diff."""
    # Special hash used by git to indicate "no such content".
NULL_HASH = "0"*40
def IsFileNew(filename):
return filename in self.hashes and self.hashes[filename][0] is None
def AddSubversionPropertyChange(filename):
"""Add svn's property change information into the patch if given file is
new file.
We use Subversion's auto-props setting to retrieve its property.
See http://svnbook.red-bean.com/en/1.1/ch07.html#svn-ch-7-sect-1.3.2 for
Subversion's [auto-props] setting.
"""
if self.options.emulate_svn_auto_props and IsFileNew(filename):
svnprops = GetSubversionPropertyChanges(filename)
if svnprops:
svndiff.append("\n" + svnprops + "\n")
svndiff = []
filecount = 0
filename = None
for line in gitdiff.splitlines():
match = re.match(r"diff --git a/(.*) b/(.*)$", line)
if match:
# Add auto property here for previously seen file.
if filename is not None:
AddSubversionPropertyChange(filename)
filecount += 1
# Intentionally use the "after" filename so we can show renames.
filename = match.group(2)
svndiff.append("Index: %s\n" % filename)
if match.group(1) != match.group(2):
self.renames[match.group(2)] = match.group(1)
else:
# The "index" line in a git diff looks like this (long hashes elided):
# index 82c0d44..b2cee3f 100755
# We want to save the left hash, as that identifies the base file.
match = re.match(r"index (\w+)\.\.(\w+)", line)
if match:
before, after = (match.group(1), match.group(2))
if before == NULL_HASH:
before = None
if after == NULL_HASH:
after = None
self.hashes[filename] = (before, after)
svndiff.append(line + "\n")
if not filecount:
ErrorExit("No valid patches found in output from git diff")
# Add auto property for the last seen file.
assert filename is not None
AddSubversionPropertyChange(filename)
return "".join(svndiff)
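  # Illustrative only: a hypothetical git diff header pair
  #   diff --git a/foo.py b/foo.py
  #   index 82c0d44..b2cee3f 100755
  # is rewritten so the returned text starts the file section with
  #   Index: foo.py
  # while self.hashes["foo.py"] records ("82c0d44", "b2cee3f").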
def GenerateDiff(self, extra_args):
extra_args = extra_args[:]
if self.options.revision:
extra_args = [self.options.revision] + extra_args
# --no-ext-diff is broken in some versions of Git, so try to work around
# this by overriding the environment (but there is still a problem if the
# git config key "diff.external" is used).
env = os.environ.copy()
if 'GIT_EXTERNAL_DIFF' in env: del env['GIT_EXTERNAL_DIFF']
return RunShell(["git", "diff", "--no-ext-diff", "--full-index", "-M"]
+ extra_args, env=env)
def GetUnknownFiles(self):
status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
silent_ok=True)
return status.splitlines()
def GetFileContent(self, file_hash, is_binary):
"""Returns the content of a file identified by its git hash."""
data, retcode = RunShellWithReturnCode(["git", "show", file_hash],
universal_newlines=not is_binary)
if retcode:
ErrorExit("Got error status from 'git show %s'" % file_hash)
return data
def GetBaseFile(self, filename):
hash_before, hash_after = self.hashes.get(filename, (None,None))
base_content = None
new_content = None
is_binary = self.IsBinary(filename)
status = None
if filename in self.renames:
status = "A +" # Match svn attribute name for renames.
if filename not in self.hashes:
# If a rename doesn't change the content, we never get a hash.
base_content = RunShell(["git", "show", "HEAD:" + filename])
elif not hash_before:
status = "A"
base_content = ""
elif not hash_after:
status = "D"
else:
status = "M"
is_image = self.IsImage(filename)
# Grab the before/after content if we need it.
# We should include file contents if it's text or it's an image.
if not is_binary or is_image:
# Grab the base content if we don't have it already.
if base_content is None and hash_before:
base_content = self.GetFileContent(hash_before, is_binary)
      # Only include the "after" file if it's an image; otherwise it
      # is reconstructed from the diff.
if is_image and hash_after:
new_content = self.GetFileContent(hash_after, is_binary)
return (base_content, new_content, is_binary, status)
class MercurialVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Mercurial."""
def __init__(self, options, repo_dir):
super(MercurialVCS, self).__init__(options)
# Absolute path to repository (we can be in a subdir)
self.repo_dir = os.path.normpath(repo_dir)
# Compute the subdir
cwd = os.path.normpath(os.getcwd())
assert cwd.startswith(self.repo_dir)
self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
if self.options.revision:
self.base_rev = self.options.revision
else:
self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()
def _GetRelPath(self, filename):
"""Get relative path of a file according to the current directory,
given its logical path in the repo."""
assert filename.startswith(self.subdir), (filename, self.subdir)
return filename[len(self.subdir):].lstrip(r"\/")
def GenerateDiff(self, extra_args):
# If no file specified, restrict to the current subdir
extra_args = extra_args or ["."]
cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
data = RunShell(cmd, silent_ok=True)
svndiff = []
filecount = 0
for line in data.splitlines():
m = re.match("diff --git a/(\S+) b/(\S+)", line)
if m:
        # Modify line to make it look as if it comes from svn diff.
# With this modification no changes on the server side are required
# to make upload.py work with Mercurial repos.
# NOTE: for proper handling of moved/copied files, we have to use
# the second filename.
filename = m.group(2)
svndiff.append("Index: %s" % filename)
svndiff.append("=" * 67)
filecount += 1
logging.info(line)
else:
svndiff.append(line)
if not filecount:
ErrorExit("No valid patches found in output from hg diff")
return "\n".join(svndiff) + "\n"
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
args = []
status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
silent_ok=True)
unknown_files = []
for line in status.splitlines():
st, fn = line.split(" ", 1)
if st == "?":
unknown_files.append(fn)
return unknown_files
def GetBaseFile(self, filename):
# "hg status" and "hg cat" both take a path relative to the current subdir
# rather than to the repo root, but "hg diff" has given us the full path
# to the repo root.
base_content = ""
new_content = None
is_binary = False
oldrelpath = relpath = self._GetRelPath(filename)
# "hg status -C" returns two lines for moved/copied files, one otherwise
out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
out = out.splitlines()
# HACK: strip error message about missing file/directory if it isn't in
# the working copy
if out[0].startswith('%s: ' % relpath):
out = out[1:]
if len(out) > 1:
# Moved/copied => considered as modified, use old filename to
# retrieve base contents
oldrelpath = out[1].strip()
status = "M"
else:
status, _ = out[0].split(' ', 1)
if ":" in self.base_rev:
base_rev = self.base_rev.split(":", 1)[0]
else:
base_rev = self.base_rev
if status != "A":
base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
silent_ok=True)
is_binary = "\0" in base_content # Mercurial's heuristic
if status != "R":
new_content = open(relpath, "rb").read()
is_binary = is_binary or "\0" in new_content
if is_binary and base_content:
# Fetch again without converting newlines
base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
silent_ok=True, universal_newlines=False)
if not is_binary or not self.IsImage(relpath):
new_content = None
return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
"""Splits a patch into separate pieces for each file.
Args:
data: A string containing the output of svn diff.
Returns:
A list of 2-tuple (filename, text) where text is the svn diff output
pertaining to filename.
"""
patches = []
filename = None
diff = []
for line in data.splitlines(True):
new_filename = None
if line.startswith('Index:'):
unused, new_filename = line.split(':', 1)
new_filename = new_filename.strip()
elif line.startswith('Property changes on:'):
unused, temp_filename = line.split(':', 1)
# When a file is modified, paths use '/' between directories, however
# when a property is modified '\' is used on Windows. Make them the same
# otherwise the file shows up twice.
temp_filename = temp_filename.strip().replace('\\', '/')
if temp_filename != filename:
# File has property changes but no modifications, create a new diff.
new_filename = temp_filename
if new_filename:
if filename and diff:
patches.append((filename, ''.join(diff)))
filename = new_filename
diff = [line]
continue
if diff is not None:
diff.append(line)
if filename and diff:
patches.append((filename, ''.join(diff)))
return patches
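# Illustrative only: for a hypothetical diff containing "Index: a.py" with its
# hunks followed by "Index: b.py", SplitPatch returns
#   [("a.py", <diff text for a.py>), ("b.py", <diff text for b.py>)].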
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
"""Uploads a separate patch for each file in the diff output.
Returns a list of [patch_key, filename] for each file.
"""
patches = SplitPatch(data)
rv = []
for patch in patches:
if len(patch[1]) > MAX_UPLOAD_SIZE:
print ("Not uploading the patch for " + patch[0] +
" because the file is too large.")
continue
form_fields = [("filename", patch[0])]
if not options.download_base:
form_fields.append(("content_upload", "1"))
files = [("data", "data.diff", patch[1])]
ctype, body = EncodeMultipartFormData(form_fields, files)
url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
print "Uploading patch for " + patch[0]
response_body = rpc_server.Send(url, body, content_type=ctype)
lines = response_body.splitlines()
if not lines or lines[0] != "OK":
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
rv.append([lines[1], patch[0]])
return rv
def GuessVCSName():
"""Helper to guess the version control system.
This examines the current directory, guesses which VersionControlSystem
  we're using, and returns a string indicating which VCS is detected.
Returns:
A pair (vcs, output). vcs is a string indicating which VCS was detected
and is one of VCS_GIT, VCS_MERCURIAL, VCS_SUBVERSION, or VCS_UNKNOWN.
output is a string containing any interesting output from the vcs
detection routine, or None if there is nothing interesting.
"""
# Mercurial has a command to get the base directory of a repository
# Try running it, but don't die if we don't have hg installed.
# NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
try:
out, returncode = RunShellWithReturnCode(["hg", "root"])
if returncode == 0:
return (VCS_MERCURIAL, out.strip())
except OSError, (errno, message):
if errno != 2: # ENOENT -- they don't have hg installed.
raise
# Subversion has a .svn in all working directories.
if os.path.isdir('.svn'):
logging.info("Guessed VCS = Subversion")
return (VCS_SUBVERSION, None)
# Git has a command to test if you're in a git tree.
# Try running it, but don't die if we don't have git installed.
try:
out, returncode = RunShellWithReturnCode(["git", "rev-parse",
"--is-inside-work-tree"])
if returncode == 0:
return (VCS_GIT, None)
except OSError, (errno, message):
if errno != 2: # ENOENT -- they don't have git installed.
raise
return (VCS_UNKNOWN, None)
def GuessVCS(options):
"""Helper to guess the version control system.
This verifies any user-specified VersionControlSystem (by command line
or environment variable). If the user didn't specify one, this examines
the current directory, guesses which VersionControlSystem we're using,
and returns an instance of the appropriate class. Exit with an error
if we can't figure it out.
Returns:
A VersionControlSystem instance. Exits if the VCS can't be guessed.
"""
vcs = options.vcs
if not vcs:
vcs = os.environ.get("CODEREVIEW_VCS")
if vcs:
v = VCS_ABBREVIATIONS.get(vcs.lower())
if v is None:
ErrorExit("Unknown version control system %r specified." % vcs)
(vcs, extra_output) = (v, None)
else:
(vcs, extra_output) = GuessVCSName()
if vcs == VCS_MERCURIAL:
if extra_output is None:
extra_output = RunShell(["hg", "root"]).strip()
return MercurialVCS(options, extra_output)
elif vcs == VCS_SUBVERSION:
return SubversionVCS(options)
elif vcs == VCS_GIT:
return GitVCS(options)
ErrorExit(("Could not guess version control system. "
"Are you in a working copy directory?"))
def CheckReviewer(reviewer):
"""Validate a reviewer -- either a nickname or an email addres.
Args:
reviewer: A nickname or an email address.
Calls ErrorExit() if it is an invalid email address.
"""
if "@" not in reviewer:
return # Assume nickname
parts = reviewer.split("@")
if len(parts) > 2:
ErrorExit("Invalid email address: %r" % reviewer)
assert len(parts) == 2
if "." not in parts[1]:
ErrorExit("Invalid email address: %r" % reviewer)
def LoadSubversionAutoProperties():
"""Returns the content of [auto-props] section of Subversion's config file as
a dictionary.
Returns:
    A dictionary whose key-value pairs correspond to the [auto-props] section's
    key-value pairs.
    In the following cases, an empty dictionary is returned:
- config file doesn't exist, or
- 'enable-auto-props' is not set to 'true-like-value' in [miscellany].
"""
  # TODO(hayato): Windows users might use a different path for the configuration file.
subversion_config = os.path.expanduser("~/.subversion/config")
if not os.path.exists(subversion_config):
return {}
config = ConfigParser.ConfigParser()
config.read(subversion_config)
if (config.has_section("miscellany") and
config.has_option("miscellany", "enable-auto-props") and
config.getboolean("miscellany", "enable-auto-props") and
config.has_section("auto-props")):
props = {}
for file_pattern in config.options("auto-props"):
props[file_pattern] = ParseSubversionPropertyValues(
config.get("auto-props", file_pattern))
return props
else:
return {}
def ParseSubversionPropertyValues(props):
"""Parse the given property value which comes from [auto-props] section and
returns a list whose element is a (svn_prop_key, svn_prop_value) pair.
See the following doctest for example.
>>> ParseSubversionPropertyValues('svn:eol-style=LF')
[('svn:eol-style', 'LF')]
>>> ParseSubversionPropertyValues('svn:mime-type=image/jpeg')
[('svn:mime-type', 'image/jpeg')]
>>> ParseSubversionPropertyValues('svn:eol-style=LF;svn:executable')
[('svn:eol-style', 'LF'), ('svn:executable', '*')]
"""
key_value_pairs = []
for prop in props.split(";"):
key_value = prop.split("=")
assert len(key_value) <= 2
if len(key_value) == 1:
# If value is not given, use '*' as a Subversion's convention.
key_value_pairs.append((key_value[0], "*"))
else:
key_value_pairs.append((key_value[0], key_value[1]))
return key_value_pairs
def GetSubversionPropertyChanges(filename):
"""Return a Subversion's 'Property changes on ...' string, which is used in
the patch file.
Args:
filename: filename whose property might be set by [auto-props] config.
Returns:
A string like 'Property changes on |filename| ...' if given |filename|
matches any entries in [auto-props] section. None, otherwise.
"""
global svn_auto_props_map
if svn_auto_props_map is None:
svn_auto_props_map = LoadSubversionAutoProperties()
all_props = []
for file_pattern, props in svn_auto_props_map.items():
if fnmatch.fnmatch(filename, file_pattern):
all_props.extend(props)
if all_props:
return FormatSubversionPropertyChanges(filename, all_props)
return None
def FormatSubversionPropertyChanges(filename, props):
"""Returns Subversion's 'Property changes on ...' strings using given filename
and properties.
Args:
filename: filename
props: A list whose element is a (svn_prop_key, svn_prop_value) pair.
Returns:
A string which can be used in the patch file for Subversion.
See the following doctest for example.
>>> print FormatSubversionPropertyChanges('foo.cc', [('svn:eol-style', 'LF')])
Property changes on: foo.cc
___________________________________________________________________
Added: svn:eol-style
+ LF
<BLANKLINE>
"""
prop_changes_lines = [
"Property changes on: %s" % filename,
"___________________________________________________________________"]
for key, value in props:
prop_changes_lines.append("Added: " + key)
prop_changes_lines.append(" + " + value)
return "\n".join(prop_changes_lines) + "\n"
def RealMain(argv, data=None):
"""The real main function.
Args:
argv: Command line arguments.
data: Diff contents. If None (default) the diff is generated by
the VersionControlSystem implementation returned by GuessVCS().
Returns:
A 2-tuple (issue id, patchset id).
The patchset id is None if the base files are not uploaded by this
script (applies only to SVN checkouts).
"""
logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
"%(lineno)s %(message)s "))
os.environ['LC_ALL'] = 'C'
options, args = parser.parse_args(argv[1:])
global verbosity
verbosity = options.verbose
if verbosity >= 3:
logging.getLogger().setLevel(logging.DEBUG)
elif verbosity >= 2:
logging.getLogger().setLevel(logging.INFO)
vcs = GuessVCS(options)
base = options.base_url
if isinstance(vcs, SubversionVCS):
# Guessing the base field is only supported for Subversion.
# Note: Fetching base files may become deprecated in future releases.
guessed_base = vcs.GuessBase(options.download_base)
if base:
if guessed_base and base != guessed_base:
print "Using base URL \"%s\" from --base_url instead of \"%s\"" % \
(base, guessed_base)
else:
base = guessed_base
if not base and options.download_base:
options.download_base = True
logging.info("Enabled upload of base file")
if not options.assume_yes:
vcs.CheckForUnknownFiles()
if data is None:
data = vcs.GenerateDiff(args)
data = vcs.PostProcessDiff(data)
files = vcs.GetBaseFiles(data)
if verbosity >= 1:
print "Upload server:", options.server, "(change with -s/--server)"
if options.issue:
prompt = "Message describing this patch set: "
else:
prompt = "New issue subject: "
message = options.message or raw_input(prompt).strip()
if not message:
ErrorExit("A non-empty message is required")
rpc_server = GetRpcServer(options.server,
options.email,
options.host,
options.save_cookies,
options.account_type)
form_fields = [("subject", message)]
if base:
form_fields.append(("base", base))
if options.issue:
form_fields.append(("issue", str(options.issue)))
if options.email:
form_fields.append(("user", options.email))
if options.reviewers:
for reviewer in options.reviewers.split(','):
CheckReviewer(reviewer)
form_fields.append(("reviewers", options.reviewers))
if options.cc:
for cc in options.cc.split(','):
CheckReviewer(cc)
form_fields.append(("cc", options.cc))
description = options.description
if options.description_file:
if options.description:
ErrorExit("Can't specify description and description_file")
file = open(options.description_file, 'r')
description = file.read()
file.close()
if description:
form_fields.append(("description", description))
  # Send a hash of all the base files so the server can determine if a copy
  # already exists in an earlier patchset.
base_hashes = ""
for file, info in files.iteritems():
if not info[0] is None:
checksum = md5(info[0]).hexdigest()
if base_hashes:
base_hashes += "|"
base_hashes += checksum + ":" + file
form_fields.append(("base_hashes", base_hashes))
if options.private:
if options.issue:
print "Warning: Private flag ignored when updating an existing issue."
else:
form_fields.append(("private", "1"))
# If we're uploading base files, don't send the email before the uploads, so
# that it contains the file status.
if options.send_mail and options.download_base:
form_fields.append(("send_mail", "1"))
if not options.download_base:
form_fields.append(("content_upload", "1"))
if len(data) > MAX_UPLOAD_SIZE:
print "Patch is large, so uploading file patches separately."
uploaded_diff_file = []
form_fields.append(("separate_patches", "1"))
else:
uploaded_diff_file = [("data", "data.diff", data)]
ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
response_body = rpc_server.Send("/upload", body, content_type=ctype)
patchset = None
if not options.download_base or not uploaded_diff_file:
lines = response_body.splitlines()
if len(lines) >= 2:
msg = lines[0]
patchset = lines[1].strip()
patches = [x.split(" ", 1) for x in lines[2:]]
else:
msg = response_body
else:
msg = response_body
StatusUpdate(msg)
if not response_body.startswith("Issue created.") and \
not response_body.startswith("Issue updated."):
sys.exit(0)
issue = msg[msg.rfind("/")+1:]
if not uploaded_diff_file:
result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
if not options.download_base:
patches = result
if not options.download_base:
vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
if options.send_mail:
rpc_server.Send("/" + issue + "/mail", payload="")
return issue, patchset
def main():
try:
RealMain(sys.argv)
except KeyboardInterrupt:
print
StatusUpdate("Interrupted.")
sys.exit(1)
if __name__ == "__main__":
main()
|
[] |
[] |
[
"LC_ALL",
"CODEREVIEW_VCS"
] |
[]
|
["LC_ALL", "CODEREVIEW_VCS"]
|
python
| 2 | 0 | |
tests/pkg/client/client.go
|
package client
import (
"flag"
"fmt"
"os"
"time"
"github.com/juju/errors"
"github.com/pingcap/tidb-operator/pkg/client/clientset/versioned"
"github.com/pingcap/tidb-operator/pkg/client/clientset/versioned/typed/pingcap.com/v1alpha1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
var (
masterUrl string
kubeconfigPath string
)
func init() {
flag.StringVar(&kubeconfigPath, "kubeconfig", "",
"path to a kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&masterUrl, "master", "",
"address of the Kubernetes API server. Overrides any value in kubeconfig. "+
"Only required if out-of-cluster.")
}
func NewCliOrDie() (versioned.Interface, kubernetes.Interface) {
cfg, err := GetConfig()
if err != nil {
panic(err)
}
return buildClientsOrDie(cfg)
}
func GetConfig() (*rest.Config, error) {
// If kubeconfigPath provided, use that
if len(kubeconfigPath) > 0 {
return clientcmd.BuildConfigFromFlags(masterUrl, kubeconfigPath)
}
	// If an env variable is specified with the config location, use that
if len(os.Getenv("KUBECONFIG")) > 0 {
return clientcmd.BuildConfigFromFlags(masterUrl, os.Getenv("KUBECONFIG"))
}
// If no explicit location, try the in-cluster config
if c, err := rest.InClusterConfig(); err == nil {
return c, nil
}
return nil, fmt.Errorf("could not locate a kubeconfig")
}
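// exampleResolveConfig is an illustrative sketch added by the editor; it is
// not part of the original file and the function name is hypothetical. It
// shows the intended call pattern: GetConfig applies the precedence documented
// above (explicit -kubeconfig flag, then the KUBECONFIG environment variable,
// then in-cluster config), so callers only handle the single "nothing found"
// error case.
func exampleResolveConfig() (*rest.Config, error) {
	cfg, err := GetConfig()
	if err != nil {
		return nil, fmt.Errorf("no usable Kubernetes configuration: %v", err)
	}
	return cfg, nil
}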
type Client interface {
kubernetes.Interface
PingcapV1alpha1() v1alpha1.PingcapV1alpha1Interface
}
func Union(kube kubernetes.Interface, tidb versioned.Interface) Client {
return &client{Interface: kube, pingcap: tidb}
}
func NewOrDie() Client {
cfg, err := clientcmd.BuildConfigFromFlags(masterUrl, kubeconfigPath)
if err != nil {
panic(err)
}
return Union(kubernetes.NewForConfigOrDie(cfg), versioned.NewForConfigOrDie(cfg))
}
type client struct {
kubernetes.Interface
pingcap versioned.Interface
}
func (cli *client) PingcapV1alpha1() v1alpha1.PingcapV1alpha1Interface {
return cli.pingcap.PingcapV1alpha1()
}
func SetConfigPath(path string) {
kubeconfigPath = path
}
func SetMasterURL(url string) {
masterUrl = url
}
func LoadConfig() (*rest.Config, error) {
cfg, err := clientcmd.BuildConfigFromFlags(masterUrl, kubeconfigPath)
return cfg, errors.Trace(err)
}
func buildClientsOrDie(cfg *rest.Config) (versioned.Interface, kubernetes.Interface) {
cfg.Timeout = 30 * time.Second
cli, err := versioned.NewForConfig(cfg)
if err != nil {
panic(err)
}
kubeCli, err := kubernetes.NewForConfig(cfg)
if err != nil {
panic(err)
}
return cli, kubeCli
}
|
[
"\"KUBECONFIG\"",
"\"KUBECONFIG\""
] |
[] |
[
"KUBECONFIG"
] |
[]
|
["KUBECONFIG"]
|
go
| 1 | 0 | |
api/admin/init.go
|
package admin
import (
"os"
"github.com/WeixinCloud/wxcloudrun-wxcomponent/comm/encrypt"
"github.com/WeixinCloud/wxcloudrun-wxcomponent/comm/log"
"github.com/WeixinCloud/wxcloudrun-wxcomponent/db/dao"
)
// InitAdmin initializes the administrator account
func InitAdmin(username, password string) error {
if err := dao.AddUserRecordIfNeeded(username, password); err != nil {
log.Errorf("InitAuth err %v", err)
return err
}
return nil
}
// Init initializes the administrator account
func Init() error {
username := os.Getenv("MYSQL_USERNAME")
password := os.Getenv("MYSQL_PASSWORD")
log.Debugf("GetUser user[%s] pwd[%s]", username, password)
	// hash the password the same way the website login does
md5Pwd := encrypt.GenerateMd5(password)
_ = InitAdmin(username, md5Pwd)
return nil
}
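// exampleInit is an illustrative sketch added by the editor; it is not part of
// the original file and the credential values are placeholders. Init reads the
// admin login from MYSQL_USERNAME and MYSQL_PASSWORD, MD5-hashes the password
// to match the website login, and seeds the user table if needed.
func exampleInit() error {
	os.Setenv("MYSQL_USERNAME", "admin")     // placeholder value
	os.Setenv("MYSQL_PASSWORD", "change-me") // placeholder value
	return Init()
}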
|
[
"\"MYSQL_USERNAME\"",
"\"MYSQL_PASSWORD\""
] |
[] |
[
"MYSQL_PASSWORD",
"MYSQL_USERNAME"
] |
[]
|
["MYSQL_PASSWORD", "MYSQL_USERNAME"]
|
go
| 2 | 0 | |
server/common/config.go
|
package common
import (
"net/http"
"os"
"github.com/gin-gonic/gin"
)
func ConfigHandler(c *gin.Context) {
glusterApi := os.Getenv("GLUSTER_API_URL")
nfsApi := os.Getenv("NFS_API_URL")
ddcApi := os.Getenv("DDC_API")
c.JSON(http.StatusOK, FeatureToggleResponse{
DDC: ddcApi != "",
Gluster: glusterApi != "",
Nfs: nfsApi != "",
})
}
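// exampleRegisterConfigRoute is an illustrative sketch added by the editor; it
// is not part of the original file and the "/api/config" path is an assumed
// example, not taken from the real routing code. It shows that ConfigHandler
// is a plain gin handler and needs only a single route registration.
func exampleRegisterConfigRoute() *gin.Engine {
	r := gin.Default()
	r.GET("/api/config", ConfigHandler)
	return r
}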
|
[
"\"GLUSTER_API_URL\"",
"\"NFS_API_URL\"",
"\"DDC_API\""
] |
[] |
[
"NFS_API_URL",
"GLUSTER_API_URL",
"DDC_API"
] |
[]
|
["NFS_API_URL", "GLUSTER_API_URL", "DDC_API"]
|
go
| 3 | 0 | |
gopath/src/vendor/vendored/vendored.go
|
package vendored
import (
"fmt"
"go/build"
"os"
"path/filepath"
"runtime"
"strings"
)
var (
gGoPath string
gGoRoot string
)
func init() {
PackageMain()
panic("never refer this package")
}
func PackageMain() {
fmt.Println("in package: " + GetPackagePath())
TellImport("nothing")
}
func GetPackageName() string {
_, name := filepath.Split(GetPackagePath())
return name
}
func TellImport(imp string) {
fmt.Printf("%s import [%s]\n", GetPackageName(), imp)
}
func AssertRelatedGoPath(related string) {
root := GetGopath()
path := GetPackagePath()
AssertRelatedPath(root, related, path)
}
func AssertRelatedGoRoot(related string) {
root := GetGoRoot()
path := GetPackagePath()
AssertRelatedPath(root, related, path)
}
func AssertRelatedLocalRoot(related string) {
root := filepath.Join(GetLocalRoot(), "src")
path := GetPackagePath()
AssertRelatedPath(root, related, path)
}
func AssertRelatedPath(root, related, dest string) {
if filepath.Join(root, related) != filepath.Clean(dest) {
panic(fmt.Sprintf("PathCheck: root=[%s][%s] dest=%s not match", root, related, dest))
}
}
func GetPackagePath() string {
return GetThisFilepath(1)
}
func GetGoRoot() string {
if gGoRoot == "" {
s := os.Getenv("GOROOT")
if ss := strings.Split(s, ";"); ss != nil && len(ss) > 0 {
gGoRoot = filepath.Clean(ss[0] + "/src")
}
}
return gGoRoot
}
func GetGopath() string {
if gGoPath == "" {
s := os.Getenv("GOPATH")
if ss := strings.Split(s, ";"); ss != nil && len(ss) > 0 {
gGoPath = filepath.Clean(ss[0] + "/src")
}
}
return gGoPath
}
func GetLocalRoot() string {
return GetLocalRootOfCaller(3)
}
func GetLocalRootOfCaller(depth int) string {
return build.Default.SearchLocalRoot(filepath.Dir(GetThisFilepath(depth)))
}
func GetThisFilepath(depth int) string {
thisFile, _ := FileLine(depth)
thisFilePath := filepath.Dir(thisFile)
return thisFilePath
}
func GetReleatGopath(s string) string {
ss, _ := filepath.Rel(GetGopath(), s)
return ss
}
//the caller's file/line info
func FileLine(depth int) (file string, line int) {
if _, __file, __line, __ok := runtime.Caller(depth); __ok {
file, line = __file, __line
}
return
}
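// whereAmI is an illustrative sketch added by the editor; it is not part of
// the original file and the name is hypothetical. FileLine(depth) walks the
// call stack via runtime.Caller, so depth 1 reports the location of the
// FileLine call itself, i.e. the line inside this helper.
func whereAmI() string {
	file, line := FileLine(1)
	return fmt.Sprintf("%s:%d", file, line)
}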
|
[
"\"GOROOT\"",
"\"GOPATH\""
] |
[] |
[
"GOPATH",
"GOROOT"
] |
[]
|
["GOPATH", "GOROOT"]
|
go
| 2 | 0 | |
pymeasure/experiment/config.py
|
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2020 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import configparser
import logging
import os
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
def set_file(filename):
os.environ['CONFIG'] = filename
def get_config(filename='default_config.ini'):
if 'CONFIG' in os.environ.keys():
filename = os.environ['CONFIG']
config = configparser.ConfigParser()
config.read(filename)
return config
# noinspection PyProtectedMember
def set_mpl_rcparams(config):
if 'matplotlib.rcParams' in config._sections.keys():
import matplotlib
for key in config._sections['matplotlib.rcParams']:
matplotlib.rcParams[key] = eval(config._sections['matplotlib.rcParams'][key])
|
[] |
[] |
[
"CONFIG"
] |
[]
|
["CONFIG"]
|
python
| 1 | 0 | |
server.go
|
package main
import (
"hkpcug/newsgroup-gql/graph"
"hkpcug/newsgroup-gql/graph/generated"
"log"
"net/http"
"os"
"github.com/99designs/gqlgen/graphql/handler"
"github.com/99designs/gqlgen/graphql/playground"
)
const defaultPort = "8080"
func main() {
port := os.Getenv("PORT")
if port == "" {
port = defaultPort
}
srv := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: &graph.Resolver{}}))
http.Handle("/", playground.Handler("GraphQL playground", "/query"))
http.Handle("/query", srv)
log.Printf("connect to http://localhost:%s/ for GraphQL playground", port)
log.Fatal(http.ListenAndServe(":"+port, nil))
}
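// listenPort is an illustrative sketch added by the editor; it is not part of
// the original file. It restates the port resolution done inline in main: the
// PORT environment variable wins, otherwise defaultPort ("8080") is used.
func listenPort() string {
	if p := os.Getenv("PORT"); p != "" {
		return p
	}
	return defaultPort
}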
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
certbot-ci/certbot_integration_tests/utils/misc.py
|
"""
Misc module contains stateless functions that could be used during pytest execution,
or outside during setup/teardown of the integration tests environment.
"""
import contextlib
import errno
import multiprocessing
import os
import re
import shutil
import stat
import sys
import tempfile
import time
import warnings
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.serialization import Encoding
from cryptography.hazmat.primitives.serialization import NoEncryption
from cryptography.hazmat.primitives.serialization import PrivateFormat
from cryptography.x509 import load_pem_x509_certificate
from OpenSSL import crypto
import pkg_resources
import requests
from six.moves import SimpleHTTPServer
from six.moves import socketserver
from certbot_integration_tests.utils.constants import \
PEBBLE_ALTERNATE_ROOTS, PEBBLE_MANAGEMENT_URL
RSA_KEY_TYPE = 'rsa'
ECDSA_KEY_TYPE = 'ecdsa'
def _suppress_x509_verification_warnings():
try:
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
except ImportError:
# Handle old versions of request with vendorized urllib3
# pylint: disable=no-member
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def check_until_timeout(url, attempts=30):
"""
Wait and block until given url responds with status 200, or raise an exception
after the specified number of attempts.
:param str url: the URL to test
:param int attempts: the number of times to try to connect to the URL
:raise ValueError: exception raised if unable to reach the URL
"""
_suppress_x509_verification_warnings()
for _ in range(attempts):
time.sleep(1)
try:
if requests.get(url, verify=False).status_code == 200:
return
except requests.exceptions.ConnectionError:
pass
raise ValueError('Error, url did not respond after {0} attempts: {1}'.format(attempts, url))
class GracefulTCPServer(socketserver.TCPServer):
"""
This subclass of TCPServer allows graceful reuse of an address that has
just been released by another instance of TCPServer.
"""
allow_reuse_address = True
def _run_server(port):
GracefulTCPServer(('', port), SimpleHTTPServer.SimpleHTTPRequestHandler).serve_forever()
@contextlib.contextmanager
def create_http_server(port):
"""
Setup and start an HTTP server for the given TCP port.
This server stays active for the lifetime of the context, and is automatically
stopped with context exit, while its temporary webroot is deleted.
:param int port: the TCP port to use
:return str: the temporary webroot attached to this server
"""
current_cwd = os.getcwd()
webroot = tempfile.mkdtemp()
process = multiprocessing.Process(target=_run_server, args=(port,))
try:
# SimpleHTTPServer is designed to serve files from the current working directory at the
# time it starts. So we temporarily change the cwd to our crafted webroot before launch.
try:
os.chdir(webroot)
process.start()
finally:
os.chdir(current_cwd)
check_until_timeout('http://localhost:{0}/'.format(port))
yield webroot
finally:
try:
if process.is_alive():
process.terminate()
process.join() # Block until process is effectively terminated
finally:
shutil.rmtree(webroot)
def list_renewal_hooks_dirs(config_dir):
"""
Find and return paths of all hook directories for the given certbot config directory
:param str config_dir: path to the certbot config directory
:return str[]: list of path to the standard hooks directory for this certbot instance
"""
renewal_hooks_root = os.path.join(config_dir, 'renewal-hooks')
return [os.path.join(renewal_hooks_root, item) for item in ['pre', 'deploy', 'post']]
def generate_test_file_hooks(config_dir, hook_probe):
"""
Create a suite of certbot hook scripts and put them in the relevant hook directory
for the given certbot configuration directory. These scripts, when executed, will write
specific verbs in the given hook_probe file to allow asserting they have effectively
been executed. The deploy hook also checks that the renewal environment variables are set.
:param str config_dir: current certbot config directory
:param hook_probe: path to the hook probe to test hook scripts execution
"""
hook_path = pkg_resources.resource_filename('certbot_integration_tests', 'assets/hook.py')
for hook_dir in list_renewal_hooks_dirs(config_dir):
        # We want an equivalent of bash `mkdir -p $HOOK_DIR`, which does not fail if one folder of
        # the hierarchy already exists. os.makedirs does not behave that way. Python 3 has an
        # optional parameter `exist_ok` to not fail on an existing dir, but Python 2.7 does not.
        # So we wrap the call in a try/except instead. To be removed when Python 2.7 support is dropped.
try:
os.makedirs(hook_dir)
except OSError as error:
if error.errno != errno.EEXIST:
raise
if os.name != 'nt':
entrypoint_script_path = os.path.join(hook_dir, 'entrypoint.sh')
entrypoint_script = '''\
#!/usr/bin/env bash
set -e
"{0}" "{1}" "{2}" >> "{3}"
'''.format(sys.executable, hook_path, entrypoint_script_path, hook_probe)
else:
entrypoint_script_path = os.path.join(hook_dir, 'entrypoint.ps1')
entrypoint_script = '''\
& "{0}" "{1}" "{2}" >> "{3}"
'''.format(sys.executable, hook_path, entrypoint_script_path, hook_probe)
with open(entrypoint_script_path, 'w') as file_h:
file_h.write(entrypoint_script)
os.chmod(entrypoint_script_path, os.stat(entrypoint_script_path).st_mode | stat.S_IEXEC)
@contextlib.contextmanager
def manual_http_hooks(http_server_root, http_port):
"""
Generate suitable http-01 hooks command for test purpose in the given HTTP
server webroot directory. These hooks command use temporary python scripts
that are deleted upon context exit.
:param str http_server_root: path to the HTTP server configured to serve http-01 challenges
:param int http_port: HTTP port that the HTTP server listen on
:return (str, str): a tuple containing the authentication hook and cleanup hook commands
"""
tempdir = tempfile.mkdtemp()
try:
auth_script_path = os.path.join(tempdir, 'auth.py')
with open(auth_script_path, 'w') as file_h:
file_h.write('''\
#!/usr/bin/env python
import os
import requests
import time
import sys
challenge_dir = os.path.join('{0}', '.well-known', 'acme-challenge')
os.makedirs(challenge_dir)
challenge_file = os.path.join(challenge_dir, os.environ.get('CERTBOT_TOKEN'))
with open(challenge_file, 'w') as file_h:
file_h.write(os.environ.get('CERTBOT_VALIDATION'))
url = 'http://localhost:{1}/.well-known/acme-challenge/' + os.environ.get('CERTBOT_TOKEN')
for _ in range(0, 10):
time.sleep(1)
try:
        if requests.get(url).status_code == 200:
sys.exit(0)
except requests.exceptions.ConnectionError:
pass
raise ValueError('Error, url did not respond after 10 attempts: {{0}}'.format(url))
'''.format(http_server_root.replace('\\', '\\\\'), http_port))
os.chmod(auth_script_path, 0o755)
cleanup_script_path = os.path.join(tempdir, 'cleanup.py')
with open(cleanup_script_path, 'w') as file_h:
file_h.write('''\
#!/usr/bin/env python
import os
import shutil
well_known = os.path.join('{0}', '.well-known')
shutil.rmtree(well_known)
'''.format(http_server_root.replace('\\', '\\\\')))
os.chmod(cleanup_script_path, 0o755)
yield ('{0} {1}'.format(sys.executable, auth_script_path),
'{0} {1}'.format(sys.executable, cleanup_script_path))
finally:
shutil.rmtree(tempdir)
def generate_csr(domains, key_path, csr_path, key_type=RSA_KEY_TYPE):
"""
Generate a private key, and a CSR for the given domains using this key.
:param domains: the domain names to include in the CSR
:type domains: `list` of `str`
:param str key_path: path to the private key that will be generated
:param str csr_path: path to the CSR that will be generated
:param str key_type: type of the key (misc.RSA_KEY_TYPE or misc.ECDSA_KEY_TYPE)
"""
if key_type == RSA_KEY_TYPE:
key = crypto.PKey()
key.generate_key(crypto.TYPE_RSA, 2048)
elif key_type == ECDSA_KEY_TYPE:
with warnings.catch_warnings():
# Ignore a warning on some old versions of cryptography
warnings.simplefilter('ignore', category=PendingDeprecationWarning)
key = ec.generate_private_key(ec.SECP384R1(), default_backend())
key = key.private_bytes(encoding=Encoding.PEM, format=PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=NoEncryption())
key = crypto.load_privatekey(crypto.FILETYPE_PEM, key)
else:
raise ValueError('Invalid key type: {0}'.format(key_type))
with open(key_path, 'wb') as file_h:
file_h.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, key))
req = crypto.X509Req()
san = ', '.join('DNS:{0}'.format(item) for item in domains)
san_constraint = crypto.X509Extension(b'subjectAltName', False, san.encode('utf-8'))
req.add_extensions([san_constraint])
req.set_pubkey(key)
req.set_version(2)
req.sign(key, 'sha256')
with open(csr_path, 'wb') as file_h:
file_h.write(crypto.dump_certificate_request(crypto.FILETYPE_ASN1, req))
def read_certificate(cert_path):
"""
Load the certificate from the provided path, and return a human readable version
of it (TEXT mode).
:param str cert_path: the path to the certificate
:returns: the TEXT version of the certificate, as it would be displayed by openssl binary
"""
with open(cert_path, 'rb') as file:
data = file.read()
cert = crypto.load_certificate(crypto.FILETYPE_PEM, data)
return crypto.dump_certificate(crypto.FILETYPE_TEXT, cert).decode('utf-8')
def load_sample_data_path(workspace):
"""
Load the certbot configuration example designed to make OCSP tests, and return its path
:param str workspace: current test workspace directory path
:returns: the path to the loaded sample data directory
:rtype: str
"""
original = pkg_resources.resource_filename('certbot_integration_tests', 'assets/sample-config')
copied = os.path.join(workspace, 'sample-config')
shutil.copytree(original, copied, symlinks=True)
if os.name == 'nt':
# Fix the symlinks on Windows if GIT is not configured to create them upon checkout
for lineage in [
'a.encryption-example.com',
'b.encryption-example.com',
'c.encryption-example.com',
]:
current_live = os.path.join(copied, 'live', lineage)
for name in os.listdir(current_live):
if name != 'README':
current_file = os.path.join(current_live, name)
if not os.path.islink(current_file):
with open(current_file) as file_h:
src = file_h.read()
os.unlink(current_file)
os.symlink(os.path.join(current_live, src), current_file)
return copied
def echo(keyword, path=None):
"""
Generate a platform independent executable command
that echoes the given keyword into the given file.
:param keyword: the keyword to echo (must be a single keyword)
:param path: path to the file were keyword is echoed
:return: the executable command
"""
if not re.match(r'^\w+$', keyword):
raise ValueError('Error, keyword `{0}` is not a single keyword.'
.format(keyword))
return '{0} -c "from __future__ import print_function; print(\'{1}\')"{2}'.format(
os.path.basename(sys.executable), keyword, ' >> "{0}"'.format(path) if path else '')
def get_acme_issuers(context):
"""Gets the list of one or more issuer certificates from the ACME server used by the
context.
:param context: the testing context.
:return: the `list of x509.Certificate` representing the list of issuers.
"""
# TODO: in fact, Boulder has alternate chains in config-next/, just not yet in config/.
if context.acme_server != "pebble":
raise NotImplementedError()
_suppress_x509_verification_warnings()
issuers = []
for i in range(PEBBLE_ALTERNATE_ROOTS + 1):
request = requests.get(PEBBLE_MANAGEMENT_URL + '/intermediates/{}'.format(i), verify=False)
issuers.append(load_pem_x509_certificate(request.content, default_backend()))
return issuers
|
[] |
[] |
[
"CERTBOT_VALIDATION",
"CERTBOT_TOKEN"
] |
[]
|
["CERTBOT_VALIDATION", "CERTBOT_TOKEN"]
|
python
| 2 | 0 | |
vendor/github.com/hashicorp/vault/vault/expiration.go
|
package vault
import (
"context"
"encoding/json"
"errors"
"fmt"
"os"
"path"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/armon/go-metrics"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/errwrap"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/helper/consts"
"github.com/hashicorp/vault/helper/jsonutil"
"github.com/hashicorp/vault/helper/locksutil"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
const (
// expirationSubPath is the sub-path used for the expiration manager
// view. This is nested under the system view.
expirationSubPath = "expire/"
// leaseViewPrefix is the prefix used for the ID based lookup of leases.
leaseViewPrefix = "id/"
// tokenViewPrefix is the prefix used for the token based lookup of leases.
tokenViewPrefix = "token/"
// maxRevokeAttempts limits how many revoke attempts are made
maxRevokeAttempts = 6
// revokeRetryBase is a baseline retry time
revokeRetryBase = 10 * time.Second
	// maxLeaseTTL is the default maximum lease duration
maxLeaseTTL = 32 * 24 * time.Hour
	// defaultLeaseTTL is the default lease duration used when no lease is specified
defaultLeaseTTL = maxLeaseTTL
	// maxLeaseThreshold is the maximum lease count before generating a log warning
maxLeaseThreshold = 256000
)
type pendingInfo struct {
exportLeaseTimes *leaseEntry
timer *time.Timer
}
// ExpirationManager is used by the Core to manage leases. Secrets
// can provide a lease, meaning that they can be renewed or revoked.
// If a secret is not renewed in timely manner, it may be expired, and
// the ExpirationManager will handle doing automatic revocation.
type ExpirationManager struct {
router *Router
idView *BarrierView
tokenView *BarrierView
tokenStore *TokenStore
logger log.Logger
pending map[string]pendingInfo
pendingLock sync.RWMutex
tidyLock *int32
restoreMode *int32
restoreModeLock sync.RWMutex
restoreRequestLock sync.RWMutex
restoreLocks []*locksutil.LockEntry
restoreLoaded sync.Map
quitCh chan struct{}
coreStateLock *sync.RWMutex
quitContext context.Context
leaseCheckCounter *uint32
logLeaseExpirations bool
}
// NewExpirationManager creates a new ExpirationManager that is backed
// using a given view, and uses the provided router for revocation.
func NewExpirationManager(c *Core, view *BarrierView, logger log.Logger) *ExpirationManager {
exp := &ExpirationManager{
router: c.router,
idView: view.SubView(leaseViewPrefix),
tokenView: view.SubView(tokenViewPrefix),
tokenStore: c.tokenStore,
logger: logger,
pending: make(map[string]pendingInfo),
tidyLock: new(int32),
// new instances of the expiration manager will go immediately into
// restore mode
restoreMode: new(int32),
restoreLocks: locksutil.CreateLocks(),
quitCh: make(chan struct{}),
coreStateLock: &c.stateLock,
quitContext: c.activeContext,
leaseCheckCounter: new(uint32),
logLeaseExpirations: os.Getenv("VAULT_SKIP_LOGGING_LEASE_EXPIRATIONS") == "",
}
*exp.restoreMode = 1
if exp.logger == nil {
opts := log.LoggerOptions{Name: "expiration_manager"}
exp.logger = log.New(&opts)
}
return exp
}
// setupExpiration is invoked after we've loaded the mount table to
// initialize the expiration manager
func (c *Core) setupExpiration() error {
c.metricsMutex.Lock()
defer c.metricsMutex.Unlock()
// Create a sub-view
view := c.systemBarrierView.SubView(expirationSubPath)
// Create the manager
mgr := NewExpirationManager(c, view, c.logger.ResetNamed("expiration"))
c.expiration = mgr
// Link the token store to this
c.tokenStore.SetExpirationManager(mgr)
// Restore the existing state
c.logger.Info("restoring leases")
errorFunc := func() {
c.logger.Error("shutting down")
if err := c.Shutdown(); err != nil {
c.logger.Error("error shutting down core: %v", err)
}
}
go c.expiration.Restore(errorFunc)
return nil
}
// stopExpiration is used to stop the expiration manager before
// sealing the Vault.
func (c *Core) stopExpiration() error {
if c.expiration != nil {
if err := c.expiration.Stop(); err != nil {
return err
}
c.metricsMutex.Lock()
defer c.metricsMutex.Unlock()
c.expiration = nil
}
return nil
}
// lockLease takes out a lock for a given lease ID
func (m *ExpirationManager) lockLease(leaseID string) {
locksutil.LockForKey(m.restoreLocks, leaseID).Lock()
}
// unlockLease unlocks a given lease ID
func (m *ExpirationManager) unlockLease(leaseID string) {
locksutil.LockForKey(m.restoreLocks, leaseID).Unlock()
}
// inRestoreMode returns if we are currently in restore mode
func (m *ExpirationManager) inRestoreMode() bool {
return atomic.LoadInt32(m.restoreMode) == 1
}
// Tidy cleans up the dangling storage entries for leases. It scans the storage
// view to find all the available leases, checks if the token embedded in it is
// either empty or invalid, and in both cases it revokes them. It also uses
// a token cache to avoid multiple lookups of the same token ID. It is normally
// not required to use the API that invokes this. This is only intended to
// clean up the corrupt storage due to bugs.
func (m *ExpirationManager) Tidy() error {
if m.inRestoreMode() {
return errors.New("cannot run tidy while restoring leases")
}
var tidyErrors *multierror.Error
logger := m.logger.Named("tidy")
if !atomic.CompareAndSwapInt32(m.tidyLock, 0, 1) {
logger.Warn("tidy operation on leases is already in progress")
return nil
}
defer atomic.CompareAndSwapInt32(m.tidyLock, 1, 0)
logger.Info("beginning tidy operation on leases")
defer logger.Info("finished tidy operation on leases")
// Create a cache to keep track of looked up tokens
tokenCache := make(map[string]bool)
var countLease, revokedCount, deletedCountInvalidToken, deletedCountEmptyToken int64
tidyFunc := func(leaseID string) {
countLease++
if countLease%500 == 0 {
logger.Info("tidying leases", "progress", countLease)
}
le, err := m.loadEntry(leaseID)
if err != nil {
tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf(fmt.Sprintf("failed to load the lease ID %q: {{err}}", leaseID), err))
return
}
if le == nil {
tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf(fmt.Sprintf("nil entry for lease ID %q: {{err}}", leaseID), err))
return
}
var isValid, ok bool
revokeLease := false
if le.ClientToken == "" {
logger.Debug("revoking lease which has an empty token", "lease_id", leaseID)
revokeLease = true
deletedCountEmptyToken++
goto REVOKE_CHECK
}
isValid, ok = tokenCache[le.ClientToken]
if !ok {
saltedID, err := m.tokenStore.SaltID(m.quitContext, le.ClientToken)
if err != nil {
tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to lookup salt id: {{err}}", err))
return
}
lock := locksutil.LockForKey(m.tokenStore.tokenLocks, le.ClientToken)
lock.RLock()
te, err := m.tokenStore.lookupSalted(m.quitContext, saltedID, true)
lock.RUnlock()
if err != nil {
tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to lookup token: {{err}}", err))
return
}
if te == nil {
logger.Debug("revoking lease which holds an invalid token", "lease_id", leaseID)
revokeLease = true
deletedCountInvalidToken++
tokenCache[le.ClientToken] = false
} else {
tokenCache[le.ClientToken] = true
}
goto REVOKE_CHECK
} else {
if isValid {
return
}
logger.Debug("revoking lease which contains an invalid token", "lease_id", leaseID)
revokeLease = true
deletedCountInvalidToken++
goto REVOKE_CHECK
}
REVOKE_CHECK:
if revokeLease {
// Force the revocation and skip going through the token store
// again
err = m.revokeCommon(leaseID, true, true)
if err != nil {
tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf(fmt.Sprintf("failed to revoke an invalid lease with ID %q: {{err}}", leaseID), err))
return
}
revokedCount++
}
}
if err := logical.ScanView(m.quitContext, m.idView, tidyFunc); err != nil {
return err
}
logger.Info("number of leases scanned", "count", countLease)
logger.Info("number of leases which had empty tokens", "count", deletedCountEmptyToken)
logger.Info("number of leases which had invalid tokens", "count", deletedCountInvalidToken)
logger.Info("number of leases successfully revoked", "count", revokedCount)
return tidyErrors.ErrorOrNil()
}
// Restore is used to recover the lease states when starting.
// This is used after starting the vault.
func (m *ExpirationManager) Restore(errorFunc func()) (retErr error) {
defer func() {
// Turn off restore mode. We can do this safely without the lock because
// if restore mode finished successfully, restore mode was already
// disabled with the lock. In an error state, this will allow the
// Stop() function to shut everything down.
atomic.StoreInt32(m.restoreMode, 0)
switch {
case retErr == nil:
case errwrap.Contains(retErr, ErrBarrierSealed.Error()):
// Don't run error func because we're likely already shutting down
m.logger.Warn("barrier sealed while restoring leases, stopping lease loading")
retErr = nil
default:
m.logger.Error("error restoring leases", "error", retErr)
if errorFunc != nil {
errorFunc()
}
}
}()
// Accumulate existing leases
m.logger.Debug("collecting leases")
existing, err := logical.CollectKeys(m.quitContext, m.idView)
if err != nil {
return errwrap.Wrapf("failed to scan for leases: {{err}}", err)
}
m.logger.Debug("leases collected", "num_existing", len(existing))
// Make the channels used for the worker pool
broker := make(chan string)
quit := make(chan bool)
// Buffer these channels to prevent deadlocks
errs := make(chan error, len(existing))
result := make(chan struct{}, len(existing))
// Use a wait group
wg := &sync.WaitGroup{}
// Create 64 workers to distribute work to
for i := 0; i < consts.ExpirationRestoreWorkerCount; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for {
select {
case leaseID, ok := <-broker:
// broker has been closed, we are done
if !ok {
return
}
err := m.processRestore(leaseID)
if err != nil {
errs <- err
continue
}
// Send message that lease is done
result <- struct{}{}
// quit early
case <-quit:
return
case <-m.quitCh:
return
}
}
}()
}
// Distribute the collected keys to the workers in a go routine
wg.Add(1)
go func() {
defer wg.Done()
for i, leaseID := range existing {
if i > 0 && i%500 == 0 {
m.logger.Debug("leases loading", "progress", i)
}
select {
case <-quit:
return
case <-m.quitCh:
return
default:
broker <- leaseID
}
}
// Close the broker, causing worker routines to exit
close(broker)
}()
// Ensure all keys on the chan are processed
for i := 0; i < len(existing); i++ {
select {
case err := <-errs:
// Close all go routines
close(quit)
return err
case <-m.quitCh:
close(quit)
return nil
case <-result:
}
}
// Let all go routines finish
wg.Wait()
m.restoreModeLock.Lock()
m.restoreLoaded = sync.Map{}
m.restoreLocks = nil
atomic.StoreInt32(m.restoreMode, 0)
m.restoreModeLock.Unlock()
m.logger.Info("lease restore complete")
return nil
}
// processRestore takes a lease and restores it in the expiration manager if it has
// not already been seen
func (m *ExpirationManager) processRestore(leaseID string) error {
m.restoreRequestLock.RLock()
defer m.restoreRequestLock.RUnlock()
// Check if the lease has been seen
if _, ok := m.restoreLoaded.Load(leaseID); ok {
return nil
}
m.lockLease(leaseID)
defer m.unlockLease(leaseID)
// Check again with the lease locked
if _, ok := m.restoreLoaded.Load(leaseID); ok {
return nil
}
// Load lease and restore expiration timer
_, err := m.loadEntryInternal(leaseID, true, false)
if err != nil {
return err
}
return nil
}
// Stop is used to prevent further automatic revocations.
// This must be called before sealing the view.
func (m *ExpirationManager) Stop() error {
// Stop all the pending expiration timers
m.logger.Debug("stop triggered")
defer m.logger.Debug("finished stopping")
// Do this before stopping pending timers to avoid potential races with
// expiring timers
close(m.quitCh)
m.pendingLock.Lock()
for _, pending := range m.pending {
pending.timer.Stop()
}
m.pending = make(map[string]pendingInfo)
m.pendingLock.Unlock()
if m.inRestoreMode() {
for {
if !m.inRestoreMode() {
break
}
time.Sleep(10 * time.Millisecond)
}
}
return nil
}
// Revoke is used to revoke a secret named by the given LeaseID
func (m *ExpirationManager) Revoke(leaseID string) error {
defer metrics.MeasureSince([]string{"expire", "revoke"}, time.Now())
return m.revokeCommon(leaseID, false, false)
}
// revokeCommon does the heavy lifting. If force is true, we ignore a problem
// during revocation and still remove entries/index/lease timers
func (m *ExpirationManager) revokeCommon(leaseID string, force, skipToken bool) error {
defer metrics.MeasureSince([]string{"expire", "revoke-common"}, time.Now())
// Load the entry
le, err := m.loadEntry(leaseID)
if err != nil {
return err
}
// If there is no entry, nothing to revoke
if le == nil {
return nil
}
// Revoke the entry
if !skipToken || le.Auth == nil {
if err := m.revokeEntry(le); err != nil {
if !force {
return err
}
if m.logger.IsWarn() {
m.logger.Warn("revocation from the backend failed, but in force mode so ignoring", "error", err)
}
}
}
// Delete the entry
if err := m.deleteEntry(leaseID); err != nil {
return err
}
// Delete the secondary index, but only if it's a leased secret (not auth)
if le.Secret != nil {
if err := m.removeIndexByToken(le.ClientToken, le.LeaseID); err != nil {
return err
}
}
// Clear the expiration handler
m.pendingLock.Lock()
if pending, ok := m.pending[leaseID]; ok {
pending.timer.Stop()
delete(m.pending, leaseID)
}
m.pendingLock.Unlock()
if m.logger.IsInfo() && !skipToken && m.logLeaseExpirations {
m.logger.Info("revoked lease", "lease_id", leaseID)
}
return nil
}
// RevokeForce works similarly to RevokePrefix but continues in the case of a
// revocation error; this is mostly meant for recovery operations
func (m *ExpirationManager) RevokeForce(prefix string) error {
defer metrics.MeasureSince([]string{"expire", "revoke-force"}, time.Now())
return m.revokePrefixCommon(prefix, true)
}
// RevokePrefix is used to revoke all secrets with a given prefix.
// The prefix maps to that of the mount table to make this simpler
// to reason about.
func (m *ExpirationManager) RevokePrefix(prefix string) error {
defer metrics.MeasureSince([]string{"expire", "revoke-prefix"}, time.Now())
return m.revokePrefixCommon(prefix, false)
}
// RevokeByToken is used to revoke all the secrets issued with a given token.
// This is done by using the secondary index. It also removes the lease entry
// for the token itself. As a result it should *ONLY* ever be called from the
// token store's revokeSalted function.
func (m *ExpirationManager) RevokeByToken(te *logical.TokenEntry) error {
defer metrics.MeasureSince([]string{"expire", "revoke-by-token"}, time.Now())
// Lookup the leases
existing, err := m.lookupLeasesByToken(te.ID)
if err != nil {
return errwrap.Wrapf("failed to scan for leases: {{err}}", err)
}
// Revoke all the keys
for _, leaseID := range existing {
// Load the entry
le, err := m.loadEntry(leaseID)
if err != nil {
return err
}
// If there's a lease, set expiration to now, persist, and call
// updatePending to hand off revocation to the expiration manager's pending
// timer map
if le != nil {
le.ExpireTime = time.Now()
{
m.pendingLock.Lock()
if err := m.persistEntry(le); err != nil {
m.pendingLock.Unlock()
return err
}
m.updatePendingInternal(le, 0)
m.pendingLock.Unlock()
}
}
}
// te.Path should never be empty, but we check just in case
if te.Path != "" {
saltedID, err := m.tokenStore.SaltID(m.quitContext, te.ID)
if err != nil {
return err
}
tokenLeaseID := path.Join(te.Path, saltedID)
// We want to skip the revokeEntry call as that will call back into
// revocation logic in the token store, which is what is running this
// function in the first place -- it'd be a deadlock loop. Since the only
// place that this function is called is revokeSalted in the token store,
// we're already revoking the token, so we just want to clean up the lease.
// This avoids spurious revocations later in the log when the timer runs
// out, and eases up resource usage.
return m.revokeCommon(tokenLeaseID, false, true)
}
return nil
}
func (m *ExpirationManager) revokePrefixCommon(prefix string, force bool) error {
if m.inRestoreMode() {
m.restoreRequestLock.Lock()
defer m.restoreRequestLock.Unlock()
}
// Ensure there is a trailing slash; or, if there is no slash, see if there
// is a matching specific ID
if !strings.HasSuffix(prefix, "/") {
le, err := m.loadEntry(prefix)
if err == nil && le != nil {
if err := m.revokeCommon(prefix, force, false); err != nil {
return errwrap.Wrapf(fmt.Sprintf("failed to revoke %q: {{err}}", prefix), err)
}
return nil
}
prefix = prefix + "/"
}
// Accumulate existing leases
sub := m.idView.SubView(prefix)
existing, err := logical.CollectKeys(m.quitContext, sub)
if err != nil {
return errwrap.Wrapf("failed to scan for leases: {{err}}", err)
}
// Revoke all the keys
for idx, suffix := range existing {
leaseID := prefix + suffix
if err := m.revokeCommon(leaseID, force, false); err != nil {
return errwrap.Wrapf(fmt.Sprintf("failed to revoke %q (%d / %d): {{err}}", leaseID, idx+1, len(existing)), err)
}
}
return nil
}
// Renew is used to renew a secret using the given leaseID
// and a renew interval. The increment may be ignored.
func (m *ExpirationManager) Renew(leaseID string, increment time.Duration) (*logical.Response, error) {
defer metrics.MeasureSince([]string{"expire", "renew"}, time.Now())
// Load the entry
le, err := m.loadEntry(leaseID)
if err != nil {
return nil, err
}
// Check if the lease is renewable
if _, err := le.renewable(); err != nil {
return nil, err
}
if le.Secret == nil {
if le.Auth != nil {
return logical.ErrorResponse("tokens cannot be renewed through this endpoint"), logical.ErrPermissionDenied
}
return logical.ErrorResponse("lease does not correspond to a secret"), nil
}
sysView := m.router.MatchingSystemView(le.Path)
if sysView == nil {
return nil, fmt.Errorf("unable to retrieve system view from router")
}
// Attempt to renew the entry
resp, err := m.renewEntry(le, increment)
if err != nil {
return nil, err
}
if resp == nil {
return nil, nil
}
if resp.IsError() {
return &logical.Response{
Data: resp.Data,
}, nil
}
if resp.Secret == nil {
return nil, nil
}
ttl, warnings, err := framework.CalculateTTL(sysView, increment, resp.Secret.TTL, 0, resp.Secret.MaxTTL, 0, le.IssueTime)
if err != nil {
return nil, err
}
for _, warning := range warnings {
resp.AddWarning(warning)
}
resp.Secret.TTL = ttl
// Attach the LeaseID
resp.Secret.LeaseID = leaseID
// Update the lease entry
le.Data = resp.Data
le.Secret = resp.Secret
le.ExpireTime = resp.Secret.ExpirationTime()
le.LastRenewalTime = time.Now()
{
m.pendingLock.Lock()
if err := m.persistEntry(le); err != nil {
m.pendingLock.Unlock()
return nil, err
}
// Update the expiration time
m.updatePendingInternal(le, resp.Secret.LeaseTotal())
m.pendingLock.Unlock()
}
// Return the response
return resp, nil
}
// RenewToken is used to renew a token which does not need to
// invoke a logical backend.
func (m *ExpirationManager) RenewToken(req *logical.Request, source string, token string,
increment time.Duration) (*logical.Response, error) {
defer metrics.MeasureSince([]string{"expire", "renew-token"}, time.Now())
// Compute the Lease ID
saltedID, err := m.tokenStore.SaltID(m.quitContext, token)
if err != nil {
return nil, err
}
leaseID := path.Join(source, saltedID)
// Load the entry
le, err := m.loadEntry(leaseID)
if err != nil {
return nil, err
}
// Check if the lease is renewable. Note that this also checks for a nil
// lease and errors in that case as well.
if _, err := le.renewable(); err != nil {
return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
}
// Attempt to renew the auth entry
resp, err := m.renewAuthEntry(req, le, increment)
if err != nil {
return nil, err
}
if resp == nil {
return nil, nil
}
if resp.IsError() {
return &logical.Response{
Data: resp.Data,
}, nil
}
if resp.Auth == nil {
return nil, nil
}
sysView := m.router.MatchingSystemView(le.Path)
if sysView == nil {
return nil, fmt.Errorf("unable to retrieve system view from router")
}
ttl, warnings, err := framework.CalculateTTL(sysView, increment, resp.Auth.TTL, resp.Auth.Period, resp.Auth.MaxTTL, resp.Auth.ExplicitMaxTTL, le.IssueTime)
if err != nil {
return nil, err
}
retResp := &logical.Response{}
for _, warning := range warnings {
retResp.AddWarning(warning)
}
resp.Auth.TTL = ttl
// Attach the ClientToken
resp.Auth.ClientToken = token
// Update the lease entry
le.Auth = resp.Auth
le.ExpireTime = resp.Auth.ExpirationTime()
le.LastRenewalTime = time.Now()
{
m.pendingLock.Lock()
if err := m.persistEntry(le); err != nil {
m.pendingLock.Unlock()
return nil, err
}
// Update the expiration time
m.updatePendingInternal(le, resp.Auth.LeaseTotal())
m.pendingLock.Unlock()
}
retResp.Auth = resp.Auth
return retResp, nil
}
// Register is used to take a request and response with an associated
// lease. The secret gets assigned a LeaseID and the management of
// of lease is assumed by the expiration manager.
func (m *ExpirationManager) Register(req *logical.Request, resp *logical.Response) (id string, retErr error) {
defer metrics.MeasureSince([]string{"expire", "register"}, time.Now())
if req.ClientToken == "" {
return "", fmt.Errorf("cannot register a lease with an empty client token")
}
// Ignore if there is no leased secret
if resp == nil || resp.Secret == nil {
return "", nil
}
// Validate the secret
if err := resp.Secret.Validate(); err != nil {
return "", err
}
// Create a lease entry
leaseUUID, err := uuid.GenerateUUID()
if err != nil {
return "", err
}
leaseID := path.Join(req.Path, leaseUUID)
defer func() {
// If there is an error we want to rollback as much as possible (note
// that errors here are ignored to do as much cleanup as we can). We
// want to revoke a generated secret (since an error means we may not
// be successfully tracking it), remove indexes, and delete the entry.
if retErr != nil {
revResp, err := m.router.Route(m.quitContext, logical.RevokeRequest(req.Path, resp.Secret, resp.Data))
if err != nil {
retErr = multierror.Append(retErr, errwrap.Wrapf("an additional internal error was encountered revoking the newly-generated secret: {{err}}", err))
} else if revResp != nil && revResp.IsError() {
retErr = multierror.Append(retErr, errwrap.Wrapf("an additional error was encountered revoking the newly-generated secret: {{err}}", revResp.Error()))
}
if err := m.deleteEntry(leaseID); err != nil {
retErr = multierror.Append(retErr, errwrap.Wrapf("an additional error was encountered deleting any lease associated with the newly-generated secret: {{err}}", err))
}
if err := m.removeIndexByToken(req.ClientToken, leaseID); err != nil {
retErr = multierror.Append(retErr, errwrap.Wrapf("an additional error was encountered removing lease indexes associated with the newly-generated secret: {{err}}", err))
}
}
}()
le := leaseEntry{
LeaseID: leaseID,
ClientToken: req.ClientToken,
Path: req.Path,
Data: resp.Data,
Secret: resp.Secret,
IssueTime: time.Now(),
ExpireTime: resp.Secret.ExpirationTime(),
}
// Encode the entry
if err := m.persistEntry(&le); err != nil {
return "", err
}
// Maintain secondary index by token
if err := m.createIndexByToken(le.ClientToken, le.LeaseID); err != nil {
return "", err
}
// Setup revocation timer if there is a lease
m.updatePending(&le, resp.Secret.LeaseTotal())
// Done
return le.LeaseID, nil
}
// RegisterAuth is used to take an Auth response with an associated lease.
// The token does not get a LeaseID, but the lease management is handled by
// the expiration manager.
func (m *ExpirationManager) RegisterAuth(source string, auth *logical.Auth) error {
defer metrics.MeasureSince([]string{"expire", "register-auth"}, time.Now())
if auth.ClientToken == "" {
return fmt.Errorf("cannot register an auth lease with an empty token")
}
if strings.Contains(source, "..") {
return consts.ErrPathContainsParentReferences
}
saltedID, err := m.tokenStore.SaltID(m.quitContext, auth.ClientToken)
if err != nil {
return err
}
// Create a lease entry
le := leaseEntry{
LeaseID: path.Join(source, saltedID),
ClientToken: auth.ClientToken,
Auth: auth,
Path: source,
IssueTime: time.Now(),
ExpireTime: auth.ExpirationTime(),
}
// Encode the entry
if err := m.persistEntry(&le); err != nil {
return err
}
// Setup revocation timer
m.updatePending(&le, auth.LeaseTotal())
return nil
}
// FetchLeaseTimesByToken is a helper function to use token values to compute
// the leaseID, rather than pushing that logic back into the token store.
func (m *ExpirationManager) FetchLeaseTimesByToken(source, token string) (*leaseEntry, error) {
defer metrics.MeasureSince([]string{"expire", "fetch-lease-times-by-token"}, time.Now())
// Compute the Lease ID
saltedID, err := m.tokenStore.SaltID(m.quitContext, token)
if err != nil {
return nil, err
}
leaseID := path.Join(source, saltedID)
return m.FetchLeaseTimes(leaseID)
}
// FetchLeaseTimes is used to fetch the issue time, expiration time, and last
// renewed time of a lease entry. It returns a leaseEntry itself, but with only
// those values copied over.
func (m *ExpirationManager) FetchLeaseTimes(leaseID string) (*leaseEntry, error) {
defer metrics.MeasureSince([]string{"expire", "fetch-lease-times"}, time.Now())
m.pendingLock.RLock()
val := m.pending[leaseID]
m.pendingLock.RUnlock()
if val.exportLeaseTimes != nil {
return val.exportLeaseTimes, nil
}
// Load the entry
le, err := m.loadEntry(leaseID)
if err != nil {
return nil, err
}
if le == nil {
return nil, nil
}
return m.leaseTimesForExport(le), nil
}
// Returns lease times for outside callers based on the full leaseEntry passed in
func (m *ExpirationManager) leaseTimesForExport(le *leaseEntry) *leaseEntry {
ret := &leaseEntry{
IssueTime: le.IssueTime,
ExpireTime: le.ExpireTime,
LastRenewalTime: le.LastRenewalTime,
}
if le.Secret != nil {
ret.Secret = &logical.Secret{}
ret.Secret.Renewable = le.Secret.Renewable
ret.Secret.TTL = le.Secret.TTL
}
if le.Auth != nil {
ret.Auth = &logical.Auth{}
ret.Auth.Renewable = le.Auth.Renewable
ret.Auth.TTL = le.Auth.TTL
}
return ret
}
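// exampleLeaseTimes is an illustrative sketch added by the editor; it is not
// part of the original file and the name is hypothetical. It shows that the
// exported view of a lease carries only timing and renewability data: secret
// payloads, auth payloads and lease data are deliberately dropped.
func exampleLeaseTimes(m *ExpirationManager, le *leaseEntry) (issued, expires time.Time) {
	exported := m.leaseTimesForExport(le)
	return exported.IssueTime, exported.ExpireTime
}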
// updatePending is used to update a pending invocation for a lease
func (m *ExpirationManager) updatePending(le *leaseEntry, leaseTotal time.Duration) {
m.pendingLock.Lock()
defer m.pendingLock.Unlock()
m.updatePendingInternal(le, leaseTotal)
}
// updatePendingInternal is the locked version of updatePending; do not call
// this without a write lock on m.pending
func (m *ExpirationManager) updatePendingInternal(le *leaseEntry, leaseTotal time.Duration) {
// Check for an existing timer
pending, ok := m.pending[le.LeaseID]
// If there is no expiry time, don't do anything
if le.ExpireTime.IsZero() {
		// if the timer happened to exist, stop the timer and delete it from the
// pending timers.
if ok {
pending.timer.Stop()
delete(m.pending, le.LeaseID)
}
return
}
// Create entry if it does not exist or reset if it does
if ok {
pending.timer.Reset(leaseTotal)
} else {
timer := time.AfterFunc(leaseTotal, func() {
m.expireID(le.LeaseID)
})
pending = pendingInfo{
timer: timer,
}
}
// Extend the timer by the lease total
pending.exportLeaseTimes = m.leaseTimesForExport(le)
m.pending[le.LeaseID] = pending
}
// expireID is invoked when a given ID is expired
func (m *ExpirationManager) expireID(leaseID string) {
// Clear from the pending expiration
m.pendingLock.Lock()
delete(m.pending, leaseID)
m.pendingLock.Unlock()
for attempt := uint(0); attempt < maxRevokeAttempts; attempt++ {
select {
case <-m.quitCh:
m.logger.Error("shutting down, not attempting further revocation of lease", "lease_id", leaseID)
return
default:
}
m.coreStateLock.RLock()
if m.quitContext.Err() == context.Canceled {
m.logger.Error("core context canceled, not attempting further revocation of lease", "lease_id", leaseID)
m.coreStateLock.RUnlock()
return
}
err := m.Revoke(leaseID)
if err == nil {
m.coreStateLock.RUnlock()
return
}
m.coreStateLock.RUnlock()
m.logger.Error("failed to revoke lease", "lease_id", leaseID, "error", err)
time.Sleep((1 << attempt) * revokeRetryBase)
}
m.logger.Error("maximum revoke attempts reached", "lease_id", leaseID)
}
// revokeEntry is used to attempt revocation of an internal entry
func (m *ExpirationManager) revokeEntry(le *leaseEntry) error {
// Revocation of login tokens is special since we can by-pass the
// backend and directly interact with the token store
if le.Auth != nil {
if err := m.tokenStore.revokeTree(m.quitContext, le.ClientToken); err != nil {
return errwrap.Wrapf("failed to revoke token: {{err}}", err)
}
return nil
}
// Handle standard revocation via backends
resp, err := m.router.Route(m.quitContext, logical.RevokeRequest(le.Path, le.Secret, le.Data))
if err != nil || (resp != nil && resp.IsError()) {
return errwrap.Wrapf(fmt.Sprintf("failed to revoke entry: resp: %#v err: {{err}}", resp), err)
}
return nil
}
// renewEntry is used to attempt renew of an internal entry
func (m *ExpirationManager) renewEntry(le *leaseEntry, increment time.Duration) (*logical.Response, error) {
secret := *le.Secret
secret.IssueTime = le.IssueTime
secret.Increment = increment
secret.LeaseID = ""
req := logical.RenewRequest(le.Path, &secret, le.Data)
resp, err := m.router.Route(m.quitContext, req)
if err != nil || (resp != nil && resp.IsError()) {
return nil, errwrap.Wrapf(fmt.Sprintf("failed to renew entry: resp: %#v err: {{err}}", resp), err)
}
return resp, nil
}
// renewAuthEntry is used to attempt renew of an auth entry. Only the token
// store should get the actual token ID intact.
func (m *ExpirationManager) renewAuthEntry(req *logical.Request, le *leaseEntry, increment time.Duration) (*logical.Response, error) {
auth := *le.Auth
auth.IssueTime = le.IssueTime
auth.Increment = increment
if strings.HasPrefix(le.Path, "auth/token/") {
auth.ClientToken = le.ClientToken
} else {
auth.ClientToken = ""
}
authReq := logical.RenewAuthRequest(le.Path, &auth, nil)
authReq.Connection = req.Connection
resp, err := m.router.Route(m.quitContext, authReq)
if err != nil {
return nil, errwrap.Wrapf("failed to renew entry: {{err}}", err)
}
return resp, nil
}
// loadEntry is used to read a lease entry
func (m *ExpirationManager) loadEntry(leaseID string) (*leaseEntry, error) {
// Take out the lease locks after we ensure we are in restore mode
restoreMode := m.inRestoreMode()
if restoreMode {
m.restoreModeLock.RLock()
defer m.restoreModeLock.RUnlock()
restoreMode = m.inRestoreMode()
if restoreMode {
m.lockLease(leaseID)
defer m.unlockLease(leaseID)
}
}
return m.loadEntryInternal(leaseID, restoreMode, true)
}
// loadEntryInternal is used when you need to load an entry but also need to
// control the lifecycle of the restoreLock
func (m *ExpirationManager) loadEntryInternal(leaseID string, restoreMode bool, checkRestored bool) (*leaseEntry, error) {
out, err := m.idView.Get(m.quitContext, leaseID)
if err != nil {
return nil, errwrap.Wrapf("failed to read lease entry: {{err}}", err)
}
if out == nil {
return nil, nil
}
le, err := decodeLeaseEntry(out.Value)
if err != nil {
return nil, errwrap.Wrapf("failed to decode lease entry: {{err}}", err)
}
if restoreMode {
if checkRestored {
// If we have already loaded this lease, we don't need to update on
// load. In the case of renewal and revocation, updatePending will be
// done after making the appropriate modifications to the lease.
if _, ok := m.restoreLoaded.Load(leaseID); ok {
return le, nil
}
}
// Update the cache of restored leases, either synchronously or through
// the lazy loaded restore process
m.restoreLoaded.Store(le.LeaseID, struct{}{})
// Setup revocation timer
m.updatePending(le, le.ExpireTime.Sub(time.Now()))
}
return le, nil
}
// persistEntry is used to persist a lease entry
func (m *ExpirationManager) persistEntry(le *leaseEntry) error {
// Encode the entry
buf, err := le.encode()
if err != nil {
return errwrap.Wrapf("failed to encode lease entry: {{err}}", err)
}
// Write out to the view
ent := logical.StorageEntry{
Key: le.LeaseID,
Value: buf,
}
if le.Auth != nil && len(le.Auth.Policies) == 1 && le.Auth.Policies[0] == "root" {
ent.SealWrap = true
}
if err := m.idView.Put(m.quitContext, &ent); err != nil {
return errwrap.Wrapf("failed to persist lease entry: {{err}}", err)
}
return nil
}
// deleteEntry is used to delete a lease entry
func (m *ExpirationManager) deleteEntry(leaseID string) error {
if err := m.idView.Delete(m.quitContext, leaseID); err != nil {
return errwrap.Wrapf("failed to delete lease entry: {{err}}", err)
}
return nil
}
// createIndexByToken creates a secondary index from the token to a lease entry
func (m *ExpirationManager) createIndexByToken(token, leaseID string) error {
saltedID, err := m.tokenStore.SaltID(m.quitContext, token)
if err != nil {
return err
}
leaseSaltedID, err := m.tokenStore.SaltID(m.quitContext, leaseID)
if err != nil {
return err
}
ent := logical.StorageEntry{
Key: saltedID + "/" + leaseSaltedID,
Value: []byte(leaseID),
}
if err := m.tokenView.Put(m.quitContext, &ent); err != nil {
return errwrap.Wrapf("failed to persist lease index entry: {{err}}", err)
}
return nil
}
// indexByToken looks up the secondary index from the token to a lease entry
func (m *ExpirationManager) indexByToken(token, leaseID string) (*logical.StorageEntry, error) {
saltedID, err := m.tokenStore.SaltID(m.quitContext, token)
if err != nil {
return nil, err
}
leaseSaltedID, err := m.tokenStore.SaltID(m.quitContext, leaseID)
if err != nil {
return nil, err
}
key := saltedID + "/" + leaseSaltedID
entry, err := m.tokenView.Get(m.quitContext, key)
if err != nil {
return nil, fmt.Errorf("failed to look up secondary index entry")
}
return entry, nil
}
// removeIndexByToken removes the secondary index from the token to a lease entry
func (m *ExpirationManager) removeIndexByToken(token, leaseID string) error {
saltedID, err := m.tokenStore.SaltID(m.quitContext, token)
if err != nil {
return err
}
leaseSaltedID, err := m.tokenStore.SaltID(m.quitContext, leaseID)
if err != nil {
return err
}
key := saltedID + "/" + leaseSaltedID
if err := m.tokenView.Delete(m.quitContext, key); err != nil {
return errwrap.Wrapf("failed to delete lease index entry: {{err}}", err)
}
return nil
}
// CreateOrFetchRevocationLeaseByToken is used to create or fetch the matching
// leaseID for a particular token. The lease is set to expire immediately after
// it's created.
func (m *ExpirationManager) CreateOrFetchRevocationLeaseByToken(te *logical.TokenEntry) (string, error) {
// Fetch the saltedID of the token and construct the leaseID
saltedID, err := m.tokenStore.SaltID(m.quitContext, te.ID)
if err != nil {
return "", err
}
leaseID := path.Join(te.Path, saltedID)
// Load the entry
le, err := m.loadEntry(leaseID)
if err != nil {
return "", err
}
// If there's no associated leaseEntry for the token, we create one
if le == nil {
auth := &logical.Auth{
ClientToken: te.ID,
LeaseOptions: logical.LeaseOptions{
TTL: time.Nanosecond,
},
}
if strings.Contains(te.Path, "..") {
return "", consts.ErrPathContainsParentReferences
}
// Create a lease entry
now := time.Now()
le = &leaseEntry{
LeaseID: leaseID,
ClientToken: auth.ClientToken,
Auth: auth,
Path: te.Path,
IssueTime: now,
ExpireTime: now.Add(time.Nanosecond),
}
// Encode the entry
if err := m.persistEntry(le); err != nil {
return "", err
}
}
return le.LeaseID, nil
}
// lookupLeasesByToken is used to look up all the leaseIDs via the tokenID
func (m *ExpirationManager) lookupLeasesByToken(token string) ([]string, error) {
saltedID, err := m.tokenStore.SaltID(m.quitContext, token)
if err != nil {
return nil, err
}
// Scan via the index for sub-leases
prefix := saltedID + "/"
subKeys, err := m.tokenView.List(m.quitContext, prefix)
if err != nil {
return nil, errwrap.Wrapf("failed to list leases: {{err}}", err)
}
// Read each index entry
leaseIDs := make([]string, 0, len(subKeys))
for _, sub := range subKeys {
out, err := m.tokenView.Get(m.quitContext, prefix+sub)
if err != nil {
return nil, errwrap.Wrapf("failed to read lease index: {{err}}", err)
}
if out == nil {
continue
}
leaseIDs = append(leaseIDs, string(out.Value))
}
return leaseIDs, nil
}
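// Illustrative note (added for clarity, not in the original source): the
// secondary index written by createIndexByToken and consumed here is keyed as
//
//	<saltedTokenID>/<saltedLeaseID>  ->  <raw leaseID>
//
// so listing the "<saltedTokenID>/" prefix yields one entry per lease held by
// the token, and each entry's value is the lease ID to operate on.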
// emitMetrics is invoked periodically to emit statistics
func (m *ExpirationManager) emitMetrics() {
m.pendingLock.RLock()
num := len(m.pending)
m.pendingLock.RUnlock()
metrics.SetGauge([]string{"expire", "num_leases"}, float32(num))
// Check if lease count is greater than the threshold
if num > maxLeaseThreshold {
if atomic.LoadUint32(m.leaseCheckCounter) > 59 {
m.logger.Warn("lease count exceeds warning lease threshold")
atomic.StoreUint32(m.leaseCheckCounter, 0)
} else {
atomic.AddUint32(m.leaseCheckCounter, 1)
}
}
}
// leaseEntry is used to structure the values the expiration
// manager stores. This is used to handle renew and revocation.
type leaseEntry struct {
LeaseID string `json:"lease_id"`
ClientToken string `json:"client_token"`
Path string `json:"path"`
Data map[string]interface{} `json:"data"`
Secret *logical.Secret `json:"secret"`
Auth *logical.Auth `json:"auth"`
IssueTime time.Time `json:"issue_time"`
ExpireTime time.Time `json:"expire_time"`
LastRenewalTime time.Time `json:"last_renewal_time"`
}
// encode is used to JSON encode the lease entry
func (le *leaseEntry) encode() ([]byte, error) {
return json.Marshal(le)
}
func (le *leaseEntry) renewable() (bool, error) {
var err error
switch {
// If there is no entry, it cannot be renewed
case le == nil || le.ExpireTime.IsZero():
err = fmt.Errorf("lease not found or lease is not renewable")
// Determine if the lease is expired
case le.ExpireTime.Before(time.Now()):
err = fmt.Errorf("lease expired")
// Determine if the lease is renewable
case le.Secret != nil && !le.Secret.Renewable:
err = fmt.Errorf("lease is not renewable")
case le.Auth != nil && !le.Auth.Renewable:
err = fmt.Errorf("lease is not renewable")
}
if err != nil {
return false, err
}
return true, nil
}
func (le *leaseEntry) ttl() int64 {
return int64(le.ExpireTime.Sub(time.Now().Round(time.Second)).Seconds())
}
// decodeLeaseEntry is used to reverse encode and return a new entry
func decodeLeaseEntry(buf []byte) (*leaseEntry, error) {
out := new(leaseEntry)
return out, jsonutil.DecodeJSON(buf, out)
}
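// Rough usage sketch (not part of the original file; the field values are
// hypothetical): a leaseEntry round-trips through encode/decodeLeaseEntry as
// plain JSON, and ttl reports the remaining lifetime in whole seconds.
//
//	le := &leaseEntry{
//		LeaseID:    "secret/data/example/abc123",
//		IssueTime:  time.Now(),
//		ExpireTime: time.Now().Add(time.Hour),
//	}
//	buf, _ := le.encode()           // JSON bytes
//	out, _ := decodeLeaseEntry(buf) // reverse of encode
//	_ = out.ttl()                   // ~3600 seconds remaining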
|
[
"\"VAULT_SKIP_LOGGING_LEASE_EXPIRATIONS\""
] |
[] |
[
"VAULT_SKIP_LOGGING_LEASE_EXPIRATIONS"
] |
[]
|
["VAULT_SKIP_LOGGING_LEASE_EXPIRATIONS"]
|
go
| 1 | 0 | |
http2/h2c/h2c.go
|
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package h2c implements the unencrypted "h2c" form of HTTP/2.
//
// The h2c protocol is the non-TLS version of HTTP/2 which is not available from
// net/http or golang.org/x/net/http2.
package h2c
import (
"bufio"
"bytes"
"encoding/base64"
"encoding/binary"
"errors"
"fmt"
"io"
"log"
"net"
"os"
"strings"
"net/textproto"
"github.com/SandwichDev/net/http"
"github.com/SandwichDev/net/http/httpguts"
"github.com/SandwichDev/net/http2"
"github.com/SandwichDev/net/http2/hpack"
)
var (
http2VerboseLogs bool
)
func init() {
e := os.Getenv("GODEBUG")
if strings.Contains(e, "http2debug=1") || strings.Contains(e, "http2debug=2") {
http2VerboseLogs = true
}
}
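// Usage note (an assumption about typical use, not taken from this file):
// starting the process with GODEBUG=http2debug=1 (or http2debug=2) in the
// environment enables the verbose h2c logging guarded by http2VerboseLogs.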
// h2cHandler is a Handler which implements h2c by hijacking the HTTP/1 traffic
// that should be h2c traffic. There are two ways to begin a h2c connection
// (RFC 7540 Section 3.2 and 3.4): (1) Starting with Prior Knowledge - this
// works by starting an h2c connection with a string of bytes that is valid
// HTTP/1, but unlikely to occur in practice and (2) Upgrading from HTTP/1 to
// h2c - this works by using the HTTP/1 Upgrade header to request an upgrade to
// h2c. When either of those situations occurs, we hijack the HTTP/1 connection,
// convert it to an HTTP/2 connection, and pass the net.Conn to http2.ServeConn.
type h2cHandler struct {
Handler http.Handler
s *http2.Server
}
// NewHandler returns an http.Handler that wraps h, intercepting any h2c
// traffic. If a request is an h2c connection, it's hijacked and redirected to
// s.ServeConn. Otherwise the returned Handler just forwards requests to h. This
// works because h2c is designed to be parseable as valid HTTP/1, but ignored by
// any HTTP server that does not handle h2c. Therefore we leverage the HTTP/1
// compatible parts of the Go http library to parse and recognize h2c requests.
// Once a request is recognized as h2c, we hijack the connection and convert it
// to an HTTP/2 connection which is understandable to s.ServeConn. (s.ServeConn
// understands HTTP/2 except for the h2c part of it.)
func NewHandler(h http.Handler, s *http2.Server) http.Handler {
return &h2cHandler{
Handler: h,
s: s,
}
}
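// Minimal usage sketch (an illustration, not part of the original file): wrap
// an existing http.Handler so a single cleartext port serves both HTTP/1.1
// and h2c. The handler and address below are hypothetical.
//
//	h2s := &http2.Server{}
//	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//		fmt.Fprintf(w, "proto: %s", r.Proto) // "HTTP/2.0" for h2c clients
//	})
//	srv := &http.Server{Addr: ":8080", Handler: NewHandler(handler, h2s)}
//	log.Fatal(srv.ListenAndServe())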
// ServeHTTP implements the h2c support that is enabled by h2c.NewHandler.
func (s h2cHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Handle h2c with prior knowledge (RFC 7540 Section 3.4)
if r.Method == "PRI" && len(r.Header) == 0 && r.URL.Path == "*" && r.Proto == "HTTP/2.0" {
if http2VerboseLogs {
log.Print("h2c: attempting h2c with prior knowledge.")
}
conn, err := initH2CWithPriorKnowledge(w)
if err != nil {
if http2VerboseLogs {
log.Printf("h2c: error h2c with prior knowledge: %v", err)
}
return
}
defer conn.Close()
s.s.ServeConn(conn, &http2.ServeConnOpts{Handler: s.Handler})
return
}
// Handle Upgrade to h2c (RFC 7540 Section 3.2)
if conn, err := h2cUpgrade(w, r); err == nil {
defer conn.Close()
s.s.ServeConn(conn, &http2.ServeConnOpts{Handler: s.Handler})
return
}
s.Handler.ServeHTTP(w, r)
return
}
// initH2CWithPriorKnowledge implements creating a h2c connection with prior
// knowledge (Section 3.4) and creates a net.Conn suitable for http2.ServeConn.
// All we have to do is look for the client preface that is supposed to be part
// of the body, and reforward the client preface on the net.Conn this function
// creates.
func initH2CWithPriorKnowledge(w http.ResponseWriter) (net.Conn, error) {
hijacker, ok := w.(http.Hijacker)
if !ok {
panic("Hijack not supported.")
}
conn, rw, err := hijacker.Hijack()
if err != nil {
panic(fmt.Sprintf("Hijack failed: %v", err))
}
const expectedBody = "SM\r\n\r\n"
buf := make([]byte, len(expectedBody))
n, err := io.ReadFull(rw, buf)
if err != nil {
return nil, fmt.Errorf("could not read from the buffer: %s", err)
}
if string(buf[:n]) == expectedBody {
c := &rwConn{
Conn: conn,
Reader: io.MultiReader(strings.NewReader(http2.ClientPreface), rw),
BufWriter: rw.Writer,
}
return c, nil
}
conn.Close()
if http2VerboseLogs {
log.Printf(
"h2c: missing the request body portion of the client preface. Wanted: %v Got: %v",
[]byte(expectedBody),
buf[0:n],
)
}
return nil, errors.New("invalid client preface")
}
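// Background note (added for clarity, per RFC 7540 Section 3.4): the full
// client connection preface is "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n". The HTTP/1
// machinery has already consumed the "PRI * HTTP/2.0" request line by the time
// ServeHTTP runs, so this function only needs to read the trailing "SM\r\n\r\n"
// from the body and then replay the whole preface for http2.ServeConn via the
// MultiReader above.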
// drainClientPreface reads a single instance of the HTTP/2 client preface from
// the supplied reader.
func drainClientPreface(r io.Reader) error {
var buf bytes.Buffer
prefaceLen := int64(len(http2.ClientPreface))
n, err := io.CopyN(&buf, r, prefaceLen)
if err != nil {
return err
}
if n != prefaceLen || buf.String() != http2.ClientPreface {
return fmt.Errorf("Client never sent: %s", http2.ClientPreface)
}
return nil
}
// h2cUpgrade establishes a h2c connection using the HTTP/1 upgrade (Section 3.2).
func h2cUpgrade(w http.ResponseWriter, r *http.Request) (net.Conn, error) {
if !isH2CUpgrade(r.Header) {
return nil, errors.New("non-conforming h2c headers")
}
// Initial bytes we put into conn to fool http2 server
initBytes, _, err := convertH1ReqToH2(r)
if err != nil {
return nil, err
}
hijacker, ok := w.(http.Hijacker)
if !ok {
return nil, errors.New("hijack not supported.")
}
conn, rw, err := hijacker.Hijack()
if err != nil {
return nil, fmt.Errorf("hijack failed: %v", err)
}
rw.Write([]byte("HTTP/1.1 101 Switching Protocols\r\n" +
"Connection: Upgrade\r\n" +
"Upgrade: h2c\r\n\r\n"))
rw.Flush()
// A conforming client will now send an H2 client preface, which we need to
// drain since the preface is already included in initBytes.
if err := drainClientPreface(rw); err != nil {
return nil, err
}
c := &rwConn{
Conn: conn,
Reader: io.MultiReader(initBytes, rw),
BufWriter: newSettingsAckSwallowWriter(rw.Writer),
}
return c, nil
}
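// Illustrative example (per RFC 7540 Section 3.2, not part of the original
// file): a client requesting an upgrade to h2c sends something like
//
//	GET / HTTP/1.1
//	Host: example.com
//	Connection: Upgrade, HTTP2-Settings
//	Upgrade: h2c
//	HTTP2-Settings: <base64url-encoded HTTP/2 SETTINGS payload>
//
// h2cUpgrade replies with "101 Switching Protocols" and then speaks HTTP/2 on
// the hijacked connection.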
// convertH1ReqToH2 converts the data contained in the HTTP/1 upgrade request
// into its HTTP/2 equivalent in byte form.
func convertH1ReqToH2(r *http.Request) (*bytes.Buffer, []http2.Setting, error) {
h2Bytes := bytes.NewBuffer([]byte((http2.ClientPreface)))
framer := http2.NewFramer(h2Bytes, nil)
settings, err := getH2Settings(r.Header)
if err != nil {
return nil, nil, err
}
if err := framer.WriteSettings(settings...); err != nil {
return nil, nil, err
}
headerBytes, err := getH2HeaderBytes(r, getMaxHeaderTableSize(settings))
if err != nil {
return nil, nil, err
}
maxFrameSize := int(getMaxFrameSize(settings))
needOneHeader := len(headerBytes) < maxFrameSize
err = framer.WriteHeaders(http2.HeadersFrameParam{
StreamID: 1,
BlockFragment: headerBytes,
EndHeaders: needOneHeader,
})
if err != nil {
return nil, nil, err
}
for i := maxFrameSize; i < len(headerBytes); i += maxFrameSize {
if len(headerBytes)-i > maxFrameSize {
if err := framer.WriteContinuation(1,
false, // endHeaders
headerBytes[i:i+maxFrameSize]); err != nil {
return nil, nil, err
}
} else {
if err := framer.WriteContinuation(1,
true, // endHeaders
headerBytes[i:]); err != nil {
return nil, nil, err
}
}
}
return h2Bytes, settings, nil
}
// getMaxFrameSize returns the SETTINGS_MAX_FRAME_SIZE. If not present, the
// default value of 16384 is returned, as specified by RFC 7540 Section 6.5.2.
func getMaxFrameSize(settings []http2.Setting) uint32 {
for _, setting := range settings {
if setting.ID == http2.SettingMaxFrameSize {
return setting.Val
}
}
return 16384
}
// getMaxHeaderTableSize returns the SETTINGS_HEADER_TABLE_SIZE. If not present,
// the default value of 4096 is returned, as specified by RFC 7540 Section 6.5.2.
func getMaxHeaderTableSize(settings []http2.Setting) uint32 {
for _, setting := range settings {
if setting.ID == http2.SettingHeaderTableSize {
return setting.Val
}
}
return 4096
}
// bufWriter is a Writer interface that also has a Flush method.
type bufWriter interface {
io.Writer
Flush() error
}
// rwConn implements net.Conn but overrides Read and Write so that reads and
// writes are forwarded to the provided io.Reader and bufWriter.
type rwConn struct {
net.Conn
io.Reader
BufWriter bufWriter
}
// Read forwards reads to the underlying Reader.
func (c *rwConn) Read(p []byte) (int, error) {
return c.Reader.Read(p)
}
// Write forwards writes to the underlying bufWriter and immediately flushes.
func (c *rwConn) Write(p []byte) (int, error) {
n, err := c.BufWriter.Write(p)
if err := c.BufWriter.Flush(); err != nil {
return 0, err
}
return n, err
}
// settingsAckSwallowWriter is a writer that normally forwards bytes to its
// underlying Writer, but swallows the first SettingsAck frame that it sees.
type settingsAckSwallowWriter struct {
Writer *bufio.Writer
buf []byte
didSwallow bool
}
// newSettingsAckSwallowWriter returns a new settingsAckSwallowWriter.
func newSettingsAckSwallowWriter(w *bufio.Writer) *settingsAckSwallowWriter {
return &settingsAckSwallowWriter{
Writer: w,
buf: make([]byte, 0),
didSwallow: false,
}
}
// Write implements io.Writer interface. Normally forwards bytes to w.Writer,
// except for the first Settings ACK frame that it sees.
func (w *settingsAckSwallowWriter) Write(p []byte) (int, error) {
if !w.didSwallow {
w.buf = append(w.buf, p...)
// Process all the frames we have collected into w.buf
for {
// Append until we get full frame header which is 9 bytes
if len(w.buf) < 9 {
break
}
// Check if we have collected a whole frame.
fh, err := http2.ReadFrameHeader(bytes.NewBuffer(w.buf))
if err != nil {
// Corrupted frame, fail current Write
return 0, err
}
fSize := fh.Length + 9
if uint32(len(w.buf)) < fSize {
// We have not collected a whole frame yet. Stop processing buf and withhold
// forwarding bytes to w.Writer until we have the full frame.
break
}
// We have now collected a whole frame.
if fh.Type == http2.FrameSettings && fh.Flags.Has(http2.FlagSettingsAck) {
// If Settings ACK frame, do not forward to underlying writer, remove
// bytes from w.buf, and record that we have swallowed Settings Ack
// frame.
w.didSwallow = true
w.buf = w.buf[fSize:]
continue
}
// Not settings ack frame. Forward bytes to w.Writer.
if _, err := w.Writer.Write(w.buf[:fSize]); err != nil {
// Couldn't forward bytes. Fail current Write.
return 0, err
}
w.buf = w.buf[fSize:]
}
return len(p), nil
}
return w.Writer.Write(p)
}
// Flush calls w.Writer.Flush.
func (w *settingsAckSwallowWriter) Flush() error {
return w.Writer.Flush()
}
// isH2CUpgrade returns true if the headers properly request an upgrade to h2c
// as specified by Section 3.2.
func isH2CUpgrade(h http.Header) bool {
return httpguts.HeaderValuesContainsToken(h[textproto.CanonicalMIMEHeaderKey("Upgrade")], "h2c") &&
httpguts.HeaderValuesContainsToken(h[textproto.CanonicalMIMEHeaderKey("Connection")], "HTTP2-Settings")
}
// getH2Settings returns the []http2.Setting that are encoded in the
// HTTP2-Settings header.
func getH2Settings(h http.Header) ([]http2.Setting, error) {
vals, ok := h[textproto.CanonicalMIMEHeaderKey("HTTP2-Settings")]
if !ok {
return nil, errors.New("missing HTTP2-Settings header")
}
if len(vals) != 1 {
return nil, fmt.Errorf("expected 1 HTTP2-Settings. Got: %v", vals)
}
settings, err := decodeSettings(vals[0])
if err != nil {
return nil, fmt.Errorf("Invalid HTTP2-Settings: %q", vals[0])
}
return settings, nil
}
// decodeSettings decodes the base64url header value of the HTTP2-Settings
// header. RFC 7540 Section 3.2.1.
func decodeSettings(headerVal string) ([]http2.Setting, error) {
b, err := base64.RawURLEncoding.DecodeString(headerVal)
if err != nil {
return nil, err
}
if len(b)%6 != 0 {
return nil, errors.New("invalid length of HTTP2-Settings header")
}
settings := make([]http2.Setting, 0)
for i := 0; i < len(b)/6; i++ {
settings = append(settings, http2.Setting{
ID: http2.SettingID(binary.BigEndian.Uint16(b[i*6 : i*6+2])),
Val: binary.BigEndian.Uint32(b[i*6+2 : i*6+6]),
})
}
return settings, nil
}
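// Worked example (for illustration; the header value is hypothetical but the
// encoding follows RFC 7540 Section 3.2.1): each setting is 6 bytes, a 2-byte
// identifier followed by a 4-byte value. The base64url string "AAMAAABk"
// decodes to the bytes 00 03 00 00 00 64, i.e. one setting:
// SETTINGS_MAX_CONCURRENT_STREAMS (0x3) = 100.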
// getH2HeaderBytes returns the headers in r as a []byte slice encoded by HPACK.
func getH2HeaderBytes(r *http.Request, maxHeaderTableSize uint32) ([]byte, error) {
headerBytes := bytes.NewBuffer(nil)
hpackEnc := hpack.NewEncoder(headerBytes)
hpackEnc.SetMaxDynamicTableSize(maxHeaderTableSize)
// Section 8.1.2.3
err := hpackEnc.WriteField(hpack.HeaderField{
Name: ":method",
Value: r.Method,
})
if err != nil {
return nil, err
}
err = hpackEnc.WriteField(hpack.HeaderField{
Name: ":scheme",
Value: "http",
})
if err != nil {
return nil, err
}
err = hpackEnc.WriteField(hpack.HeaderField{
Name: ":authority",
Value: r.Host,
})
if err != nil {
return nil, err
}
path := r.URL.Path
if r.URL.RawQuery != "" {
path = strings.Join([]string{path, r.URL.RawQuery}, "?")
}
err = hpackEnc.WriteField(hpack.HeaderField{
Name: ":path",
Value: path,
})
if err != nil {
return nil, err
}
// TODO Implement Section 8.3
for header, values := range r.Header {
// Skip non h2 headers
if isNonH2Header(header) {
continue
}
for _, v := range values {
err := hpackEnc.WriteField(hpack.HeaderField{
Name: strings.ToLower(header),
Value: v,
})
if err != nil {
return nil, err
}
}
}
return headerBytes.Bytes(), nil
}
// Connection-specific headers listed in RFC 7540 Section 8.1.2.2 that are not
// supposed to be transferred to HTTP/2. The Http2-Settings header is skipped
// since it has already been used to create the HTTP/2 SETTINGS frame.
var nonH2Headers = []string{
"Connection",
"Keep-Alive",
"Proxy-Connection",
"Transfer-Encoding",
"Upgrade",
"Http2-Settings",
}
// isNonH2Header returns true if header should not be transferred to HTTP/2.
func isNonH2Header(header string) bool {
for _, nonH2h := range nonH2Headers {
if header == nonH2h {
return true
}
}
return false
}
|
[
"\"GODEBUG\""
] |
[] |
[
"GODEBUG"
] |
[]
|
["GODEBUG"]
|
go
| 1 | 0 | |
vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
|
package imagebuildah
import (
"context"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
"github.com/containers/buildah"
"github.com/containers/buildah/copier"
"github.com/containers/buildah/define"
buildahdocker "github.com/containers/buildah/docker"
"github.com/containers/buildah/internal"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/buildah/pkg/rusage"
"github.com/containers/buildah/util"
cp "github.com/containers/image/v5/copy"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/chrootarchive"
docker "github.com/fsouza/go-dockerclient"
digest "github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/openshift/imagebuilder"
"github.com/openshift/imagebuilder/dockerfile/parser"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// StageExecutor bundles up what we need to know when executing one stage of a
// (possibly multi-stage) build.
// Each stage may need to produce an image to be used as the base in a later
// stage (with the last stage's image being the end product of the build), and
// it may need to leave its working container in place so that the container's
// root filesystem's contents can be used as the source for a COPY instruction
// in a later stage.
// Each stage has its own base image, so it starts with its own configuration
// and set of volumes.
// If we're naming the result of the build, only the last stage will apply that
// name to the image that it produces.
type StageExecutor struct {
ctx context.Context
executor *Executor
log func(format string, args ...interface{})
index int
stages imagebuilder.Stages
name string
builder *buildah.Builder
preserved int
volumes imagebuilder.VolumeSet
volumeCache map[string]string
volumeCacheInfo map[string]os.FileInfo
mountPoint string
output string
containerIDs []string
stage *imagebuilder.Stage
}
// Preserve informs the stage executor that from this point on, it needs to
// ensure that only COPY and ADD instructions can modify the contents of this
// directory or anything below it.
// The StageExecutor handles this by caching the contents of directories which
// have been marked this way before executing a RUN instruction, invalidating
// that cache when an ADD or COPY instruction sets any location under the
// directory as the destination, and using the cache to reset the contents of
// the directory tree after processing each RUN instruction.
// It would be simpler if we could just mark the directory as a read-only bind
// mount of itself during Run(), but the directory is expected to remain
// writable while the RUN instruction is being handled, even if any changes
// made within the directory are ultimately discarded.
func (s *StageExecutor) Preserve(path string) error {
logrus.Debugf("PRESERVE %q", path)
if s.volumes.Covers(path) {
// This path is already a subdirectory of a volume path that
// we're already preserving, so there's nothing new to be done
// except ensure that it exists.
createdDirPerms := os.FileMode(0755)
if err := copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, path), copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
return errors.Wrapf(err, "error ensuring volume path exists")
}
if err := s.volumeCacheInvalidate(path); err != nil {
return errors.Wrapf(err, "error ensuring volume path %q is preserved", filepath.Join(s.mountPoint, path))
}
return nil
}
// Figure out where the cache for this volume would be stored.
s.preserved++
cacheDir, err := s.executor.store.ContainerDirectory(s.builder.ContainerID)
if err != nil {
return errors.Errorf("unable to locate temporary directory for container")
}
cacheFile := filepath.Join(cacheDir, fmt.Sprintf("volume%d.tar", s.preserved))
// Save info about the top level of the location that we'll be archiving.
var archivedPath string
// Try and resolve the symlink (if one exists)
// Set archivedPath and path based on whether a symlink is found or not
if evaluated, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, path), copier.EvalOptions{}); err == nil {
symLink, err := filepath.Rel(s.mountPoint, evaluated)
if err != nil {
return errors.Wrapf(err, "making evaluated path %q relative to %q", evaluated, s.mountPoint)
}
if strings.HasPrefix(symLink, ".."+string(os.PathSeparator)) {
return errors.Errorf("evaluated path %q was not below %q", evaluated, s.mountPoint)
}
archivedPath = evaluated
path = string(os.PathSeparator) + symLink
} else {
return errors.Wrapf(err, "error evaluating path %q", path)
}
st, err := os.Stat(archivedPath)
if os.IsNotExist(err) {
createdDirPerms := os.FileMode(0755)
if err = copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
return errors.Wrapf(err, "error ensuring volume path exists")
}
st, err = os.Stat(archivedPath)
}
if err != nil {
logrus.Debugf("error reading info about %q: %v", archivedPath, err)
return err
}
s.volumeCacheInfo[path] = st
if !s.volumes.Add(path) {
// This path is not a subdirectory of a volume path that we're
// already preserving, so adding it to the list should work.
return errors.Errorf("error adding %q to the volume cache", path)
}
s.volumeCache[path] = cacheFile
// Now prune cache files for volumes that are now supplanted by this one.
removed := []string{}
for cachedPath := range s.volumeCache {
// Walk our list of cached volumes, and check that they're
// still in the list of locations that we need to cache.
found := false
for _, volume := range s.volumes {
if volume == cachedPath {
// We need to keep this volume's cache.
found = true
break
}
}
if !found {
// We don't need to keep this volume's cache. Make a
// note to remove it.
removed = append(removed, cachedPath)
}
}
// Actually remove the caches that we decided to remove.
for _, cachedPath := range removed {
archivedPath := filepath.Join(s.mountPoint, cachedPath)
logrus.Debugf("no longer need cache of %q in %q", archivedPath, s.volumeCache[cachedPath])
if err := os.Remove(s.volumeCache[cachedPath]); err != nil {
if os.IsNotExist(err) {
continue
}
return err
}
delete(s.volumeCache, cachedPath)
}
return nil
}
// Remove any volume cache item which will need to be re-saved because we're
// writing to part of it.
func (s *StageExecutor) volumeCacheInvalidate(path string) error {
invalidated := []string{}
for cachedPath := range s.volumeCache {
if strings.HasPrefix(path, cachedPath+string(os.PathSeparator)) {
invalidated = append(invalidated, cachedPath)
}
}
for _, cachedPath := range invalidated {
if err := os.Remove(s.volumeCache[cachedPath]); err != nil {
if os.IsNotExist(err) {
continue
}
return err
}
archivedPath := filepath.Join(s.mountPoint, cachedPath)
logrus.Debugf("invalidated volume cache %q for %q from %q", archivedPath, path, s.volumeCache[cachedPath])
}
return nil
}
// Save the contents of each of the executor's list of volumes for which we
// don't already have a cache file.
func (s *StageExecutor) volumeCacheSaveVFS() (mounts []specs.Mount, err error) {
for cachedPath, cacheFile := range s.volumeCache {
archivedPath, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.EvalOptions{})
if err != nil {
return nil, errors.Wrapf(err, "error evaluating volume path")
}
relativePath, err := filepath.Rel(s.mountPoint, archivedPath)
if err != nil {
return nil, errors.Wrapf(err, "error converting %q into a path relative to %q", archivedPath, s.mountPoint)
}
if strings.HasPrefix(relativePath, ".."+string(os.PathSeparator)) {
return nil, errors.Errorf("error converting %q into a path relative to %q", archivedPath, s.mountPoint)
}
_, err = os.Stat(cacheFile)
if err == nil {
logrus.Debugf("contents of volume %q are already cached in %q", archivedPath, cacheFile)
continue
}
if !os.IsNotExist(err) {
return nil, err
}
createdDirPerms := os.FileMode(0755)
if err := copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
return nil, errors.Wrapf(err, "error ensuring volume path exists")
}
logrus.Debugf("caching contents of volume %q in %q", archivedPath, cacheFile)
cache, err := os.Create(cacheFile)
if err != nil {
return nil, err
}
defer cache.Close()
rc, err := chrootarchive.Tar(archivedPath, nil, s.mountPoint)
if err != nil {
return nil, errors.Wrapf(err, "error archiving %q", archivedPath)
}
defer rc.Close()
_, err = io.Copy(cache, rc)
if err != nil {
return nil, errors.Wrapf(err, "error archiving %q to %q", archivedPath, cacheFile)
}
mount := specs.Mount{
Source: archivedPath,
Destination: string(os.PathSeparator) + relativePath,
Type: "bind",
Options: []string{"private"},
}
mounts = append(mounts, mount)
}
return nil, nil
}
// Restore the contents of each of the executor's list of volumes.
func (s *StageExecutor) volumeCacheRestoreVFS() (err error) {
for cachedPath, cacheFile := range s.volumeCache {
archivedPath, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.EvalOptions{})
if err != nil {
return errors.Wrapf(err, "error evaluating volume path")
}
logrus.Debugf("restoring contents of volume %q from %q", archivedPath, cacheFile)
cache, err := os.Open(cacheFile)
if err != nil {
return err
}
defer cache.Close()
if err := copier.Remove(s.mountPoint, archivedPath, copier.RemoveOptions{All: true}); err != nil {
return err
}
createdDirPerms := os.FileMode(0755)
if err := copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
return err
}
err = chrootarchive.Untar(cache, archivedPath, nil)
if err != nil {
return errors.Wrapf(err, "error extracting archive at %q", archivedPath)
}
if st, ok := s.volumeCacheInfo[cachedPath]; ok {
if err := os.Chmod(archivedPath, st.Mode()); err != nil {
return err
}
uid := 0
gid := 0
if st.Sys() != nil {
uid = util.UID(st)
gid = util.GID(st)
}
if err := os.Chown(archivedPath, uid, gid); err != nil {
return err
}
if err := os.Chtimes(archivedPath, st.ModTime(), st.ModTime()); err != nil {
return err
}
}
}
return nil
}
// Save the contents of each of the executor's list of volumes for which we
// don't already have a cache file.
func (s *StageExecutor) volumeCacheSaveOverlay() (mounts []specs.Mount, err error) {
for cachedPath := range s.volumeCache {
err = copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.MkdirOptions{})
if err != nil {
return nil, errors.Wrapf(err, "ensuring volume exists")
}
volumePath := filepath.Join(s.mountPoint, cachedPath)
mount := specs.Mount{
Source: volumePath,
Destination: cachedPath,
Options: []string{"O", "private"},
}
mounts = append(mounts, mount)
}
return mounts, nil
}
// Reset the contents of each of the executor's list of volumes.
func (s *StageExecutor) volumeCacheRestoreOverlay() error {
return nil
}
// Save the contents of each of the executor's list of volumes for which we
// don't already have a cache file.
func (s *StageExecutor) volumeCacheSave() (mounts []specs.Mount, err error) {
switch s.executor.store.GraphDriverName() {
case "overlay":
return s.volumeCacheSaveOverlay()
}
return s.volumeCacheSaveVFS()
}
// Reset the contents of each of the executor's list of volumes.
func (s *StageExecutor) volumeCacheRestore() error {
switch s.executor.store.GraphDriverName() {
case "overlay":
return s.volumeCacheRestoreOverlay()
}
return s.volumeCacheRestoreVFS()
}
// Copy copies data into the working tree. The "Download" field is how
// imagebuilder tells us the instruction was "ADD" and not "COPY".
func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error {
s.builder.ContentDigester.Restart()
for _, copy := range copies {
if copy.Download {
logrus.Debugf("ADD %#v, %#v", excludes, copy)
} else {
logrus.Debugf("COPY %#v, %#v", excludes, copy)
}
if err := s.volumeCacheInvalidate(copy.Dest); err != nil {
return err
}
var sources []string
// The From field says to read the content from another
// container. Update the ID mappings and
// all-content-comes-from-below-this-directory value.
var idMappingOptions *define.IDMappingOptions
var copyExcludes []string
stripSetuid := false
stripSetgid := false
preserveOwnership := false
contextDir := s.executor.contextDir
if len(copy.From) > 0 {
// If from has an argument within it, resolve it to its
// value. Otherwise just return the value found.
from, fromErr := imagebuilder.ProcessWord(copy.From, s.stage.Builder.Arguments())
if fromErr != nil {
return errors.Wrapf(fromErr, "unable to resolve argument %q", copy.From)
}
if isStage, err := s.executor.waitForStage(s.ctx, from, s.stages[:s.index]); isStage && err != nil {
return err
}
if other, ok := s.executor.stages[from]; ok && other.index < s.index {
contextDir = other.mountPoint
idMappingOptions = &other.builder.IDMappingOptions
} else if builder, ok := s.executor.containerMap[copy.From]; ok {
contextDir = builder.MountPoint
idMappingOptions = &builder.IDMappingOptions
} else {
return errors.Errorf("the stage %q has not been built", copy.From)
}
preserveOwnership = true
copyExcludes = excludes
} else {
copyExcludes = append(s.executor.excludes, excludes...)
stripSetuid = true // did this change between 18.06 and 19.03?
stripSetgid = true // did this change between 18.06 and 19.03?
}
for _, src := range copy.Src {
if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
// Source is a URL, allowed for ADD but not COPY.
if copy.Download {
sources = append(sources, src)
} else {
// returns an error to be compatible with docker
return errors.Errorf("source can't be a URL for COPY")
}
} else {
sources = append(sources, filepath.Join(contextDir, src))
}
}
options := buildah.AddAndCopyOptions{
Chmod: copy.Chmod,
Chown: copy.Chown,
PreserveOwnership: preserveOwnership,
ContextDir: contextDir,
Excludes: copyExcludes,
IgnoreFile: s.executor.ignoreFile,
IDMappingOptions: idMappingOptions,
StripSetuidBit: stripSetuid,
StripSetgidBit: stripSetgid,
}
if err := s.builder.Add(copy.Dest, copy.Download, options, sources...); err != nil {
return err
}
}
return nil
}
// runStageMountPoints returns a map of StageName/ImageName:internal.StageMountDetails for RunOpts if any --mount with from is provided.
// The stage can automatically clean up these mounts when it is removed.
// Check if RUN contains `--mount` with `from`; if so, pre-mount the images or stages from the executor for Run.
// Stages mounted here will be used by Run().
func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]internal.StageMountDetails, error) {
stageMountPoints := make(map[string]internal.StageMountDetails)
for _, flag := range mountList {
if strings.Contains(flag, "from") {
arr := strings.SplitN(flag, ",", 2)
if len(arr) < 2 {
return nil, errors.Errorf("Invalid --mount command: %s", flag)
}
tokens := strings.Split(arr[1], ",")
for _, val := range tokens {
kv := strings.SplitN(val, "=", 2)
switch kv[0] {
case "from":
if len(kv) == 1 {
return nil, errors.Errorf("unable to resolve argument for `from=`: bad argument")
}
if kv[1] == "" {
return nil, errors.Errorf("unable to resolve argument for `from=`: from points to an empty value")
}
from, fromErr := imagebuilder.ProcessWord(kv[1], s.stage.Builder.Arguments())
if fromErr != nil {
return nil, errors.Wrapf(fromErr, "unable to resolve argument %q", kv[1])
}
// If the source's name corresponds to the
// result of an earlier stage, wait for that
// stage to finish being built.
if isStage, err := s.executor.waitForStage(s.ctx, from, s.stages[:s.index]); isStage && err != nil {
return nil, err
}
if otherStage, ok := s.executor.stages[from]; ok && otherStage.index < s.index {
stageMountPoints[from] = internal.StageMountDetails{IsStage: true, MountPoint: otherStage.mountPoint}
break
} else {
mountPoint, err := s.getImageRootfs(s.ctx, from)
if err != nil {
return nil, errors.Errorf("%s from=%s: no stage or image found with that name", flag, from)
}
stageMountPoints[from] = internal.StageMountDetails{IsStage: false, MountPoint: mountPoint}
break
}
default:
continue
}
}
}
}
return stageMountPoints, nil
}
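// Illustrative Dockerfile line (a hypothetical example, not from this file)
// that would exercise runStageMountPoints, mounting the root filesystem of an
// earlier stage named "builder" into the current RUN step:
//
//	RUN --mount=type=bind,from=builder,source=/build,target=/mnt make install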
// Run executes a RUN instruction using the stage's current working container
// as a root directory.
func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
logrus.Debugf("RUN %#v, %#v", run, config)
stageMountPoints, err := s.runStageMountPoints(run.Mounts)
if err != nil {
return err
}
if s.builder == nil {
return errors.Errorf("no build container available")
}
stdin := s.executor.in
if stdin == nil {
devNull, err := os.Open(os.DevNull)
if err != nil {
return errors.Errorf("error opening %q for reading: %v", os.DevNull, err)
}
defer devNull.Close()
stdin = devNull
}
options := buildah.RunOptions{
Logger: s.executor.logger,
Hostname: config.Hostname,
Runtime: s.executor.runtime,
Args: s.executor.runtimeArgs,
NoPivot: os.Getenv("BUILDAH_NOPIVOT") != "",
Mounts: append([]Mount{}, s.executor.transientMounts...),
Env: config.Env,
User: config.User,
WorkingDir: config.WorkingDir,
Entrypoint: config.Entrypoint,
ContextDir: s.executor.contextDir,
Cmd: config.Cmd,
Stdin: stdin,
Stdout: s.executor.out,
Stderr: s.executor.err,
Quiet: s.executor.quiet,
NamespaceOptions: s.executor.namespaceOptions,
Terminal: buildah.WithoutTerminal,
Secrets: s.executor.secrets,
SSHSources: s.executor.sshsources,
RunMounts: run.Mounts,
StageMountPoints: stageMountPoints,
SystemContext: s.executor.systemContext,
}
if config.NetworkDisabled {
options.ConfigureNetwork = buildah.NetworkDisabled
} else {
options.ConfigureNetwork = buildah.NetworkEnabled
}
args := run.Args
if run.Shell {
if len(config.Shell) > 0 && s.builder.Format == define.Dockerv2ImageManifest {
args = append(config.Shell, args...)
} else {
args = append([]string{"/bin/sh", "-c"}, args...)
}
}
mounts, err := s.volumeCacheSave()
if err != nil {
return err
}
options.Mounts = append(options.Mounts, mounts...)
err = s.builder.Run(args, options)
if err2 := s.volumeCacheRestore(); err2 != nil {
if err == nil {
return err2
}
}
return err
}
// UnrecognizedInstruction is called when we encounter an instruction that the
// imagebuilder parser didn't understand.
func (s *StageExecutor) UnrecognizedInstruction(step *imagebuilder.Step) error {
errStr := fmt.Sprintf("Build error: Unknown instruction: %q ", strings.ToUpper(step.Command))
err := fmt.Sprintf(errStr+"%#v", step)
if s.executor.ignoreUnrecognizedInstructions {
logrus.Debugf(err)
return nil
}
switch logrus.GetLevel() {
case logrus.ErrorLevel:
s.executor.logger.Errorf(errStr)
case logrus.DebugLevel:
logrus.Debugf(err)
default:
s.executor.logger.Errorf("+(UNHANDLED LOGLEVEL) %#v", step)
}
return errors.Errorf(err)
}
// prepare creates a working container based on the specified image, or if one
// isn't specified, the first argument passed to the first FROM instruction we
// can find in the stage's parsed tree.
func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBConfig, rebase bool, pullPolicy define.PullPolicy) (builder *buildah.Builder, err error) {
stage := s.stage
ib := stage.Builder
node := stage.Node
if from == "" {
base, err := ib.From(node)
if err != nil {
logrus.Debugf("prepare(node.Children=%#v)", node.Children)
return nil, errors.Wrapf(err, "error determining starting point for build")
}
from = base
}
displayFrom := from
// stage.Name will be a numeric string for all stages without an "AS" clause
asImageName := stage.Name
if asImageName != "" {
if _, err := strconv.Atoi(asImageName); err != nil {
displayFrom = from + " AS " + asImageName
}
}
if initializeIBConfig && rebase {
logrus.Debugf("FROM %#v", displayFrom)
if !s.executor.quiet {
s.log("FROM %s", displayFrom)
}
}
builderSystemContext := s.executor.systemContext
// get platform string from stage
if stage.Builder.Platform != "" {
os, arch, variant, err := parse.Platform(stage.Builder.Platform)
if err != nil {
return nil, errors.Wrapf(err, "unable to parse platform %q", stage.Builder.Platform)
}
if arch != "" || variant != "" {
builderSystemContext.ArchitectureChoice = arch
builderSystemContext.VariantChoice = variant
}
if os != "" {
builderSystemContext.OSChoice = os
}
}
builderOptions := buildah.BuilderOptions{
Args: ib.Args,
FromImage: from,
PullPolicy: pullPolicy,
ContainerSuffix: s.executor.containerSuffix,
Registry: s.executor.registry,
BlobDirectory: s.executor.blobDirectory,
SignaturePolicyPath: s.executor.signaturePolicyPath,
ReportWriter: s.executor.reportWriter,
SystemContext: builderSystemContext,
Isolation: s.executor.isolation,
NamespaceOptions: s.executor.namespaceOptions,
ConfigureNetwork: s.executor.configureNetwork,
CNIPluginPath: s.executor.cniPluginPath,
CNIConfigDir: s.executor.cniConfigDir,
NetworkInterface: s.executor.networkInterface,
IDMappingOptions: s.executor.idmappingOptions,
CommonBuildOpts: s.executor.commonBuildOptions,
DefaultMountsFilePath: s.executor.defaultMountsFilePath,
Format: s.executor.outputFormat,
Capabilities: s.executor.capabilities,
Devices: s.executor.devices,
MaxPullRetries: s.executor.maxPullPushRetries,
PullRetryDelay: s.executor.retryPullPushDelay,
OciDecryptConfig: s.executor.ociDecryptConfig,
Logger: s.executor.logger,
ProcessLabel: s.executor.processLabel,
MountLabel: s.executor.mountLabel,
}
builder, err = buildah.NewBuilder(ctx, s.executor.store, builderOptions)
if err != nil {
return nil, errors.Wrapf(err, "error creating build container")
}
// If the executor's ProcessLabel and MountLabel are empty, this is the first stage.
// Make sure we share the first stage's ProcessLabel and MountLabel with all subsequent stages.
// Doing this ensures that one stage in the same build can mount another stage even if `selinux`
// is enabled.
if s.executor.mountLabel == "" && s.executor.processLabel == "" {
s.executor.mountLabel = builder.MountLabel
s.executor.processLabel = builder.ProcessLabel
}
if initializeIBConfig {
volumes := map[string]struct{}{}
for _, v := range builder.Volumes() {
volumes[v] = struct{}{}
}
ports := map[docker.Port]struct{}{}
for _, p := range builder.Ports() {
ports[docker.Port(p)] = struct{}{}
}
dConfig := docker.Config{
Hostname: builder.Hostname(),
Domainname: builder.Domainname(),
User: builder.User(),
Env: builder.Env(),
Cmd: builder.Cmd(),
Image: from,
Volumes: volumes,
WorkingDir: builder.WorkDir(),
Entrypoint: builder.Entrypoint(),
Labels: builder.Labels(),
Shell: builder.Shell(),
StopSignal: builder.StopSignal(),
OnBuild: builder.OnBuild(),
ExposedPorts: ports,
}
var rootfs *docker.RootFS
if builder.Docker.RootFS != nil {
rootfs = &docker.RootFS{
Type: builder.Docker.RootFS.Type,
}
for _, id := range builder.Docker.RootFS.DiffIDs {
rootfs.Layers = append(rootfs.Layers, id.String())
}
}
dImage := docker.Image{
Parent: builder.FromImage,
ContainerConfig: dConfig,
Container: builder.Container,
Author: builder.Maintainer(),
Architecture: builder.Architecture(),
RootFS: rootfs,
}
dImage.Config = &dImage.ContainerConfig
err = ib.FromImage(&dImage, node)
if err != nil {
if err2 := builder.Delete(); err2 != nil {
logrus.Debugf("error deleting container which we failed to update: %v", err2)
}
return nil, errors.Wrapf(err, "error updating build context")
}
}
mountPoint, err := builder.Mount(builder.MountLabel)
if err != nil {
if err2 := builder.Delete(); err2 != nil {
logrus.Debugf("error deleting container which we failed to mount: %v", err2)
}
return nil, errors.Wrapf(err, "error mounting new container")
}
if rebase {
// Make this our "current" working container.
s.mountPoint = mountPoint
s.builder = builder
}
logrus.Debugln("Container ID:", builder.ContainerID)
return builder, nil
}
// Delete deletes the stage's working container, if we have one.
func (s *StageExecutor) Delete() (err error) {
if s.builder != nil {
err = s.builder.Delete()
s.builder = nil
}
return err
}
// stepRequiresLayer indicates whether or not the step should be followed by
// committing a layer container when creating an intermediate image.
func (*StageExecutor) stepRequiresLayer(step *imagebuilder.Step) bool {
switch strings.ToUpper(step.Command) {
case "ADD", "COPY", "RUN":
return true
}
return false
}
// getImageRootfs checks for an image matching the passed-in name in local
// storage. If it isn't found, it pulls down a copy. Then, if we don't have a
// working container root filesystem based on the image, it creates one. Then
// it returns that root filesystem's location.
func (s *StageExecutor) getImageRootfs(ctx context.Context, image string) (mountPoint string, err error) {
if builder, ok := s.executor.containerMap[image]; ok {
return builder.MountPoint, nil
}
builder, err := s.prepare(ctx, image, false, false, s.executor.pullPolicy)
if err != nil {
return "", err
}
s.executor.containerMap[image] = builder
return builder.MountPoint, nil
}
// Execute runs each of the steps in the stage's parsed tree, in turn.
func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, ref reference.Canonical, err error) {
var resourceUsage rusage.Rusage
stage := s.stage
ib := stage.Builder
checkForLayers := s.executor.layers && s.executor.useCache
moreStages := s.index < len(s.stages)-1
lastStage := !moreStages
imageIsUsedLater := moreStages && (s.executor.baseMap[stage.Name] || s.executor.baseMap[strconv.Itoa(stage.Position)])
rootfsIsUsedLater := moreStages && (s.executor.rootfsMap[stage.Name] || s.executor.rootfsMap[strconv.Itoa(stage.Position)])
// If the base image's name corresponds to the result of an earlier
// stage, make sure that stage has finished building an image, and
// substitute that image's ID for the base image's name here and force
// the pull policy to "never" to avoid triggering an error when it's
// set to "always", which doesn't make sense for image IDs.
// If not, then go on assuming that it's just a regular image that's
// either in local storage, or one that we have to pull from a
// registry, subject to the passed-in pull policy.
if isStage, err := s.executor.waitForStage(ctx, base, s.stages[:s.index]); isStage && err != nil {
return "", nil, err
}
pullPolicy := s.executor.pullPolicy
s.executor.stagesLock.Lock()
if stageImage, isPreviousStage := s.executor.imageMap[base]; isPreviousStage {
base = stageImage
pullPolicy = define.PullNever
}
s.executor.stagesLock.Unlock()
// Set things up so that we can log resource usage as we go.
logRusage := func() {
if rusage.Supported() {
usage, err := rusage.Get()
if err != nil {
fmt.Fprintf(s.executor.out, "error gathering resource usage information: %v\n", err)
return
}
if s.executor.rusageLogFile != nil {
fmt.Fprintf(s.executor.rusageLogFile, "%s\n", rusage.FormatDiff(usage.Subtract(resourceUsage)))
}
resourceUsage = usage
}
}
// Start counting resource usage before we potentially pull a base image.
if rusage.Supported() {
if resourceUsage, err = rusage.Get(); err != nil {
return "", nil, err
}
// Log the final incremental resource usage counter before we return.
defer logRusage()
}
// Create the (first) working container for this stage. Reinitializing
// the imagebuilder configuration may alter the list of steps we have,
// so take a snapshot of them *after* that.
if _, err := s.prepare(ctx, base, true, true, pullPolicy); err != nil {
return "", nil, err
}
children := stage.Node.Children
// A helper function to only log "COMMIT" as an explicit step if it's
// the very last step of a (possibly multi-stage) build.
logCommit := func(output string, instruction int) {
moreInstructions := instruction < len(children)-1
if moreInstructions || moreStages {
return
}
commitMessage := "COMMIT"
if output != "" {
commitMessage = fmt.Sprintf("%s %s", commitMessage, output)
}
logrus.Debugf(commitMessage)
if !s.executor.quiet {
s.log(commitMessage)
}
}
logCacheHit := func(cacheID string) {
if !s.executor.quiet {
cacheHitMessage := "--> Using cache"
fmt.Fprintf(s.executor.out, "%s %s\n", cacheHitMessage, cacheID)
}
}
logImageID := func(imgID string) {
if len(imgID) > 11 {
imgID = imgID[0:11]
}
if s.executor.iidfile == "" {
fmt.Fprintf(s.executor.out, "--> %s\n", imgID)
}
}
if len(children) == 0 {
// There are no steps.
if s.builder.FromImageID == "" || s.executor.squash {
// We either don't have a base image, or we need to
// squash the contents of the base image. Whichever is
// the case, we need to commit() to create a new image.
logCommit(s.output, -1)
if imgID, ref, err = s.commit(ctx, s.getCreatedBy(nil, ""), false, s.output); err != nil {
return "", nil, errors.Wrapf(err, "error committing base container")
}
} else if len(s.executor.labels) > 0 || len(s.executor.annotations) > 0 {
// The image would be modified by the labels passed
// via the command line, so we need to commit.
logCommit(s.output, -1)
if imgID, ref, err = s.commit(ctx, s.getCreatedBy(stage.Node, ""), true, s.output); err != nil {
return "", nil, err
}
} else {
// We don't need to squash the base image, and the
// image wouldn't be modified by the command line
// options, so just reuse the base image.
logCommit(s.output, -1)
if imgID, ref, err = s.tagExistingImage(ctx, s.builder.FromImageID, s.output); err != nil {
return "", nil, err
}
}
logImageID(imgID)
}
for i, node := range children {
logRusage()
moreInstructions := i < len(children)-1
lastInstruction := !moreInstructions
// Resolve any arguments in this instruction.
step := ib.Step()
if err := step.Resolve(node); err != nil {
return "", nil, errors.Wrapf(err, "error resolving step %+v", *node)
}
logrus.Debugf("Parsed Step: %+v", *step)
if !s.executor.quiet {
s.log("%s", step.Original)
}
// Check if there's a --from if the step command is COPY.
// Also check the chmod and the chown flags for validity.
for _, flag := range step.Flags {
command := strings.ToUpper(step.Command)
// chmod, chown and from flags should have an '=' sign, '--chmod=', '--chown=' or '--from='
if command == "COPY" && (flag == "--chmod" || flag == "--chown" || flag == "--from") {
return "", nil, errors.Errorf("COPY only supports the --chmod=<permissions> --chown=<uid:gid> and the --from=<image|stage> flags")
}
if command == "ADD" && (flag == "--chmod" || flag == "--chown") {
return "", nil, errors.Errorf("ADD only supports the --chmod=<permissions> and the --chown=<uid:gid> flags")
}
if strings.Contains(flag, "--from") && command == "COPY" {
arr := strings.Split(flag, "=")
if len(arr) != 2 {
return "", nil, errors.Errorf("%s: invalid --from flag, should be --from=<name|stage>", command)
}
// If arr[1] has an argument within it, resolve it to its
// value. Otherwise just return the value found.
from, fromErr := imagebuilder.ProcessWord(arr[1], s.stage.Builder.Arguments())
if fromErr != nil {
return "", nil, errors.Wrapf(fromErr, "unable to resolve argument %q", arr[1])
}
// If the source's name corresponds to the
// result of an earlier stage, wait for that
// stage to finish being built.
if isStage, err := s.executor.waitForStage(ctx, from, s.stages[:s.index]); isStage && err != nil {
return "", nil, err
}
if otherStage, ok := s.executor.stages[from]; ok && otherStage.index < s.index {
break
} else if _, err = s.getImageRootfs(ctx, from); err != nil {
return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", command, from)
}
break
}
}
// Determine if there are any RUN instructions to be run after
// this step. If not, we won't have to bother preserving the
// contents of any volumes declared between now and when we
// finish.
noRunsRemaining := false
if moreInstructions {
noRunsRemaining = !ib.RequiresStart(&parser.Node{Children: children[i+1:]})
}
// If we're doing a single-layer build, just process the
// instruction.
if !s.executor.layers {
err := ib.Run(step, s, noRunsRemaining)
if err != nil {
logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
}
// In case we added content, retrieve its digest.
addedContentType, addedContentDigest := s.builder.ContentDigester.Digest()
addedContentSummary := addedContentType
if addedContentDigest != "" {
if addedContentSummary != "" {
addedContentSummary = addedContentSummary + ":"
}
addedContentSummary = addedContentSummary + addedContentDigest.Encoded()
logrus.Debugf("added content %s", addedContentSummary)
}
if moreInstructions {
// There are still more instructions to process
// for this stage. Make a note of the
// instruction in the history that we'll write
// for the image when we eventually commit it.
timestamp := time.Now().UTC()
if s.executor.timestamp != nil {
timestamp = *s.executor.timestamp
}
s.builder.AddPrependedEmptyLayer(&timestamp, s.getCreatedBy(node, addedContentSummary), "", "")
continue
} else {
// This is the last instruction for this stage,
// so we should commit this container to create
// an image, but only if it's the last stage,
// or if it's used as the basis for a later
// stage.
if lastStage || imageIsUsedLater {
logCommit(s.output, i)
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), false, s.output)
if err != nil {
return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
}
logImageID(imgID)
} else {
imgID = ""
}
break
}
}
// We're in a multi-layered build.
var (
commitName string
cacheID string
err error
rebase bool
addedContentSummary string
)
// If we have to commit for this instruction, only assign the
// stage's configured output name to the last layer.
if lastInstruction {
commitName = s.output
}
// Check if there's already an image based on our parent that
// has the same change that we're about to make, so far as we
// can tell.
// Only do this if the step we are on is not an ARG step,
// we need to call ib.Run() to correctly put the args together before
// determining if a cached layer with the same build args already exists
// and that is done in the if block below.
if checkForLayers && step.Command != "arg" {
cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
if err != nil {
return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build")
}
}
// If we didn't find a cache entry, or we need to add content
// to find the digest of the content to check for a cached
// image, run the step so that we can check if the result
// matches a cache.
if cacheID == "" {
// Process the instruction directly.
if err = ib.Run(step, s, noRunsRemaining); err != nil {
logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
}
// In case we added content, retrieve its digest.
addedContentType, addedContentDigest := s.builder.ContentDigester.Digest()
addedContentSummary = addedContentType
if addedContentDigest != "" {
if addedContentSummary != "" {
addedContentSummary = addedContentSummary + ":"
}
addedContentSummary = addedContentSummary + addedContentDigest.Encoded()
logrus.Debugf("added content %s", addedContentSummary)
}
// Check if there's already an image based on our parent that
// has the same change that we just made.
if checkForLayers {
cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
if err != nil {
return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build")
}
}
} else {
// If the instruction would affect our configuration,
// process the configuration change so that, if we fall
// off the cache path, the filesystem changes from the
// last cache image will be all that we need, since we
// still don't want to restart using the image's
// configuration blob.
if !s.stepRequiresLayer(step) {
err := ib.Run(step, s, noRunsRemaining)
if err != nil {
logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
}
}
}
// We want to save history for other layers during a squashed build.
// The toggle flag allows the executor to treat other instructions and layers
// as regular builds and only perform the squash at the end.
squashToggle := false
// Note: If the build uses squash, we must try to reuse as many layers as possible if a cache is found,
// so only perform the commit if it's the lastInstruction of the lastStage.
if cacheID != "" {
logCacheHit(cacheID)
// A suitable cached image was found, so we can just
// reuse it. If we need to add a name to the resulting
// image because it's the last step in this stage, add
// the name to the image.
imgID = cacheID
if commitName != "" {
logCommit(commitName, i)
if imgID, ref, err = s.tagExistingImage(ctx, cacheID, commitName); err != nil {
return "", nil, err
}
}
} else {
if s.executor.squash {
// We want to save history for other layers during a squashed build.
// The squashToggle flag allows the executor to treat other instructions and layers
// as regular builds and only perform the squash at the end.
s.executor.squash = false
squashToggle = true
}
// We're not going to find any more cache hits, so we
// can stop looking for them.
checkForLayers = false
// Create a new image, maybe with a new layer, with the
// name for this stage if it's the last instruction.
logCommit(s.output, i)
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName)
if err != nil {
return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
}
}
// Perform the final squash for this build, as we are on the
// last instruction of the last stage.
if (s.executor.squash || squashToggle) && lastInstruction && lastStage {
s.executor.squash = true
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName)
if err != nil {
return "", nil, errors.Wrapf(err, "error committing final squash step %+v", *step)
}
}
logImageID(imgID)
// Update our working container to be based off of the cached
// image, if we might need to use it as a basis for the next
// instruction, or if we need the root filesystem to match the
// image contents for the sake of a later stage that wants to
// copy content from it.
rebase = moreInstructions || rootfsIsUsedLater
if rebase {
// Since we either committed the working container or
// are about to replace it with one based on a cached
// image, add the current working container's ID to the
// list of successful intermediate containers that
// we'll clean up later.
s.containerIDs = append(s.containerIDs, s.builder.ContainerID)
// Prepare for the next step or subsequent phases by
// creating a new working container with the
// just-committed or updated cached image as its new
// base image.
// Enforce pull "never" since we already have an image
// ID that we really should not be pulling anymore (see
// containers/podman/issues/10307).
if _, err := s.prepare(ctx, imgID, false, true, define.PullNever); err != nil {
return "", nil, errors.Wrap(err, "error preparing container for next step")
}
}
}
return imgID, ref, nil
}
func historyEntriesEqual(base, derived v1.History) bool {
if base.CreatedBy != derived.CreatedBy {
return false
}
if base.Comment != derived.Comment {
return false
}
if base.Author != derived.Author {
return false
}
if base.EmptyLayer != derived.EmptyLayer {
return false
}
if base.Created != nil && derived.Created == nil {
return false
}
if base.Created == nil && derived.Created != nil {
return false
}
if base.Created != nil && derived.Created != nil && !base.Created.Equal(*derived.Created) {
return false
}
return true
}
// historyAndDiffIDsMatch returns true if a candidate history matches the
// history of our base image (if we have one), plus the current instruction,
// and if the lists of diff IDs for the images match for the part of the history
// that we're comparing.
// Used to verify whether a cached intermediate image exists and whether
// to run the build again.
func (s *StageExecutor) historyAndDiffIDsMatch(baseHistory []v1.History, baseDiffIDs []digest.Digest, child *parser.Node, history []v1.History, diffIDs []digest.Digest, addedContentSummary string, buildAddsLayer bool) bool {
// our history should be as long as the base's, plus one entry for what
// we're doing
if len(history) != len(baseHistory)+1 {
return false
}
// check that each entry in the base history corresponds to an entry in
// our history, and count how many of them add a layer diff
expectedDiffIDs := 0
for i := range baseHistory {
if !historyEntriesEqual(baseHistory[i], history[i]) {
return false
}
if !baseHistory[i].EmptyLayer {
expectedDiffIDs++
}
}
if len(baseDiffIDs) != expectedDiffIDs {
return false
}
if buildAddsLayer {
// we're adding a layer, so we should have exactly one more
// layer than the base image
if len(diffIDs) != expectedDiffIDs+1 {
return false
}
} else {
// we're not adding a layer, so we should have exactly the same
// layers as the base image
if len(diffIDs) != expectedDiffIDs {
return false
}
}
// compare the diffs for the layers that we should have in common
for i := range baseDiffIDs {
if diffIDs[i] != baseDiffIDs[i] {
return false
}
}
return history[len(baseHistory)].CreatedBy == s.getCreatedBy(child, addedContentSummary)
}
// getCreatedBy returns the command that the image built at node will be
// recorded as created by. If the passed-in addedContentSummary is not empty,
// it is assumed to carry the digest information for the content when the node is ADD or COPY.
func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary string) string {
if node == nil {
return "/bin/sh"
}
switch strings.ToUpper(node.Value) {
case "ARG":
buildArgs := s.getBuildArgsKey()
return "/bin/sh -c #(nop) ARG " + buildArgs
case "RUN":
buildArgs := s.getBuildArgsResolvedForRun()
if buildArgs != "" {
return "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) + " " + buildArgs + " /bin/sh -c " + node.Original[4:]
}
return "/bin/sh -c " + node.Original[4:]
case "ADD", "COPY":
destination := node
for destination.Next != nil {
destination = destination.Next
}
return "/bin/sh -c #(nop) " + strings.ToUpper(node.Value) + " " + addedContentSummary + " in " + destination.Value + " "
default:
return "/bin/sh -c #(nop) " + node.Original
}
}
// getBuildArgsResolvedForRun returns a string of the build args specified during the build process,
// excluding any build args that were not used in the build process.
// Values for args are overridden by the values specified using ENV,
// because values from ENV always override values specified for an ARG.
func (s *StageExecutor) getBuildArgsResolvedForRun() string {
var envs []string
configuredEnvs := make(map[string]string)
dockerConfig := s.stage.Builder.Config()
for _, env := range dockerConfig.Env {
splitv := strings.SplitN(env, "=", 2)
if len(splitv) == 2 {
configuredEnvs[splitv[0]] = splitv[1]
}
}
for key, value := range s.stage.Builder.Args {
if _, ok := s.stage.Builder.AllowedArgs[key]; ok {
// if a value for this arg was already present in the image's environment, it is
// given higher priority, so embed that value into the build history instead
_, inImage := configuredEnvs[key]
if inImage {
envs = append(envs, fmt.Sprintf("%s=%s", key, configuredEnvs[key]))
} else {
envs = append(envs, fmt.Sprintf("%s=%s", key, value))
}
}
}
sort.Strings(envs)
return strings.Join(envs, " ")
}
// getBuildArgsKey returns the set of arg keys which were specified during the build process.
// This function is used exclusively for recording build history.
func (s *StageExecutor) getBuildArgsKey() string {
var envs []string
for key := range s.stage.Builder.Args {
if _, ok := s.stage.Builder.AllowedArgs[key]; ok {
envs = append(envs, key)
}
}
sort.Strings(envs)
return strings.Join(envs, " ")
}
// tagExistingImage adds names to an image already in the store
func (s *StageExecutor) tagExistingImage(ctx context.Context, cacheID, output string) (string, reference.Canonical, error) {
// If we don't need to attach a name to the image, just return the cache ID.
if output == "" {
return cacheID, nil, nil
}
// Get the destination image reference.
dest, err := s.executor.resolveNameToImageRef(output)
if err != nil {
return "", nil, err
}
policyContext, err := util.GetPolicyContext(s.executor.systemContext)
if err != nil {
return "", nil, err
}
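// Destroy the policy context when we return, folding any error from
// Destroy() into the error that we return to the caller.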
defer func() {
if destroyErr := policyContext.Destroy(); destroyErr != nil {
if err == nil {
err = destroyErr
} else {
err = errors.Wrap(err, destroyErr.Error())
}
}
}()
// Look up the source image, expecting it to be in local storage
src, err := is.Transport.ParseStoreReference(s.executor.store, cacheID)
if err != nil {
return "", nil, errors.Wrapf(err, "error getting source imageReference for %q", cacheID)
}
options := cp.Options{
RemoveSignatures: true, // more like "ignore signatures", since they don't get removed when src and dest are the same image
}
manifestBytes, err := cp.Image(ctx, policyContext, dest, src, &options)
if err != nil {
return "", nil, errors.Wrapf(err, "error copying image %q", cacheID)
}
manifestDigest, err := manifest.Digest(manifestBytes)
if err != nil {
return "", nil, errors.Wrapf(err, "error computing digest of manifest for image %q", cacheID)
}
img, err := is.Transport.GetStoreImage(s.executor.store, dest)
if err != nil {
return "", nil, errors.Wrapf(err, "error locating new copy of image %q (i.e., %q)", cacheID, transports.ImageName(dest))
}
var ref reference.Canonical
if dref := dest.DockerReference(); dref != nil {
if ref, err = reference.WithDigest(dref, manifestDigest); err != nil {
return "", nil, errors.Wrapf(err, "error computing canonical reference for new image %q (i.e., %q)", cacheID, transports.ImageName(dest))
}
}
return img.ID, ref, nil
}
// intermediateImageExists returns true if an intermediate image of currNode exists in the image store from a previous build.
// It verifies this by checking the parent of the top layer of the image and the history.
func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *parser.Node, addedContentDigest string, buildAddsLayer bool) (string, error) {
// Get the list of images available in the image store
images, err := s.executor.store.Images()
if err != nil {
return "", errors.Wrap(err, "error getting image list from store")
}
var baseHistory []v1.History
var baseDiffIDs []digest.Digest
if s.builder.FromImageID != "" {
_, baseHistory, baseDiffIDs, err = s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, s.builder.FromImageID)
if err != nil {
return "", errors.Wrapf(err, "error getting history of base image %q", s.builder.FromImageID)
}
}
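// Walk all of the images in local storage, looking for one whose parent
// layer, manifest type, and history line up with what executing the
// current instruction would have produced.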
for _, image := range images {
var imageTopLayer *storage.Layer
var imageParentLayerID string
if image.TopLayer != "" {
imageTopLayer, err = s.executor.store.Layer(image.TopLayer)
if err != nil {
return "", errors.Wrapf(err, "error getting top layer info")
}
// Figure out which layer from this image we should
// compare our container's base layer to.
imageParentLayerID = imageTopLayer.ID
// If we haven't added a layer here, then our base
// layer should be the same as the image's layer. If
// we did add a layer, then our base layer should be the
// same as the parent of the image's layer.
if buildAddsLayer {
imageParentLayerID = imageTopLayer.Parent
}
}
// If the parent of the top layer of an image is equal to the current build image's top layer,
// it means that this image is potentially a cached intermediate image from a previous
// build.
if s.builder.TopLayer != imageParentLayerID {
continue
}
// Next we double check that the history of this image is equivalent to the previous
// lines in the Dockerfile up till the point we are at in the build.
manifestType, history, diffIDs, err := s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, image.ID)
if err != nil {
// It's possible that this image is for another architecture, which results
// in a custom-crafted error message that we'd have to use substring matching
// to recognize. Instead, ignore the image.
logrus.Debugf("error getting history of %q (%v), ignoring it", image.ID, err)
continue
}
// If this candidate isn't of the type that we're building, then it may have lost
// some format-specific information that a building-without-cache run wouldn't lose.
if manifestType != s.executor.outputFormat {
continue
}
// children + currNode is the point of the Dockerfile we are currently at.
if s.historyAndDiffIDsMatch(baseHistory, baseDiffIDs, currNode, history, diffIDs, addedContentDigest, buildAddsLayer) {
return image.ID, nil
}
}
return "", nil
}
// commit writes the container's contents to an image, using a passed-in tag as
// the name if there is one, generating a unique ID-based one otherwise.
func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer bool, output string) (string, reference.Canonical, error) {
ib := s.stage.Builder
var imageRef types.ImageReference
if output != "" {
imageRef2, err := s.executor.resolveNameToImageRef(output)
if err != nil {
return "", nil, err
}
imageRef = imageRef2
}
if ib.Author != "" {
s.builder.SetMaintainer(ib.Author)
}
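// Mirror the builder's Docker config (hostname, user, ports, environment,
// volumes, entrypoint, labels, and so on) onto the image we are about to commit.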
config := ib.Config()
if createdBy != "" {
s.builder.SetCreatedBy(createdBy)
}
s.builder.SetHostname(config.Hostname)
s.builder.SetDomainname(config.Domainname)
if s.executor.architecture != "" {
s.builder.SetArchitecture(s.executor.architecture)
}
if s.executor.os != "" {
s.builder.SetOS(s.executor.os)
}
s.builder.SetUser(config.User)
s.builder.ClearPorts()
for p := range config.ExposedPorts {
s.builder.SetPort(string(p))
}
for _, envSpec := range config.Env {
spec := strings.SplitN(envSpec, "=", 2)
s.builder.SetEnv(spec[0], spec[1])
}
s.builder.SetCmd(config.Cmd)
s.builder.ClearVolumes()
for v := range config.Volumes {
s.builder.AddVolume(v)
}
s.builder.ClearOnBuild()
for _, onBuildSpec := range config.OnBuild {
s.builder.SetOnBuild(onBuildSpec)
}
s.builder.SetWorkDir(config.WorkingDir)
s.builder.SetEntrypoint(config.Entrypoint)
s.builder.SetShell(config.Shell)
s.builder.SetStopSignal(config.StopSignal)
if config.Healthcheck != nil {
s.builder.SetHealthcheck(&buildahdocker.HealthConfig{
Test: append([]string{}, config.Healthcheck.Test...),
Interval: config.Healthcheck.Interval,
Timeout: config.Healthcheck.Timeout,
StartPeriod: config.Healthcheck.StartPeriod,
Retries: config.Healthcheck.Retries,
})
} else {
s.builder.SetHealthcheck(nil)
}
s.builder.ClearLabels()
for k, v := range config.Labels {
s.builder.SetLabel(k, v)
}
for _, labelSpec := range s.executor.labels {
label := strings.SplitN(labelSpec, "=", 2)
if len(label) > 1 {
s.builder.SetLabel(label[0], label[1])
} else {
s.builder.SetLabel(label[0], "")
}
}
s.builder.SetLabel(buildah.BuilderIdentityAnnotation, define.Version)
for _, annotationSpec := range s.executor.annotations {
annotation := strings.SplitN(annotationSpec, "=", 2)
if len(annotation) > 1 {
s.builder.SetAnnotation(annotation[0], annotation[1])
} else {
s.builder.SetAnnotation(annotation[0], "")
}
}
if imageRef != nil {
logName := transports.ImageName(imageRef)
logrus.Debugf("COMMIT %q", logName)
} else {
logrus.Debugf("COMMIT")
}
writer := s.executor.reportWriter
if s.executor.layers || !s.executor.useCache {
writer = nil
}
options := buildah.CommitOptions{
Compression: s.executor.compression,
SignaturePolicyPath: s.executor.signaturePolicyPath,
ReportWriter: writer,
PreferredManifestType: s.executor.outputFormat,
SystemContext: s.executor.systemContext,
Squash: s.executor.squash,
EmptyLayer: emptyLayer,
BlobDirectory: s.executor.blobDirectory,
SignBy: s.executor.signBy,
MaxRetries: s.executor.maxPullPushRetries,
RetryDelay: s.executor.retryPullPushDelay,
HistoryTimestamp: s.executor.timestamp,
Manifest: s.executor.manifest,
UnsetEnvs: s.executor.unsetEnvs,
}
imgID, _, manifestDigest, err := s.builder.Commit(ctx, imageRef, options)
if err != nil {
return "", nil, err
}
var ref reference.Canonical
if imageRef != nil {
if dref := imageRef.DockerReference(); dref != nil {
if ref, err = reference.WithDigest(dref, manifestDigest); err != nil {
return "", nil, errors.Wrapf(err, "error computing canonical reference for new image %q", imgID)
}
}
}
return imgID, ref, nil
}
func (s *StageExecutor) EnsureContainerPath(path string) error {
return copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, path), copier.MkdirOptions{})
}
|
[
"\"BUILDAH_NOPIVOT\""
] |
[] |
[
"BUILDAH_NOPIVOT"
] |
[]
|
["BUILDAH_NOPIVOT"]
|
go
| 1 | 0 | |
nbviewer/providers/gist/handlers.py
|
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import json
import os
from tornado import web
from .. import _load_handler_from_location
from ...utils import clean_filename
from ...utils import quote
from ...utils import response_text
from ...utils import url_path_join
from ..base import BaseHandler
from ..base import cached
from ..base import RenderingHandler
from ..github.handlers import GithubClientMixin
class GistClientMixin(GithubClientMixin):
# PROVIDER_CTX is a dictionary whose entries are passed as keyword arguments
# to the render_template method of the GistHandler. The following describe
# the information contained in each of these keyword arguments:
# provider_label: str
# Text to apply to the navbar icon linking to the provider
# provider_icon: str
# CSS classname to apply to the navbar icon linking to the provider
# executor_label: str, optional
# Text to apply to the navbar icon linking to the execution service
# executor_icon: str, optional
# CSS classname to apply to the navbar icon linking to the execution service
PROVIDER_CTX = {
"provider_label": "Gist",
"provider_icon": "github-square",
"executor_label": "Binder",
"executor_icon": "icon-binder",
}
BINDER_TMPL = "{binder_base_url}/gist/{user}/{gist_id}/master"
BINDER_PATH_TMPL = BINDER_TMPL + "?filepath={path}"
def client_error_message(self, exc, url, body, msg=None):
if exc.code == 403 and "too big" in body.lower():
return 400, "GitHub will not serve raw gists larger than 10MB"
return super().client_error_message(exc, url, body, msg)
class UserGistsHandler(GistClientMixin, BaseHandler):
"""list a user's gists containing notebooks
.ipynb file extension is required for listing (not for rendering).
"""
def render_usergists_template(
self, entries, user, provider_url, prev_url, next_url, **namespace
):
"""
provider_url: str
URL to the notebook document upstream at the provider (e.g., GitHub)
executor_url: str, optional (kwarg passed into `namespace`)
URL to execute the notebook document (e.g., Binder)
"""
return self.render_template(
"usergists.html",
entries=entries,
user=user,
provider_url=provider_url,
prev_url=prev_url,
next_url=next_url,
**self.PROVIDER_CTX,
**namespace
)
@cached
async def get(self, user, **namespace):
page = self.get_argument("page", None)
params = {}
if page:
params["page"] = page
with self.catch_client_error():
response = await self.github_client.get_gists(user, params=params)
prev_url, next_url = self.get_page_links(response)
gists = json.loads(response_text(response))
entries = []
for gist in gists:
notebooks = [f for f in gist["files"] if f.endswith(".ipynb")]
if notebooks:
entries.append(
dict(
id=gist["id"],
notebooks=notebooks,
description=gist["description"] or "",
)
)
if self.github_url == "https://github.com/":
gist_base_url = "https://gist.github.com/"
else:
gist_base_url = url_path_join(self.github_url, "gist/")
provider_url = url_path_join(gist_base_url, "{user}".format(user=user))
html = self.render_usergists_template(
entries=entries,
user=user,
provider_url=provider_url,
prev_url=prev_url,
next_url=next_url,
**namespace
)
await self.cache_and_finish(html)
class GistHandler(GistClientMixin, RenderingHandler):
"""render a gist notebook, or list files if a multifile gist"""
async def parse_gist(self, user, gist_id, filename=""):
with self.catch_client_error():
response = await self.github_client.get_gist(gist_id)
gist = json.loads(response_text(response))
gist_id = gist["id"]
if user is None:
# redirect to /gist/user/gist_id if no user given
owner_dict = gist.get("owner", {})
if owner_dict:
user = owner_dict["login"]
else:
user = "anonymous"
new_url = "{format}/gist/{user}/{gist_id}".format(
format=self.format_prefix, user=user, gist_id=gist_id
)
if filename:
new_url = new_url + "/" + filename
self.redirect(self.from_base(new_url))
return
files = gist["files"]
many_files_gist = len(files) > 1
# user and gist_id get modified
return user, gist_id, gist, files, many_files_gist
# Analogous to GitHubTreeHandler
async def tree_get(self, user, gist_id, gist, files):
"""
user, gist_id, gist, and files are (most) of the values returned by parse_gist
"""
entries = []
ipynbs = []
others = []
for file in files.values():
e = {}
e["name"] = file["filename"]
if file["filename"].endswith(".ipynb"):
e["url"] = quote("/%s/%s" % (gist_id, file["filename"]))
e["class"] = "fa-book"
ipynbs.append(e)
else:
if self.github_url == "https://github.com/":
gist_base_url = "https://gist.github.com/"
else:
gist_base_url = url_path_join(self.github_url, "gist/")
provider_url = url_path_join(
gist_base_url,
"{user}/{gist_id}#file-{clean_name}".format(
user=user,
gist_id=gist_id,
clean_name=clean_filename(file["filename"]),
),
)
e["url"] = provider_url
e["class"] = "fa-share"
others.append(e)
entries.extend(ipynbs)
entries.extend(others)
# Enable a binder navbar icon if a binder base URL is configured
executor_url = (
self.BINDER_TMPL.format(
binder_base_url=self.binder_base_url,
user=user.rstrip("/"),
gist_id=gist_id,
)
if self.binder_base_url
else None
)
# provider_url:
# URL to the notebook document upstream at the provider (e.g., GitHub)
# executor_url: str, optional
# URL to execute the notebook document (e.g., Binder)
html = self.render_template(
"treelist.html",
entries=entries,
tree_type="gist",
tree_label="gists",
user=user.rstrip("/"),
provider_url=gist["html_url"],
executor_url=executor_url,
**self.PROVIDER_CTX
)
await self.cache_and_finish(html)
# Analogous to GitHubBlobHandler
async def file_get(self, user, gist_id, filename, gist, many_files_gist, file):
content = await self.get_notebook_data(gist_id, filename, many_files_gist, file)
if not content:
return
await self.deliver_notebook(user, gist_id, filename, gist, file, content)
# Only called by file_get
async def get_notebook_data(self, gist_id, filename, many_files_gist, file):
"""
gist_id, filename, many_files_gist, file are all passed to file_get
"""
if (file["type"] or "").startswith("image/"):
self.log.debug(
"Fetching raw image (%s) %s/%s: %s",
file["type"],
gist_id,
filename,
file["raw_url"],
)
response = await self.fetch(file["raw_url"])
# use raw bytes for images:
content = response.body
elif file["truncated"]:
self.log.debug(
"Gist %s/%s truncated, fetching %s", gist_id, filename, file["raw_url"]
)
response = await self.fetch(file["raw_url"])
content = response_text(response, encoding="utf-8")
else:
content = file["content"]
if many_files_gist and not filename.endswith(".ipynb"):
self.set_header("Content-Type", file.get("type") or "text/plain")
# cannot redirect because of X-Frame-Content
self.finish(content)
return
else:
return content
# Only called by file_get
async def deliver_notebook(self, user, gist_id, filename, gist, file, content):
"""
user, gist_id, filename, gist, file, are the same values as those
passed into file_get, whereas content is returned from
get_notebook_data using user, gist_id, filename, gist, and file.
"""
# Enable a binder navbar icon if a binder base URL is configured
executor_url = (
self.BINDER_PATH_TMPL.format(
binder_base_url=self.binder_base_url,
user=user.rstrip("/"),
gist_id=gist_id,
path=quote(filename),
)
if self.binder_base_url
else None
)
# provider_url: str, optional
# URL to the notebook document upstream at the provider (e.g., GitHub)
await self.finish_notebook(
content,
file["raw_url"],
msg="gist: %s" % gist_id,
public=gist["public"],
provider_url=gist["html_url"],
executor_url=executor_url,
**self.PROVIDER_CTX
)
@cached
async def get(self, user, gist_id, filename=""):
"""
Encompasses both the case of a single file gist, handled by
`file_get`, as well as a many-file gist, handled by `tree_get`.
"""
parsed_gist = await self.parse_gist(user, gist_id, filename)
if parsed_gist is not None:
user, gist_id, gist, files, many_files_gist = parsed_gist
else:
return
if many_files_gist and not filename:
await self.tree_get(user, gist_id, gist, files)
else:
if not many_files_gist and not filename:
filename = list(files.keys())[0]
if filename not in files:
raise web.HTTPError(
404, "No such file in gist: %s (%s)", filename, list(files.keys())
)
file = files[filename]
await self.file_get(user, gist_id, filename, gist, many_files_gist, file)
class GistRedirectHandler(BaseHandler):
"""redirect old /<gist-id> to new /gist/<gist-id>"""
def get(self, gist_id, file=""):
new_url = "%s/gist/%s" % (self.format_prefix, gist_id)
if file:
new_url = "%s/%s" % (new_url, file)
self.log.info("Redirecting %s to %s", self.request.uri, new_url)
self.redirect(self.from_base(new_url))
def default_handlers(handlers=[], **handler_names):
"""Tornado handlers"""
gist_handler = _load_handler_from_location(handler_names["gist_handler"])
user_gists_handler = _load_handler_from_location(
handler_names["user_gists_handler"]
)
return handlers + [
(r"/gist/([^\/]+/)?([0-9]+|[0-9a-f]{20,})", gist_handler, {}),
(r"/gist/([^\/]+/)?([0-9]+|[0-9a-f]{20,})/(?:files/)?(.*)", gist_handler, {}),
(r"/([0-9]+|[0-9a-f]{20,})", GistRedirectHandler, {}),
(r"/([0-9]+|[0-9a-f]{20,})/(.*)", GistRedirectHandler, {}),
(r"/gist/([^\/]+)/?", user_gists_handler, {}),
]
def uri_rewrites(rewrites=[]):
gist_rewrites = [
(r"^([a-f0-9]+)/?$", "/{0}"),
(r"^https?://gist.github.com/([^\/]+/)?([a-f0-9]+)/?$", "/{1}"),
]
# github enterprise
if os.environ.get("GITHUB_API_URL", "") != "":
gist_base_url = url_path_join(
os.environ.get("GITHUB_API_URL").split("/api/v3")[0], "gist/"
)
gist_rewrites.extend(
[
# Fetching the Gist ID which is embedded in the URL, but with a different base URL
(r"^" + gist_base_url + r"([^\/]+/)?([a-f0-9]+)/?$", "/{1}")
]
)
return gist_rewrites + rewrites
|
[] |
[] |
[
"GITHUB_API_URL"
] |
[]
|
["GITHUB_API_URL"]
|
python
| 1 | 0 | |
app/kamenica/asgi.py
|
"""
ASGI config for kamenica project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kamenica.settings')
application = get_asgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
statify/config.py
|
import os
from pathlib import Path
VERSION = '1.0'
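# Data and config locations default to the user's home directory, but can be
# overridden with the STATIFY_DATA and STATIFY_CONFIG environment variables.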
STATIFY_PATH = Path(os.environ.get('STATIFY_DATA',
Path.home() / '.data' / 'statify'))
CONFIG_PATH = Path(os.environ.get('STATIFY_CONFIG',
Path.home() / '.config' / 'statify.yaml'))
|
[] |
[] |
[
"STATIFY_CONFIG",
"STATIFY_DATA"
] |
[]
|
["STATIFY_CONFIG", "STATIFY_DATA"]
|
python
| 2 | 0 | |
topverbs/syntax.py
|
import os
import ast
from nltk import pos_tag
import logging
import logging.config
from helpers import get_file_content, make_list_flat
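# File-based logging configuration is only loaded when the DEBUG environment
# variable is set to 'true'.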
DEBUG = os.environ.get('DEBUG') == 'true'
if DEBUG:
logging.config.fileConfig('log.conf')
logger = logging.getLogger(__name__)
def is_verb(word):
if not word:
return False
pos_info = pos_tag([word])
return pos_info[0][1] == 'VB'
def is_noun(word):
if not word:
return False
pos_info = pos_tag([word])
return pos_info[0][1] == 'NN'
def get_syntax_trees_from_files(file_names):
trees = []
for filename in file_names:
file_content = get_file_content(filename)
try:
tree = ast.parse(file_content)
except SyntaxError as e:
logger.debug(e)
continue
trees.append(tree)
return trees
def get_verbs_from_function_name(function_name):
return [word for word in function_name.split('_') if is_verb(word)]
def get_nouns_from_function_name(function_name):
return [word for word in function_name.split('_') if is_noun(word)]
def get_functions_from_tree(tree):
return [node.name.lower() for node in ast.walk(tree) if isinstance(node, ast.FunctionDef)]
def get_variable_names_from_body(body):
_list = []
for node in body:
if isinstance(node, ast.Assign) and isinstance(node.targets[0], ast.Name):
_list.append(node.targets[0].id)
return _list
def get_variables_names_from_tree(tree):
variables_names = []
body_functions = [node.body for node in ast.walk(tree) if isinstance(node, ast.FunctionDef)]
for body in body_functions:
variables_names += get_variable_names_from_body(body)
return variables_names
def clean_special_names(all_function_names):
functions = []
for function_name in all_function_names:
if not (function_name.startswith('__') and function_name.endswith('__')):
functions.append(function_name)
return functions
def get_all_function_names(trees):
name_lists = [get_functions_from_tree(tree) for tree in trees]
return make_list_flat(name_lists)
def get_all_variables_names(trees):
name_lists = [get_variables_names_from_tree(tree) for tree in trees]
return make_list_flat(name_lists)
def get_verbs(function_name_list):
verbs = [get_verbs_from_function_name(function_name) for function_name in function_name_list]
return make_list_flat(verbs)
def get_nouns(function_name_list):
nouns = [get_nouns_from_function_name(function_name) for function_name in function_name_list]
return make_list_flat(nouns)
|
[] |
[] |
[
"DEBUG"
] |
[]
|
["DEBUG"]
|
python
| 1 | 0 | |
plugin/cmd.go
|
package plugin
import (
"flag"
"net"
"os"
"regexp"
"strings"
"time"
"github.com/caddyserver/caddy/v2"
caddycmd "github.com/caddyserver/caddy/v2/cmd"
"github.com/ACAVJW4H/caddy-docker-proxy/plugin/config"
"github.com/ACAVJW4H/caddy-docker-proxy/plugin/generator"
"go.uber.org/zap"
)
var isTrue = regexp.MustCompile("(?i)^(true|yes|1)$")
func init() {
caddycmd.RegisterCommand(caddycmd.Command{
Name: "docker-proxy",
Func: cmdFunc,
Usage: "<command>",
Short: "Run caddy as a docker proxy",
Flags: func() *flag.FlagSet {
fs := flag.NewFlagSet("docker-proxy", flag.ExitOnError)
fs.Bool("mode", false,
"Which mode this instance should run: standalone | controller | server")
fs.String("docker-sockets", "",
"Docker sockets comma separate")
fs.String("docker-certs-path", "",
"Docker socket certs path comma separate")
fs.String("docker-apis-version", "",
"Docker socket apis version comma separate")
fs.String("controller-network", "",
"Network allowed to configure caddy server in CIDR notation. Ex: 10.200.200.0/24")
fs.String("ingress-networks", "",
"Comma separated name of ingress networks connecting caddy servers to containers.\n"+
"When not defined, networks attached to controller container are considered ingress networks")
fs.String("caddyfile-path", "",
"Path to a base Caddyfile that will be extended with docker sites")
fs.String("label-prefix", generator.DefaultLabelPrefix,
"Prefix for Docker labels")
fs.Bool("proxy-service-tasks", true,
"Proxy to service tasks instead of service load balancer")
fs.Bool("process-caddyfile", true,
"Process Caddyfile before loading it, removing invalid servers")
fs.Duration("polling-interval", 30*time.Second,
"Interval caddy should manually check docker for a new caddyfile")
return fs
}(),
})
}
func cmdFunc(flags caddycmd.Flags) (int, error) {
caddy.TrapSignals()
options := createOptions(flags)
log := logger()
if options.Mode&config.Server == config.Server {
log.Info("Running caddy proxy server")
err := caddy.Run(&caddy.Config{
Admin: &caddy.AdminConfig{
Listen: getAdminListen(options),
},
})
if err != nil {
return 1, err
}
}
if options.Mode&config.Controller == config.Controller {
log.Info("Running caddy proxy controller")
loader := CreateDockerLoader(options)
if err := loader.Start(); err != nil {
if err := caddy.Stop(); err != nil {
return 1, err
}
return 1, err
}
}
select {}
}
func getAdminListen(options *config.Options) string {
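// When a controller network is configured, bind the admin endpoint to the
// local interface address that falls inside that network; otherwise fall
// back to localhost.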
if options.ControllerNetwork != nil {
ifaces, err := net.Interfaces()
log := logger()
if err != nil {
log.Error("Failed to get network interfaces", zap.Error(err))
}
for _, i := range ifaces {
addrs, err := i.Addrs()
if err != nil {
log.Error("Failed to get network interface addresses", zap.Error(err))
continue
}
for _, a := range addrs {
switch v := a.(type) {
case *net.IPAddr:
if options.ControllerNetwork.Contains(v.IP) {
return "tcp/" + v.IP.String() + ":2019"
}
case *net.IPNet:
if options.ControllerNetwork.Contains(v.IP) {
return "tcp/" + v.IP.String() + ":2019"
}
}
}
}
}
return "tcp/localhost:2019"
}
func createOptions(flags caddycmd.Flags) *config.Options {
caddyfilePath := flags.String("caddyfile-path")
labelPrefixFlag := flags.String("label-prefix")
proxyServiceTasksFlag := flags.Bool("proxy-service-tasks")
processCaddyfileFlag := flags.Bool("process-caddyfile")
pollingIntervalFlag := flags.Duration("polling-interval")
modeFlag := flags.String("mode")
controllerSubnetFlag := flags.String("controller-network")
dockerSocketsFlag := flags.String("docker-sockets")
dockerCertsPathFlag := flags.String("docker-certs-path")
dockerAPIsVersionFlag := flags.String("docker-apis-version")
ingressNetworksFlag := flags.String("ingress-networks")
options := &config.Options{}
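// For each option below, an environment variable, when set, takes precedence
// over the corresponding command-line flag.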
var mode string
if modeEnv := os.Getenv("CADDY_DOCKER_MODE"); modeEnv != "" {
mode = modeEnv
} else {
mode = modeFlag
}
switch mode {
case "controller":
options.Mode = config.Controller
case "server":
options.Mode = config.Server
default:
options.Mode = config.Standalone
}
log := logger()
if dockerSocketsEnv := os.Getenv("CADDY_DOCKER_SOCKETS"); dockerSocketsEnv != "" {
options.DockerSockets = strings.Split(dockerSocketsEnv, ",")
} else {
options.DockerSockets = strings.Split(dockerSocketsFlag, ",")
}
if dockerCertsPathEnv := os.Getenv("CADDY_DOCKER_CERTS_PATH"); dockerCertsPathEnv != "" {
options.DockerCertsPath = strings.Split(dockerCertsPathEnv, ",")
} else {
options.DockerCertsPath = strings.Split(dockerCertsPathFlag, ",")
}
if dockerAPIsVersionEnv := os.Getenv("CADDY_DOCKER_APIS_VERSION"); dockerAPIsVersionEnv != "" {
options.DockerAPIsVersion = strings.Split(dockerAPIsVersionEnv, ",")
} else {
options.DockerAPIsVersion = strings.Split(dockerAPIsVersionFlag, ",")
}
if controllerIPRangeEnv := os.Getenv("CADDY_CONTROLLER_NETWORK"); controllerIPRangeEnv != "" {
_, ipNet, err := net.ParseCIDR(controllerIPRangeEnv)
if err != nil {
log.Error("Failed to parse CADDY_CONTROLLER_NETWORK", zap.String("CADDY_CONTROLLER_NETWORK", controllerIPRangeEnv), zap.Error(err))
} else if ipNet != nil {
options.ControllerNetwork = ipNet
}
} else if controllerSubnetFlag != "" {
_, ipNet, err := net.ParseCIDR(controllerSubnetFlag)
if err != nil {
log.Error("Failed to parse controller-network", zap.String("controller-network", controllerSubnetFlag), zap.Error(err))
} else if ipNet != nil {
options.ControllerNetwork = ipNet
}
}
if ingressNetworksEnv := os.Getenv("CADDY_INGRESS_NETWORKS"); ingressNetworksEnv != "" {
options.IngressNetworks = strings.Split(ingressNetworksEnv, ",")
} else if ingressNetworksFlag != "" {
options.IngressNetworks = strings.Split(ingressNetworksFlag, ",")
}
if caddyfilePathEnv := os.Getenv("CADDY_DOCKER_CADDYFILE_PATH"); caddyfilePathEnv != "" {
options.CaddyfilePath = caddyfilePathEnv
} else {
options.CaddyfilePath = caddyfilePath
}
if labelPrefixEnv := os.Getenv("CADDY_DOCKER_LABEL_PREFIX"); labelPrefixEnv != "" {
options.LabelPrefix = labelPrefixEnv
} else {
options.LabelPrefix = labelPrefixFlag
}
options.ControlledServersLabel = options.LabelPrefix + "_controlled_server"
if proxyServiceTasksEnv := os.Getenv("CADDY_DOCKER_PROXY_SERVICE_TASKS"); proxyServiceTasksEnv != "" {
options.ProxyServiceTasks = isTrue.MatchString(proxyServiceTasksEnv)
} else {
options.ProxyServiceTasks = proxyServiceTasksFlag
}
if processCaddyfileEnv := os.Getenv("CADDY_DOCKER_PROCESS_CADDYFILE"); processCaddyfileEnv != "" {
options.ProcessCaddyfile = isTrue.MatchString(processCaddyfileEnv)
} else {
options.ProcessCaddyfile = processCaddyfileFlag
}
if pollingIntervalEnv := os.Getenv("CADDY_DOCKER_POLLING_INTERVAL"); pollingIntervalEnv != "" {
if p, err := time.ParseDuration(pollingIntervalEnv); err != nil {
log.Error("Failed to parse CADDY_DOCKER_POLLING_INTERVAL", zap.String("CADDY_DOCKER_POLLING_INTERVAL", pollingIntervalEnv), zap.Error(err))
options.PollingInterval = pollingIntervalFlag
} else {
options.PollingInterval = p
}
} else {
options.PollingInterval = pollingIntervalFlag
}
return options
}
|
[
"\"CADDY_DOCKER_MODE\"",
"\"CADDY_DOCKER_SOCKETS\"",
"\"CADDY_DOCKER_CERTS_PATH\"",
"\"CADDY_DOCKER_APIS_VERSION\"",
"\"CADDY_CONTROLLER_NETWORK\"",
"\"CADDY_INGRESS_NETWORKS\"",
"\"CADDY_DOCKER_CADDYFILE_PATH\"",
"\"CADDY_DOCKER_LABEL_PREFIX\"",
"\"CADDY_DOCKER_PROXY_SERVICE_TASKS\"",
"\"CADDY_DOCKER_PROCESS_CADDYFILE\"",
"\"CADDY_DOCKER_POLLING_INTERVAL\""
] |
[] |
[
"CADDY_DOCKER_APIS_VERSION",
"CADDY_DOCKER_MODE",
"CADDY_DOCKER_LABEL_PREFIX",
"CADDY_DOCKER_PROCESS_CADDYFILE",
"CADDY_DOCKER_CERTS_PATH",
"CADDY_INGRESS_NETWORKS",
"CADDY_CONTROLLER_NETWORK",
"CADDY_DOCKER_SOCKETS",
"CADDY_DOCKER_CADDYFILE_PATH",
"CADDY_DOCKER_POLLING_INTERVAL",
"CADDY_DOCKER_PROXY_SERVICE_TASKS"
] |
[]
|
["CADDY_DOCKER_APIS_VERSION", "CADDY_DOCKER_MODE", "CADDY_DOCKER_LABEL_PREFIX", "CADDY_DOCKER_PROCESS_CADDYFILE", "CADDY_DOCKER_CERTS_PATH", "CADDY_INGRESS_NETWORKS", "CADDY_CONTROLLER_NETWORK", "CADDY_DOCKER_SOCKETS", "CADDY_DOCKER_CADDYFILE_PATH", "CADDY_DOCKER_POLLING_INTERVAL", "CADDY_DOCKER_PROXY_SERVICE_TASKS"]
|
go
| 11 | 0 | |
test/test_CTPN.py
|
# coding:utf-8
'''
Test for CTPN implementation
Created : 7, 27, 2018
Revised : 7, 27, 2018
All rights reserved
'''
__author__ = 'dawei.leng'
import os
os.environ['THEANO_FLAGS'] = "floatX=float32, mode=FAST_RUN, warn_float64='raise'"
# os.environ['THEANO_FLAGS'] = "floatX=float32, mode=DEBUG_MODE, warn_float64='raise', exception_verbosity=high"
import theano
from theano import tensor
from dandelion.module import *
from dandelion.activation import *
from dandelion.model.ctpn import model_CTPN
from dandelion.objective import *
import dandelion
dandelion_path = os.path.split(dandelion.__file__)[0]
print('dandelion path = %s\n' % dandelion_path)
def test_case_0():
model = model_CTPN(k=10, do_side_refinement_regress=False,
batchnorm_mode=1, channel=3, im_height=None, im_width=None,
kernel_size=3, border_mode=(1, 1), VGG_flip_filters=False,
im2col=None)
x = tensor.ftensor4('x')
y1 = tensor.ftensor5('y1')
y2 = tensor.ftensor4('y2')
class_score, bboxs = model.forward(x)
#--- check back-prop ---#
loss = aggregate(squared_error(y1, class_score)) + aggregate(squared_error(y2, bboxs))
grad = theano.grad(loss, model.collect_params())
print('back-prop test pass')
print('compiling fn...')
fn = theano.function([x], [class_score, bboxs], no_default_updates=False, on_unused_input='ignore')
print('run fn...')
input = np.random.rand(4, 3, 256, 256).astype(np.float32)
class_score, bboxs = fn(input)
assert class_score.shape == (4, 16, 16, 10, 2), 'class_score shape not correct'
assert bboxs.shape == (4, 16, 16, 10, 2), 'bboxs shape not correct'
# print(class_score.shape)
# print(bboxs.shape)
if __name__ == '__main__':
test_case_0()
print('Test passed')
|
[] |
[] |
[
"THEANO_FLAGS"
] |
[]
|
["THEANO_FLAGS"]
|
python
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
from dotenv import load_dotenv
load_dotenv()
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
google/appengine/tools/appengine_rpc.py
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tool for performing authenticated RPCs against App Engine."""
import google
import cookielib
import cStringIO
import fancy_urllib
import gzip
import logging
import os
import re
import socket
import sys
import time
import urllib
import urllib2
from google.appengine.tools import dev_appserver_login
logger = logging.getLogger('google.appengine.tools.appengine_rpc')
def GetPlatformToken(os_module=os, sys_module=sys, platform=sys.platform):
"""Returns a 'User-agent' token for the host system platform.
Args:
os_module, sys_module, platform: Used for testing.
Returns:
String containing the platform token for the host system.
"""
if hasattr(sys_module, "getwindowsversion"):
windows_version = sys_module.getwindowsversion()
version_info = ".".join(str(i) for i in windows_version[:4])
return platform + "/" + version_info
elif hasattr(os_module, "uname"):
uname = os_module.uname()
return "%s/%s" % (uname[0], uname[2])
else:
return "unknown"
def HttpRequestToString(req, include_data=True):
"""Converts a urllib2.Request to a string.
Args:
req: urllib2.Request
Returns:
Multi-line string representing the request.
"""
headers = ""
for header in req.header_items():
headers += "%s: %s\n" % (header[0], header[1])
template = ("%(method)s %(selector)s %(type)s/1.1\n"
"Host: %(host)s\n"
"%(headers)s")
if include_data:
template = template + "\n%(data)s"
return template % {
'method' : req.get_method(),
'selector' : req.get_selector(),
'type' : req.get_type().upper(),
'host' : req.get_host(),
'headers': headers,
'data': req.get_data(),
}
class ClientLoginError(urllib2.HTTPError):
"""Raised to indicate there was an error authenticating with ClientLogin."""
def __init__(self, url, code, msg, headers, args):
urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
self.args = args
self.reason = args["Error"]
self.info = args.get("Info")
def read(self):
return '%d %s: %s' % (self.code, self.msg, self.reason)
class AbstractRpcServer(object):
"""Provides a common interface for a simple RPC server."""
SUGGEST_OAUTH2 = False
def __init__(self, host, auth_function, user_agent, source,
host_override=None, extra_headers=None, save_cookies=False,
auth_tries=3, account_type=None, debug_data=True, secure=True,
rpc_tries=3):
"""Creates a new HttpRpcServer.
Args:
host: The host to send requests to.
auth_function: A function that takes no arguments and returns an
(email, password) tuple when called. Will be called if authentication
is required.
user_agent: The user-agent string to send to the server. Specify None to
omit the user-agent header.
source: The source to specify in authentication requests.
host_override: The host header to send to the server (defaults to host).
extra_headers: A dict of extra headers to append to every request. Values
supplied here will override other default headers that are supplied.
save_cookies: If True, save the authentication cookies to local disk.
If False, use an in-memory cookiejar instead. Subclasses must
implement this functionality. Defaults to False.
auth_tries: The number of times to attempt auth_function before failing.
account_type: One of GOOGLE, HOSTED_OR_GOOGLE, or None for automatic.
debug_data: Whether debugging output should include data contents.
secure: If the requests sent using Send should be sent over HTTPS.
rpc_tries: The number of rpc retries upon http server error (i.e.
Response code >= 500 and < 600) before failing.
"""
if secure:
self.scheme = "https"
else:
self.scheme = "http"
self.host = host
self.host_override = host_override
self.auth_function = auth_function
self.source = source
self.authenticated = False
self.auth_tries = auth_tries
self.debug_data = debug_data
self.rpc_tries = rpc_tries
self.account_type = account_type
self.extra_headers = {}
if user_agent:
self.extra_headers["User-Agent"] = user_agent
if extra_headers:
self.extra_headers.update(extra_headers)
self.save_cookies = save_cookies
self.cookie_jar = cookielib.MozillaCookieJar()
self.opener = self._GetOpener()
if self.host_override:
logger.debug("Server: %s; Host: %s", self.host, self.host_override)
else:
logger.debug("Server: %s", self.host)
if ((self.host_override and self.host_override == "localhost") or
self.host == "localhost" or self.host.startswith("localhost:")):
self._DevAppServerAuthenticate()
def _GetOpener(self):
"""Returns an OpenerDirector for making HTTP requests.
Returns:
A urllib2.OpenerDirector object.
"""
raise NotImplementedError
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
req = fancy_urllib.FancyRequest(url, data=data)
if self.host_override:
req.add_header("Host", self.host_override)
for key, value in self.extra_headers.iteritems():
req.add_header(key, value)
return req
def _GetAuthToken(self, email, password):
"""Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The user's password
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
"""
account_type = self.account_type
if not account_type:
if (self.host.split(':')[0].endswith(".google.com")
or (self.host_override
and self.host_override.split(':')[0].endswith(".google.com"))):
account_type = "HOSTED_OR_GOOGLE"
else:
account_type = "GOOGLE"
data = {
"Email": email,
"Passwd": password,
"service": "ah",
"source": self.source,
"accountType": account_type
}
req = self._CreateRequest(
url=("https://%s/accounts/ClientLogin" %
os.getenv("APPENGINE_AUTH_SERVER", "www.google.com")),
data=urllib.urlencode(data))
try:
response = self.opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=")
for x in response_body.split("\n") if x)
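# When APPENGINE_RPC_USE_SID is "1", reuse the SID cookie directly instead of
# exchanging the auth token for a cookie via /_ah/login.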
if os.getenv("APPENGINE_RPC_USE_SID", "0") == "1":
self.extra_headers["Cookie"] = (
'SID=%s; Path=/;' % response_dict["SID"])
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg,
e.headers, response_dict)
else:
raise
def _GetAuthCookie(self, auth_token):
"""Fetches authentication cookies for an authentication token.
Args:
auth_token: The authentication token returned by ClientLogin.
Raises:
HTTPError: If there was an error fetching the authentication cookies.
"""
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
login_path = os.environ.get("APPCFG_LOGIN_PATH", "/_ah")
req = self._CreateRequest("%s://%s%s/login?%s" %
(self.scheme, self.host, login_path,
urllib.urlencode(args)))
try:
response = self.opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
response.headers, response.fp)
self.authenticated = True
def _Authenticate(self):
"""Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response and directs us to
authenticate ourselves with ClientLogin.
"""
for unused_i in range(self.auth_tries):
credentials = self.auth_function()
try:
auth_token = self._GetAuthToken(credentials[0], credentials[1])
if os.getenv("APPENGINE_RPC_USE_SID", "0") == "1":
return
except ClientLoginError, e:
if e.reason == "BadAuthentication":
if e.info == "InvalidSecondFactor":
print >>sys.stderr, ("Use an application-specific password instead "
"of your regular account password.")
print >>sys.stderr, ("See http://www.google.com/"
"support/accounts/bin/answer.py?answer=185833")
if self.SUGGEST_OAUTH2:
print >>sys.stderr, ("However, now the recommended way to log in "
"is using OAuth2. See")
print >>sys.stderr, ("https://developers.google.com/appengine/"
"docs/python/tools/uploadinganapp#oauth")
else:
print >>sys.stderr, "Invalid username or password."
continue
if e.reason == "CaptchaRequired":
print >>sys.stderr, (
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.")
break
if e.reason == "NotVerified":
print >>sys.stderr, "Account not verified."
break
if e.reason == "TermsNotAgreed":
print >>sys.stderr, "User has not agreed to TOS."
break
if e.reason == "AccountDeleted":
print >>sys.stderr, "The user account has been deleted."
break
if e.reason == "AccountDisabled":
print >>sys.stderr, "The user account has been disabled."
break
if e.reason == "ServiceDisabled":
print >>sys.stderr, ("The user's access to the service has been "
"disabled.")
break
if e.reason == "ServiceUnavailable":
print >>sys.stderr, "The service is not available; try again later."
break
raise
self._GetAuthCookie(auth_token)
return
def _DevAppServerAuthenticate(self):
"""Authenticates the user on the dev_appserver."""
credentials = self.auth_function()
value = dev_appserver_login.CreateCookieData(credentials[0], True)
self.extra_headers["Cookie"] = ('dev_appserver_login="%s"; Path=/;' % value)
def Send(self, request_path, payload="",
content_type="application/octet-stream",
timeout=None,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
auth_tried = False
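# Retry loop: 5xx responses are retried up to rpc_tries times; 401 responses and
# login redirects (302) trigger at most one (re)authentication attempt; anything
# else is re-raised.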
while True:
tries += 1
url = "%s://%s%s" % (self.scheme, self.host, request_path)
if kwargs:
url += "?" + urllib.urlencode(kwargs)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
req.add_header("X-appcfg-api-version", "1")
try:
logger.debug('Sending %s request:\n%s',
self.scheme.upper(),
HttpRequestToString(req, include_data=self.debug_data))
f = self.opener.open(req)
response = f.read()
f.close()
return response
except urllib2.HTTPError, e:
logger.debug("Got http error, this is try #%s", tries)
if tries > self.rpc_tries:
raise
elif e.code == 401:
if auth_tried:
raise
auth_tried = True
self._Authenticate()
elif e.code >= 500 and e.code < 600:
continue
elif e.code == 302:
if auth_tried:
raise
auth_tried = True
loc = e.info()["location"]
logger.debug("Got 302 redirect. Location: %s", loc)
if loc.startswith("https://www.google.com/accounts/ServiceLogin"):
self._Authenticate()
elif re.match(r"https://www.google.com/a/[a-z0-9.-]+/ServiceLogin",
loc):
self.account_type = os.getenv("APPENGINE_RPC_HOSTED_LOGIN_TYPE",
"HOSTED")
self._Authenticate()
elif loc.startswith("http://%s/_ah/login" % (self.host,)):
self._DevAppServerAuthenticate()
else:
raise
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
class ContentEncodingHandler(urllib2.BaseHandler):
"""Request and handle HTTP Content-Encoding."""
def http_request(self, request):
request.add_header("Accept-Encoding", "gzip")
for header in request.headers:
if header.lower() == "user-agent":
request.headers[header] += " gzip"
return request
https_request = http_request
def http_response(self, req, resp):
"""Handle encodings in the order that they are encountered."""
encodings = []
headers = resp.headers
for header in headers:
if header.lower() == "content-encoding":
for encoding in headers.get(header, "").split(","):
encoding = encoding.strip()
if encoding:
encodings.append(encoding)
break
if not encodings:
return resp
del headers[header]
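# Decompress gzip encodings starting from the last (outermost) one applied; any
# remaining encodings are restored to the header and the unhandled one is logged.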
fp = resp
while encodings and encodings[-1].lower() == "gzip":
fp = cStringIO.StringIO(fp.read())
fp = gzip.GzipFile(fileobj=fp, mode="r")
encodings.pop()
if encodings:
headers[header] = ", ".join(encodings)
logger.warning("Unrecognized Content-Encoding: %s", encodings[-1])
msg = resp.msg
if sys.version_info >= (2, 6):
resp = urllib2.addinfourl(fp, headers, resp.url, resp.code)
else:
response_code = resp.code
resp = urllib2.addinfourl(fp, headers, resp.url)
resp.code = response_code
resp.msg = msg
return resp
https_response = http_response
class HttpRpcServer(AbstractRpcServer):
"""Provides a simplified RPC-style interface for HTTP requests."""
DEFAULT_COOKIE_FILE_PATH = "~/.appcfg_cookies"
def __init__(self, *args, **kwargs):
self.certpath = os.path.normpath(os.path.join(
os.path.dirname(__file__), '..', '..', '..', 'lib', 'cacerts',
'cacerts.txt'))
self.cert_file_available = os.path.exists(self.certpath)
super(HttpRpcServer, self).__init__(*args, **kwargs)
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
req = super(HttpRpcServer, self)._CreateRequest(url, data)
if self.cert_file_available and fancy_urllib.can_validate_certs():
req.set_ssl_info(ca_certs=self.certpath)
return req
def _CheckCookie(self):
"""Warn if cookie is not valid for at least one minute."""
min_expire = time.time() + 60
for cookie in self.cookie_jar:
if cookie.domain == self.host and not cookie.is_expired(min_expire):
break
else:
print >>sys.stderr, "\nError: Machine system clock is incorrect.\n"
def _Authenticate(self):
"""Save the cookie jar after authentication."""
if self.cert_file_available and not fancy_urllib.can_validate_certs():
logger.warn("""ssl module not found.
Without the ssl module, the identity of the remote host cannot be verified, and
connections may NOT be secure. To fix this, please install the ssl module from
http://pypi.python.org/pypi/ssl .
To learn more, see https://developers.google.com/appengine/kb/general#rpcssl""")
super(HttpRpcServer, self)._Authenticate()
if self.cookie_jar.filename is not None and self.save_cookies:
logger.debug("Saving authentication cookies to %s",
self.cookie_jar.filename)
self.cookie_jar.save()
self._CheckCookie()
def _GetOpener(self):
"""Returns an OpenerDirector that supports cookies and ignores redirects.
Returns:
A urllib2.OpenerDirector object.
"""
opener = urllib2.OpenerDirector()
opener.add_handler(fancy_urllib.FancyProxyHandler())
opener.add_handler(urllib2.UnknownHandler())
opener.add_handler(urllib2.HTTPHandler())
opener.add_handler(urllib2.HTTPDefaultErrorHandler())
opener.add_handler(fancy_urllib.FancyHTTPSHandler())
opener.add_handler(urllib2.HTTPErrorProcessor())
opener.add_handler(ContentEncodingHandler())
if self.save_cookies:
self.cookie_jar.filename = os.path.expanduser(
HttpRpcServer.DEFAULT_COOKIE_FILE_PATH)
if os.path.exists(self.cookie_jar.filename):
try:
self.cookie_jar.load()
self.authenticated = True
logger.debug("Loaded authentication cookies from %s",
self.cookie_jar.filename)
except (OSError, IOError, cookielib.LoadError), e:
logger.debug("Could not load authentication cookies; %s: %s",
e.__class__.__name__, e)
self.cookie_jar.filename = None
else:
try:
fd = os.open(self.cookie_jar.filename, os.O_CREAT, 0600)
os.close(fd)
except (OSError, IOError), e:
logger.debug("Could not create authentication cookies file; %s: %s",
e.__class__.__name__, e)
self.cookie_jar.filename = None
opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
return opener
class HttpRpcServerWithOAuth2Suggestion(HttpRpcServer):
"""An HttpRpcServer variant which suggests using OAuth2 instead of ASP.
Not all systems which use HttpRpcServer can use OAuth2.
"""
SUGGEST_OAUTH2 = True
|
[] |
[] |
[
"APPENGINE_RPC_USE_SID",
"APPCFG_LOGIN_PATH",
"APPENGINE_RPC_HOSTED_LOGIN_TYPE",
"APPENGINE_AUTH_SERVER"
] |
[]
|
["APPENGINE_RPC_USE_SID", "APPCFG_LOGIN_PATH", "APPENGINE_RPC_HOSTED_LOGIN_TYPE", "APPENGINE_AUTH_SERVER"]
|
python
| 4 | 0 | |
cmd/hdfs/df.go
|
package main
import (
"fmt"
"os"
"text/tabwriter"
"github.com/aristanetworks/hdfs/v2"
)
func df(humanReadable bool) {
client, err := getClient("")
if err != nil {
fatal(err)
}
var fs hdfs.FsInfo
fs, err = client.StatFs()
if err != nil {
fatal(err)
}
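// Render a df(1)-style table; the Filesystem column reports the namenode
// address taken from the HADOOP_NAMENODE environment variable.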
tw := tabwriter.NewWriter(os.Stdout, 3, 8, 0, ' ', tabwriter.AlignRight)
fmt.Fprintf(tw, "Filesystem \tSize \tUsed \tAvailable \t Use%%\n")
if humanReadable {
fmt.Fprintf(tw, "%v \t%v \t%v \t%v \t%d%%\n",
os.Getenv("HADOOP_NAMENODE"),
formatBytes(fs.Capacity),
formatBytes(fs.Used),
formatBytes(fs.Remaining),
100*fs.Used/fs.Capacity)
} else {
fmt.Fprintf(tw, "%v \t%v \t %v \t %v \t%d%%\n",
os.Getenv("HADOOP_NAMENODE"),
fs.Capacity,
fs.Used,
fs.Remaining,
100*fs.Used/fs.Capacity)
}
tw.Flush()
}
|
[
"\"HADOOP_NAMENODE\"",
"\"HADOOP_NAMENODE\""
] |
[] |
[
"HADOOP_NAMENODE"
] |
[]
|
["HADOOP_NAMENODE"]
|
go
| 1 | 0 | |
docs/conf.py
|
# -*- coding: utf-8 -*-
#
# Waldur Core Documentation build configuration file, created by
# sphinx-quickstart on Sun May 18 17:27:42 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Waldur'
copyright = u'2014-2016, OpenNode'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from waldur_core import __version__
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'images/robohare.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'images/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'waldurdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'waldur_core.tex', u'Waldur Documentation',
u'Guide and API Reference', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'waldur_core', u'Waldur Core Documentation',
[u'OpenNode'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'waldur_core', u'Waldur Core Documentation',
u'OpenNode', 'waldur_core', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
import django
os.environ['DJANGO_SETTINGS_MODULE'] = 'waldur_core.server.doc_settings'
django.setup()
# Generate API documentation
from waldur_core.core.management.commands.drfdocs import Command
Command().handle(path='docs/drfapi')
|
[] |
[] |
[
"DJANGO_SETTINGS_MODULE"
] |
[]
|
["DJANGO_SETTINGS_MODULE"]
|
python
| 1 | 0 | |
query/logic.py
|
from datetime import date, datetime
from decimal import Decimal
from .base import ExpBase
from .condition import ConditionBase
class LogicBase(ExpBase):
"""Logic class to deal with logic relation"""
def __or__(self, term):
"""use `|` to act OR"""
return OR(self, term)
def __and__(self, term):
"""use `&` to act AND"""
return AND(self, term)
class L(LogicBase):
"""Convert express to Logic object"""
def __init__(self, *terms, **kw_terms):
"""Logic object"""
if len(terms)+len(kw_terms) != 1:
raise Exception(
                'param error: L class must receive exactly one parameter')
if terms:
if not isinstance(terms[0], str):
                raise Exception('L only accepts a string expression')
self._value = terms[0]
else:
k, v = kw_terms.popitem()
k = k.replace('__', '.')
if isinstance(v, ConditionBase):
self._value = f'{k} {v}'
elif isinstance(v, (str, Decimal, datetime, date)):
self._value = f"{k} = '{v}'"
else:
self._value = f'{k} = {v}'
def __eq__(self, item):
return self._value == str(item)
def __repr__(self):
return f'L({self})'
class ComplexLogicBase(LogicBase):
def __init__(self, *terms, **kw_terms):
self._terms = []
for term in terms:
self._add(term)
for k, v in kw_terms.items():
self._add(L(**{k: v}))
t_list = []
if len(self)==1:
self._value = f'{self[0]}'
else:
for t in self:
if isinstance(t, ComplexLogicBase):
t_str = f'({t})'
else:
t_str = f'{t}'
t_list.append(t_str)
oper = f' {self.__class__.__name__} '
self._value = oper.join(t_list)
def __iter__(self):
return iter(self._terms)
def __len__(self):
return len(self._terms)
def __getitem__(self, key):
return self._terms[key]
def _add(self, term):
if term not in self:
if isinstance(term, LogicBase):
if isinstance(term, self.__class__):
for t in term:
self._terms.append(t)
else:
self._terms.append(term)
else:
self._terms.append(L(term))
def __repr__(self):
term_list = [repr(c) for c in self]
v = ','.join(term_list)
return f'{self.__class__.__name__}({v})'
class AND(ComplexLogicBase):
"""Logic AND"""
class OR(ComplexLogicBase):
"""Logic OR"""
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
internal/notes/builtin-SAVE/packages/cantera/package.py
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
class Cantera(SConsPackage):
"""Cantera is a suite of object-oriented software tools for problems
involving chemical kinetics, thermodynamics, and/or transport processes."""
homepage = "http://www.cantera.org/docs/sphinx/html/index.html"
url = "https://github.com/Cantera/cantera/archive/v2.3.0.tar.gz"
version('2.3.0', 'aebbd8d891cb1623604245398502b72e')
version('2.2.1', '9d1919bdef39ddec54485fc8a741a3aa')
variant('python', default=False,
description='Build the Cantera Python module')
variant('matlab', default=False,
description='Build the Cantera Matlab toolbox')
# Required dependencies
depends_on('[email protected]:3.0.2', when='@2.3.0:')
depends_on('googletest', when='@2.3.0:')
depends_on('eigen', when='@2.3.0:')
depends_on('boost')
depends_on('sundials') # must be compiled with -fPIC
depends_on('blas')
depends_on('lapack')
# Python module dependencies
extends('python', when='+python')
depends_on('py-cython', when='+python', type='build')
depends_on('py-numpy', when='+python', type=('build', 'run'))
depends_on('py-scipy', when='+python', type=('build', 'run'))
depends_on('py-3to2', when='+python', type=('build', 'run'))
# TODO: these "when" specs don't actually work
# depends_on('py-unittest2', when='+python^[email protected]', type=('build', 'run')) # noqa
# depends_on('py-unittest2py3k', when='+python^[email protected]', type=('build', 'run')) # noqa
# Matlab toolbox dependencies
extends('matlab', when='+matlab')
def build_args(self, spec, prefix):
# Valid args can be found by running `scons help`
# Required args
args = [
'build',
'prefix={0}'.format(prefix),
'VERBOSE=yes',
'CC={0}'.format(spack_cc),
'CXX={0}'.format(spack_cxx),
'FORTRAN={0}'.format(spack_fc),
'cc_flags={0}'.format(self.compiler.pic_flag),
# Allow Spack environment variables to propagate through to SCons
'env_vars=all'
]
if spec.satisfies('@:2.2.1'):
args.append('F77={0}'.format(spack_f77))
# fmt support
if spec.satisfies('@2.3.0:'):
args.append('system_fmt=y')
# Googletest support
if spec.satisfies('@2.3.0:'):
args.append('system_googletest=y')
# Eigen support
if spec.satisfies('@2.3.0:'):
args.extend([
'system_eigen=y',
'extra_inc_dirs={0}'.format(
join_path(spec['eigen'].prefix.include, 'eigen{0}'.format(
spec['eigen'].version.up_to(1)))),
])
# BLAS/LAPACK support
lapack_blas = spec['lapack'].libs + spec['blas'].libs
args.extend([
'blas_lapack_libs={0}'.format(','.join(lapack_blas.names)),
'blas_lapack_dir={0}'.format(spec['lapack'].prefix.lib)
])
# Boost support
if spec.satisfies('@2.3.0:'):
args.append('boost_inc_dir={0}'.format(
spec['boost'].prefix.include))
else:
args.extend([
'build_thread_safe=yes',
'boost_inc_dir={0}'.format(spec['boost'].prefix.include),
'boost_lib_dir={0}'.format(spec['boost'].prefix.lib),
])
# Sundials support
if spec.satisfies('@2.3.0:'):
args.append('system_sundials=y')
else:
args.extend([
'use_sundials=y',
'sundials_license={0}'.format(
spec['sundials'].prefix.LICENSE)
])
args.extend([
'sundials_include={0}'.format(spec['sundials'].prefix.include),
'sundials_libdir={0}'.format(spec['sundials'].prefix.lib),
])
# Python module
if '+python' in spec:
args.extend([
'python_package=full',
'python_cmd={0}'.format(spec['python'].command.path),
])
if spec['python'].satisfies('@3:'):
args.extend([
'python3_package=y',
'python3_cmd={0}'.format(spec['python'].command.path),
])
else:
args.append('python3_package=n')
else:
args.append('python_package=none')
args.append('python3_package=n')
# Matlab toolbox
if '+matlab' in spec:
args.extend([
'matlab_toolbox=y',
'matlab_path={0}'.format(spec['matlab'].prefix)
])
else:
args.append('matlab_toolbox=n')
return args
def test(self):
if '+python' in self.spec:
# Tests will always fail if Python dependencies aren't built
# In addition, 3 of the tests fail when run in parallel
scons('test', parallel=False)
@run_after('install')
def filter_compilers(self):
"""Run after install to tell the Makefile and SConstruct files to use
the compilers that Spack built the package with.
If this isn't done, they'll have CC, CXX, F77, and FC set to Spack's
generic cc, c++, f77, and f90. We want them to be bound to whatever
compiler they were built with."""
kwargs = {'ignore_absent': True, 'backup': False, 'string': True}
dirname = os.path.join(self.prefix, 'share/cantera/samples')
cc_files = [
'cxx/rankine/Makefile', 'cxx/NASA_coeffs/Makefile',
'cxx/kinetics1/Makefile', 'cxx/flamespeed/Makefile',
'cxx/combustor/Makefile', 'f77/SConstruct'
]
cxx_files = [
'cxx/rankine/Makefile', 'cxx/NASA_coeffs/Makefile',
'cxx/kinetics1/Makefile', 'cxx/flamespeed/Makefile',
'cxx/combustor/Makefile'
]
f77_files = [
'f77/Makefile', 'f77/SConstruct'
]
fc_files = [
'f90/Makefile', 'f90/SConstruct'
]
for filename in cc_files:
filter_file(os.environ['CC'], self.compiler.cc,
os.path.join(dirname, filename), **kwargs)
for filename in cxx_files:
filter_file(os.environ['CXX'], self.compiler.cxx,
os.path.join(dirname, filename), **kwargs)
for filename in f77_files:
filter_file(os.environ['F77'], self.compiler.f77,
os.path.join(dirname, filename), **kwargs)
for filename in fc_files:
filter_file(os.environ['FC'], self.compiler.fc,
os.path.join(dirname, filename), **kwargs)
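# Hedged usage note (added comment, not part of the original recipe): with the
# variants declared above, a typical install might look like
#   spack install cantera+python
# where +python builds the Cantera Python module and +matlab the Matlab toolbox.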
|
[] |
[] |
[
"CXX",
"FC",
"F77",
"CC"
] |
[]
|
["CXX", "FC", "F77", "CC"]
|
python
| 4 | 0 | |
plato/config.py
|
"""
Reading runtime parameters from a standard configuration file (which is easier
to work on than JSON).
"""
import argparse
import logging
import os
import random
import sqlite3
from collections import OrderedDict, namedtuple
import yaml
from yamlinclude import YamlIncludeConstructor
class Config:
"""
Retrieving configuration parameters by parsing a configuration file
using the YAML configuration file parser.
"""
_instance = None
def __new__(cls):
if cls._instance is None:
parser = argparse.ArgumentParser()
parser.add_argument('-i',
'--id',
type=str,
help='Unique client ID.')
parser.add_argument('-p',
'--port',
type=str,
help='The port number for running a server.')
parser.add_argument('-c',
'--config',
type=str,
default='./config.yml',
help='Federated learning configuration file.')
parser.add_argument('-s',
'--server',
type=str,
default=None,
help='The server hostname and port number.')
parser.add_argument(
'-d',
'--download',
action='store_true',
help='Download the dataset to prepare for a training session.')
parser.add_argument('-l',
'--log',
type=str,
default='info',
help='Log messages level.')
args = parser.parse_args()
Config.args = args
if Config.args.id is not None:
Config.args.id = int(args.id)
if Config.args.port is not None:
Config.args.port = int(args.port)
try:
log_level = {
'critical': logging.CRITICAL,
'error': logging.ERROR,
'warn': logging.WARN,
'info': logging.INFO,
'debug': logging.DEBUG
}[args.log]
except KeyError:
log_level = logging.INFO
logging.basicConfig(
format='[%(levelname)s][%(asctime)s]: %(message)s',
level=log_level,
datefmt='%H:%M:%S')
cls._instance = super(Config, cls).__new__(cls)
if 'config_file' in os.environ:
filename = os.environ['config_file']
else:
filename = args.config
YamlIncludeConstructor.add_to_loader_class(
loader_class=yaml.SafeLoader, base_dir='./configs')
if os.path.isfile(filename):
with open(filename, 'r', encoding="utf8") as config_file:
config = yaml.load(config_file, Loader=yaml.SafeLoader)
else:
# if the configuration file does not exist, use a default one
config = Config.default_config()
Config.clients = Config.namedtuple_from_dict(config['clients'])
Config.server = Config.namedtuple_from_dict(config['server'])
Config.data = Config.namedtuple_from_dict(config['data'])
Config.trainer = Config.namedtuple_from_dict(config['trainer'])
Config.algorithm = Config.namedtuple_from_dict(config['algorithm'])
if Config.args.server is not None:
Config.server = Config.server._replace(
address=args.server.split(':')[0])
Config.server = Config.server._replace(
port=args.server.split(':')[1])
if Config.args.download:
Config.clients = Config.clients._replace(total_clients=1)
Config.clients = Config.clients._replace(per_round=1)
if 'results' in config:
Config.results = Config.namedtuple_from_dict(config['results'])
if hasattr(Config().results, 'results_dir'):
Config.result_dir = Config.results.results_dir
else:
datasource = Config.data.datasource
model = Config.trainer.model_name
server_type = Config.algorithm.type
Config.result_dir = f'./results/{datasource}/{model}/{server_type}/'
if 'model' in config:
Config.model = Config.namedtuple_from_dict(config['model'])
if hasattr(Config().trainer, 'max_concurrency'):
# Using a temporary SQLite database to limit the maximum number of concurrent
# trainers
Config.sql_connection = sqlite3.connect(
"/tmp/running_trainers.sqlitedb")
Config().cursor = Config.sql_connection.cursor()
# Customizable dictionary of global parameters
Config.params: dict = {}
# A run ID is unique to each client in an experiment
Config.params['run_id'] = os.getpid()
# Pretrained models
Config.params['model_dir'] = "./models/pretrained/"
Config.params['pretrained_model_dir'] = "./models/pretrained/"
return cls._instance
@staticmethod
def namedtuple_from_dict(obj):
"""Creates a named tuple from a dictionary."""
if isinstance(obj, dict):
fields = sorted(obj.keys())
namedtuple_type = namedtuple(typename='Config',
field_names=fields,
rename=True)
field_value_pairs = OrderedDict(
(str(field), Config.namedtuple_from_dict(obj[field]))
for field in fields)
try:
return namedtuple_type(**field_value_pairs)
except TypeError:
                # Cannot create a namedtuple instance, so fall back to a dict (invalid attribute names)
return dict(**field_value_pairs)
elif isinstance(obj, (list, set, tuple, frozenset)):
return [Config.namedtuple_from_dict(item) for item in obj]
else:
return obj
@staticmethod
def is_edge_server() -> bool:
"""Returns whether the current instance is an edge server in cross-silo FL."""
return Config().args.port is not None
@staticmethod
def is_central_server() -> bool:
"""Returns whether the current instance is a central server in cross-silo FL."""
return hasattr(Config().algorithm,
'cross_silo') and Config().args.port is None
@staticmethod
def device() -> str:
"""Returns the device to be used for training."""
device = 'cpu'
if hasattr(Config().trainer, 'use_mindspore'):
pass
elif hasattr(Config().trainer, 'use_tensorflow'):
import tensorflow as tf
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
device = 'GPU'
tf.config.experimental.set_visible_devices(
gpus[random.randint(0,
len(gpus) - 1)], 'GPU')
else:
import torch
if torch.cuda.is_available() and torch.cuda.device_count() > 0:
if hasattr(Config().trainer,
'parallelized') and Config().trainer.parallelized:
device = 'cuda'
else:
device = 'cuda:' + str(
random.randint(0,
torch.cuda.device_count() - 1))
return device
@staticmethod
def is_parallel() -> bool:
"""Check if the hardware and OS support data parallelism."""
import torch
return hasattr(Config().trainer, 'parallelized') and Config(
).trainer.parallelized and torch.cuda.is_available(
) and torch.distributed.is_available(
) and torch.cuda.device_count() > 1
@staticmethod
def default_config() -> dict:
''' Supply a default configuration when the configuration file is missing. '''
config = {}
config['clients'] = {}
config['clients']['type'] = 'simple'
config['clients']['total_clients'] = 1
config['clients']['per_round'] = 1
config['clients']['do_test'] = False
config['server'] = {}
config['server']['address'] = '127.0.0.1'
config['server']['port'] = 8000
config['server']['disable_clients'] = True
config['data'] = {}
config['data']['datasource'] = 'MNIST'
config['data']['data_path'] = './data'
config['data']['partition_size'] = 20000
config['data']['sampler'] = 'iid'
config['data']['random_seed'] = 1
config['trainer'] = {}
config['trainer']['type'] = 'basic'
config['trainer']['rounds'] = 5
config['trainer']['parallelized'] = False
config['trainer']['target_accuracy'] = 0.94
config['trainer']['epochs'] = 5
config['trainer']['batch_size'] = 32
config['trainer']['optimizer'] = 'SGD'
config['trainer']['learning_rate'] = 0.01
config['trainer']['momentum'] = 0.9
config['trainer']['weight_decay'] = 0.0
config['trainer']['model_name'] = 'lenet5'
config['algorithm'] = {}
config['algorithm']['type'] = 'fedavg'
return config
@staticmethod
def store() -> None:
""" Saving the current run-time configuration to a file. """
data = {}
data['clients'] = Config.clients._asdict()
data['server'] = Config.server._asdict()
data['data'] = Config.data._asdict()
data['trainer'] = Config.trainer._asdict()
data['algorithm'] = Config.algorithm._asdict()
with open(Config.args.config, "w", encoding="utf8") as out:
yaml.dump(data, out, default_flow_style=False)
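# ---------------------------------------------------------------------------
# Access sketch (added comment, not part of the original module; the import
# path is assumed from the file location): Config is a singleton whose YAML
# sections become attribute-style namedtuples, e.g. with default_config():
#
#   from plato.config import Config
#   Config().trainer.epochs    # -> 5
#   Config().server.address    # -> '127.0.0.1'
# ---------------------------------------------------------------------------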
|
[] |
[] |
[
"config_file"
] |
[]
|
["config_file"]
|
python
| 1 | 0 | |
dtmcli/dtmimp/utils.go
|
/*
* Copyright (c) 2021 yedf. All rights reserved.
* Use of this source code is governed by a BSD-style
* license that can be found in the LICENSE file.
*/
package dtmimp
import (
"database/sql"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"os"
"runtime"
"strconv"
"strings"
"sync"
"time"
"github.com/dtm-labs/dtm/dtmcli/logger"
"github.com/go-resty/resty/v2"
)
// Logf an alias of Infof
// Deprecated: use logger.Infof
var Logf = logger.Infof
// LogRedf an alias of Errorf
// Deprecated: use logger.Errorf
var LogRedf = logger.Errorf
// FatalIfError fatal if error is not nil
// Deprecated: use logger.FatalIfError
var FatalIfError = logger.FatalIfError
// LogIfFatalf fatal if cond is true
// Deprecated: use logger.FatalfIf
var LogIfFatalf = logger.FatalfIf
// AsError wrap a panic value as an error
func AsError(x interface{}) error {
logger.Errorf("panic wrapped to error: '%v'", x)
if e, ok := x.(error); ok {
return e
}
return fmt.Errorf("%v", x)
}
// P2E panic to error
func P2E(perr *error) {
if x := recover(); x != nil {
*perr = AsError(x)
}
}
// E2P error to panic
func E2P(err error) {
if err != nil {
panic(err)
}
}
// CatchP catch panic to error
func CatchP(f func()) (rerr error) {
defer P2E(&rerr)
f()
return nil
}
// PanicIf name is clear
func PanicIf(cond bool, err error) {
if cond {
panic(err)
}
}
// MustAtoi converts a string to an int, panicking on failure
func MustAtoi(s string) int {
r, err := strconv.Atoi(s)
if err != nil {
E2P(errors.New("convert to int error: " + s))
}
return r
}
// OrString returns the first non-empty string
func OrString(ss ...string) string {
for _, s := range ss {
if s != "" {
return s
}
}
return ""
}
// If ternary operator
func If(condition bool, trueObj interface{}, falseObj interface{}) interface{} {
if condition {
return trueObj
}
return falseObj
}
// MustMarshal checked version for marshal
func MustMarshal(v interface{}) []byte {
b, err := json.Marshal(v)
E2P(err)
return b
}
// MustMarshalString string version of MustMarshal
func MustMarshalString(v interface{}) string {
return string(MustMarshal(v))
}
// MustUnmarshal checked version for unmarshal
func MustUnmarshal(b []byte, obj interface{}) {
err := json.Unmarshal(b, obj)
E2P(err)
}
// MustUnmarshalString string version of MustUnmarshal
func MustUnmarshalString(s string, obj interface{}) {
MustUnmarshal([]byte(s), obj)
}
// MustRemarshal marshal and unmarshal, and check error
func MustRemarshal(from interface{}, to interface{}) {
b, err := json.Marshal(from)
E2P(err)
err = json.Unmarshal(b, to)
E2P(err)
}
// GetFuncName returns the name of the calling function
func GetFuncName() string {
pc, _, _, _ := runtime.Caller(1)
nm := runtime.FuncForPC(pc).Name()
return nm[strings.LastIndex(nm, ".")+1:]
}
// MayReplaceLocalhost changes localhost to host.docker.internal when running in docker compose, so the host network can be reached
func MayReplaceLocalhost(host string) string {
if os.Getenv("IS_DOCKER") != "" {
return strings.Replace(strings.Replace(host,
"localhost", "host.docker.internal", 1),
"127.0.0.1", "host.docker.internal", 1)
}
return host
}
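// Example (added comment, not part of the original file): with IS_DOCKER set in
// the environment, MayReplaceLocalhost("localhost:3306") returns
// "host.docker.internal:3306"; with IS_DOCKER unset, the input is returned unchanged.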
var sqlDbs sync.Map
// PooledDB get pooled sql.DB
func PooledDB(conf DBConf) (*sql.DB, error) {
dsn := GetDsn(conf)
db, ok := sqlDbs.Load(dsn)
if !ok {
db2, err := StandaloneDB(conf)
if err != nil {
return nil, err
}
db = db2
sqlDbs.Store(dsn, db)
}
return db.(*sql.DB), nil
}
// StandaloneDB get a standalone db instance
func StandaloneDB(conf DBConf) (*sql.DB, error) {
dsn := GetDsn(conf)
logger.Infof("opening standalone %s: %s", conf.Driver, strings.Replace(dsn, conf.Password, "****", 1))
return sql.Open(conf.Driver, dsn)
}
// DBExec use raw db to exec
func DBExec(db DB, sql string, values ...interface{}) (affected int64, rerr error) {
if sql == "" {
return 0, nil
}
began := time.Now()
sql = GetDBSpecial().GetPlaceHoldSQL(sql)
r, rerr := db.Exec(sql, values...)
used := time.Since(began) / time.Millisecond
if rerr == nil {
affected, rerr = r.RowsAffected()
logger.Debugf("used: %d ms affected: %d for %s %v", used, affected, sql, values)
} else {
logger.Errorf("used: %d ms exec error: %v for %s %v", used, rerr, sql, values)
}
return
}
// GetDsn builds a dsn from the db config
func GetDsn(conf DBConf) string {
host := MayReplaceLocalhost(conf.Host)
driver := conf.Driver
dsn := map[string]string{
"mysql": fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?charset=utf8mb4&parseTime=true&loc=Local&interpolateParams=true",
conf.User, conf.Password, host, conf.Port, ""),
"postgres": fmt.Sprintf("host=%s user=%s password=%s dbname='%s' port=%d sslmode=disable",
host, conf.User, conf.Password, "", conf.Port),
}[driver]
PanicIf(dsn == "", fmt.Errorf("unknow driver: %s", driver))
return dsn
}
// RespAsErrorCompatible translate a resty response to error
// compatible with version < v1.10
func RespAsErrorCompatible(resp *resty.Response) error {
code := resp.StatusCode()
str := resp.String()
if code == http.StatusTooEarly || strings.Contains(str, ResultOngoing) {
return fmt.Errorf("%s. %w", str, ErrOngoing)
} else if code == http.StatusConflict || strings.Contains(str, ResultFailure) {
return fmt.Errorf("%s. %w", str, ErrFailure)
} else if code != http.StatusOK {
return errors.New(str)
}
return nil
}
// DeferDo a common defer do used in dtmcli/dtmgrpc
func DeferDo(rerr *error, success func() error, fail func() error) {
defer func() {
if x := recover(); x != nil {
_ = fail()
panic(x)
} else if *rerr != nil {
_ = fail()
} else {
*rerr = success()
}
}()
}
// Escape solves a CodeQL-reported problem by stripping newlines and semicolons
func Escape(input string) string {
v := strings.Replace(input, "\n", "", -1)
v = strings.Replace(v, "\r", "", -1)
v = strings.Replace(v, ";", "", -1)
// v = strings.Replace(v, "'", "", -1)
return v
}
// EscapeGet escapes a query parameter obtained from url.Values
func EscapeGet(qs url.Values, key string) string {
return Escape(qs.Get(key))
}
// InsertBarrier inserts a record into the barrier table
func InsertBarrier(tx DB, transType string, gid string, branchID string, op string, barrierID string, reason string) (int64, error) {
if op == "" {
return 0, nil
}
sql := GetDBSpecial().GetInsertIgnoreTemplate(BarrierTableName+"(trans_type, gid, branch_id, op, barrier_id, reason) values(?,?,?,?,?,?)", "uniq_barrier")
return DBExec(tx, sql, transType, gid, branchID, op, barrierID, reason)
}
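// Note (added comment, not part of the original file): InsertBarrier combines the
// driver-specific "insert ignore" template with the uniq_barrier unique key, so
// inserting the same (trans_type, gid, branch_id, op, barrier_id) twice affects 0
// rows; callers can use the affected count to detect duplicate branch calls.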
|
[
"\"IS_DOCKER\""
] |
[] |
[
"IS_DOCKER"
] |
[]
|
["IS_DOCKER"]
|
go
| 1 | 0 | |
cmd/gomobile/bind_test.go
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"testing"
"text/template"
)
func TestImportPackagesPathCleaning(t *testing.T) {
if runtime.GOOS == "android" {
t.Skip("not available on Android")
}
slashPath := "github.com/bilb02/go-mobile/example/bind/hello/"
pkgs, err := importPackages([]string{slashPath})
if err != nil {
t.Fatal(err)
}
p := pkgs[0]
if c := path.Clean(slashPath); p.ImportPath != c {
t.Errorf("expected %s; got %s", c, p.ImportPath)
}
}
func TestBindAndroid(t *testing.T) {
androidHome := os.Getenv("ANDROID_HOME")
if androidHome == "" {
t.Skip("ANDROID_HOME not found, skipping bind")
}
platform, err := androidAPIPath()
if err != nil {
t.Skip("No android API platform found in $ANDROID_HOME, skipping bind")
}
platform = strings.Replace(platform, androidHome, "$ANDROID_HOME", -1)
defer func() {
xout = os.Stderr
buildN = false
buildX = false
buildO = ""
buildTarget = ""
bindJavaPkg = ""
}()
buildN = true
buildX = true
buildO = "asset.aar"
buildTarget = "android/arm"
tests := []struct {
javaPkg string
}{
{
// Empty javaPkg
},
{
javaPkg: "com.example.foo",
},
}
for _, tc := range tests {
bindJavaPkg = tc.javaPkg
buf := new(bytes.Buffer)
xout = buf
gopath = filepath.SplitList(goEnv("GOPATH"))[0]
if goos == "windows" {
os.Setenv("HOMEDRIVE", "C:")
}
cmdBind.flag.Parse([]string{"github.com/bilb02/go-mobile/asset"})
err := runBind(cmdBind)
if err != nil {
t.Log(buf.String())
t.Fatal(err)
}
got := filepath.ToSlash(buf.String())
data := struct {
outputData
AndroidPlatform string
JavaPkg string
}{
outputData: defaultOutputData(),
AndroidPlatform: platform,
JavaPkg: tc.javaPkg,
}
wantBuf := new(bytes.Buffer)
if err := bindAndroidTmpl.Execute(wantBuf, data); err != nil {
t.Errorf("%+v: computing diff failed: %v", tc, err)
continue
}
diff, err := diff(got, wantBuf.String())
if err != nil {
t.Errorf("%+v: computing diff failed: %v", tc, err)
continue
}
if diff != "" {
t.Errorf("%+v: unexpected output:\n%s", tc, diff)
}
}
}
func TestBindIOS(t *testing.T) {
if !xcodeAvailable() {
t.Skip("Xcode is missing")
}
defer func() {
xout = os.Stderr
buildN = false
buildX = false
buildO = ""
buildTarget = ""
bindPrefix = ""
}()
buildN = true
buildX = true
buildO = "Asset.framework"
buildTarget = "ios/arm"
tests := []struct {
prefix string
}{
{
// empty prefix
},
{
prefix: "Foo",
},
}
for _, tc := range tests {
bindPrefix = tc.prefix
buf := new(bytes.Buffer)
xout = buf
gopath = filepath.SplitList(goEnv("GOPATH"))[0]
if goos == "windows" {
os.Setenv("HOMEDRIVE", "C:")
}
cmdBind.flag.Parse([]string{"github.com/bilb02/go-mobile/asset"})
err := runBind(cmdBind)
if err != nil {
t.Log(buf.String())
t.Fatal(err)
}
got := filepath.ToSlash(buf.String())
data := struct {
outputData
Prefix string
}{
outputData: defaultOutputData(),
Prefix: tc.prefix,
}
wantBuf := new(bytes.Buffer)
if err := bindIOSTmpl.Execute(wantBuf, data); err != nil {
t.Errorf("%+v: computing diff failed: %v", tc, err)
continue
}
diff, err := diff(got, wantBuf.String())
if err != nil {
t.Errorf("%+v: computing diff failed: %v", tc, err)
continue
}
if diff != "" {
t.Errorf("%+v: unexpected output:\n%s", tc, diff)
}
}
}
var bindAndroidTmpl = template.Must(template.New("output").Parse(`GOMOBILE={{.GOPATH}}/pkg/gomobile
WORK=$WORK
GOOS=android CGO_ENABLED=1 gobind -lang=go,java -outdir=$WORK{{if .JavaPkg}} -javapkg={{.JavaPkg}}{{end}} github.com/bilb02/go-mobile/asset
GOOS=android GOARCH=arm CC=$NDK_PATH/toolchains/llvm/prebuilt/{{.NDKARCH}}/bin/armv7a-linux-androideabi16-clang CXX=$NDK_PATH/toolchains/llvm/prebuilt/{{.NDKARCH}}/bin/armv7a-linux-androideabi16-clang++ CGO_ENABLED=1 GOARM=7 GOPATH=$WORK:$GOPATH GO111MODULE=off go build -x -buildmode=c-shared -o=$WORK/android/src/main/jniLibs/armeabi-v7a/libgojni.so gobind
PWD=$WORK/java javac -d $WORK/javac-output -source 1.7 -target 1.7 -bootclasspath {{.AndroidPlatform}}/android.jar *.java
jar c -C $WORK/javac-output .
`))
var bindIOSTmpl = template.Must(template.New("output").Parse(`GOMOBILE={{.GOPATH}}/pkg/gomobile
WORK=$WORK
GOOS=darwin CGO_ENABLED=1 gobind -lang=go,objc -outdir=$WORK -tags=ios{{if .Prefix}} -prefix={{.Prefix}}{{end}} github.com/bilb02/go-mobile/asset
GOARM=7 GOOS=darwin GOARCH=arm CC=iphoneos-clang CXX=iphoneos-clang++ CGO_CFLAGS=-isysroot=iphoneos -miphoneos-version-min=7.0 -fembed-bitcode -arch armv7 CGO_CXXFLAGS=-isysroot=iphoneos -miphoneos-version-min=7.0 -fembed-bitcode -arch armv7 CGO_LDFLAGS=-isysroot=iphoneos -miphoneos-version-min=7.0 -fembed-bitcode -arch armv7 CGO_ENABLED=1 GOPATH=$WORK:$GOPATH GO111MODULE=off go build -tags ios -x -buildmode=c-archive -o $WORK/asset-arm.a gobind
rm -r -f "Asset.framework"
mkdir -p Asset.framework/Versions/A/Headers
ln -s A Asset.framework/Versions/Current
ln -s Versions/Current/Headers Asset.framework/Headers
ln -s Versions/Current/Asset Asset.framework/Asset
xcrun lipo -create -arch armv7 $WORK/asset-arm.a -o Asset.framework/Versions/A/Asset
cp $WORK/src/gobind/{{.Prefix}}Asset.objc.h Asset.framework/Versions/A/Headers/{{.Prefix}}Asset.objc.h
mkdir -p Asset.framework/Versions/A/Headers
cp $WORK/src/gobind/Universe.objc.h Asset.framework/Versions/A/Headers/Universe.objc.h
mkdir -p Asset.framework/Versions/A/Headers
cp $WORK/src/gobind/ref.h Asset.framework/Versions/A/Headers/ref.h
mkdir -p Asset.framework/Versions/A/Headers
mkdir -p Asset.framework/Versions/A/Headers
mkdir -p Asset.framework/Versions/A/Resources
ln -s Versions/Current/Resources Asset.framework/Resources
mkdir -p Asset.framework/Resources
mkdir -p Asset.framework/Versions/A/Modules
ln -s Versions/Current/Modules Asset.framework/Modules
`))
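// Note (added comment, not part of the original test): both templates above are
// compared against the command trace captured from runBind with buildN/buildX set,
// so no real toolchain runs; any change to the bind command sequence shows up as a
// diff in TestBindAndroid or TestBindIOS.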
|
[
"\"ANDROID_HOME\""
] |
[] |
[
"ANDROID_HOME"
] |
[]
|
["ANDROID_HOME"]
|
go
| 1 | 0 | |
pstem/stem-random-walk-nin-20-74/stem-random-walk-nin-20-74_examples.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import argparse
import numpy as np
import tensorflow as tf
import cv2
from scipy.misc import imread
from scipy import ndimage as nd
import time
import os, random
from PIL import Image
from PIL import ImageDraw
import functools
import itertools
import collections
import six
from tensorflow.python.platform import tf_logging as logging
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import device as pydev
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
from tensorflow.python.training import device_setter
from tensorflow.contrib.learn.python.learn import run_config
from tensorflow.contrib.framework.python.ops import add_arg_scope
slim = tf.contrib.slim
tf.logging.set_verbosity(tf.logging.DEBUG)
tf.set_random_seed(1234)
scale = 0 #Make scale large to speed up initial testing
gen_features0 = 32 if not scale else 1
gen_features1 = 64 if not scale else 1
gen_features2 = 64 if not scale else 1
gen_features3 = 32 if not scale else 1
nin_features1 = 128 if not scale else 1
nin_features2 = 256 if not scale else 1
nin_features3 = 512 if not scale else 1
nin_features4 = 768 if not scale else 1
features1 = 64 if not scale else 1
features2 = 128 if not scale else 1
features3 = 256 if not scale else 1
features4 = 512 if not scale else 1
features5 = features4 if not scale else 1
num_global_enhancer_blocks = 6
num_local_enhancer_blocks = 3
data_dir = "//Desktop-sa1evjv/f/ARM_scans-crops/"
modelSavePeriod = 4. #Train timestep in hours
modelSavePeriod *= 3600 #Convert to s
model_dir = "//ads.warwick.ac.uk/shared/HCSS6/Shared305/Microscopy/Jeffrey-Ede/models/stem-random-walk-nin-20-74/"
shuffle_buffer_size = 5000
num_parallel_calls = 6
num_parallel_readers = 6
prefetch_buffer_size = 12
batch_size = 1
num_gpus = 1
#batch_size = 8 #Batch size to use during training
num_epochs = 1000000 #Dataset repeats indefinitely
logDir = "C:/dump/train/"
log_file = model_dir+"log.txt"
val_log_file = model_dir+"val_log.txt"
discr_pred_file = model_dir+"discr_pred.txt"
log_every = 1 #Log every _ examples
cumProbs = np.array([]) #Indices of the distribution plus 1 will correspond to means
numMeans = 64 // batch_size
scaleMean = 4 #Each means array index increment corresponds to this increase in the mean
numDynamicGrad = 1 #Number of gradients to calculate for each possible mean when dynamically updating training
lossSmoothingBoxcarSize = 5
channels = 1 #Greyscale input image
#Sidelength of images to feed the neural network
cropsize = 512
use_mask = False #If true, supply mask to network as additional information
generator_input_size = cropsize
height_crop = width_crop = cropsize
discr_size = 70
weight_decay = 0.0
batch_decay_gen = 0.999
batch_decay_discr = 0.999
initial_learning_rate = 0.001
initial_discriminator_learning_rate = 0.001
num_workers = 1
increase_batch_size_by_factor = 1
effective_batch_size = increase_batch_size_by_factor*batch_size
save_result_every_n_batches = 25000
val_skip_n = 50
trainee_switch_skip_n = 1
max_num_since_training_change = 0
disp_select = False #Display selected pixels upon startup
def int_shape(x):
return list(map(int, x.get_shape()))
def spectral_norm(w, iteration=1, count=0):
w0 = w
w_shape = w.shape.as_list()
w = tf.reshape(w, [-1, w_shape[-1]])
u = tf.get_variable("u"+str(count),
[1, w_shape[-1]],
initializer=tf.random_normal_initializer(mean=0.,stddev=0.03),
trainable=False)
u_hat = u
v_hat = None
for i in range(iteration):
"""
power iteration
Usually iteration = 1 will be enough
"""
v_ = tf.matmul(u_hat, tf.transpose(w))
v_hat = tf.nn.l2_normalize(v_)
u_ = tf.matmul(v_hat, w)
u_hat = tf.nn.l2_normalize(u_)
u_hat = tf.stop_gradient(u_hat)
v_hat = tf.stop_gradient(v_hat)
sigma = tf.matmul(tf.matmul(v_hat, w), tf.transpose(u_hat))
with tf.control_dependencies([u.assign(u_hat)]):
w_norm = w / sigma
w_norm = tf.reshape(w_norm, w_shape)
return w_norm
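#Illustrative note (added comment, not part of the original script): spectral_norm
#estimates the largest singular value of `w` with one step of power iteration and
#divides the kernel by it, e.g.
#  w_sn = spectral_norm(w, count=3)
#Each call creates a "u"+str(count) variable, so repeated calls in the same scope
#need distinct `count` values to avoid name clashes.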
adjusted_mse_counter = 0
def adjusted_mse(img1, img2):
return tf.losses.mean_squared_error(img1, img2)
def pad(tensor, size):
d1_pad = size[0]
d2_pad = size[1]
paddings = tf.constant([[0, 0], [d1_pad, d1_pad], [d2_pad, d2_pad], [0, 0]], dtype=tf.int32)
padded = tf.pad(tensor, paddings, mode="REFLECT")
return padded
def gaussian_kernel(size: int,
mean: float,
std: float,
):
"""Makes 2D gaussian Kernel for convolution."""
d = tf.distributions.Normal(mean, std)
vals = d.prob(tf.range(start = -size, limit = size + 1, dtype = tf.float32))
gauss_kernel = tf.einsum('i,j->ij', vals, vals)
return gauss_kernel / tf.reduce_sum(gauss_kernel)
def blur(image):
gauss_kernel = gaussian_kernel( 1, 0., 1.5 )
#Expand dimensions of `gauss_kernel` for `tf.nn.conv2d` signature
gauss_kernel = gauss_kernel[:, :, tf.newaxis, tf.newaxis]
#Convolve
image = pad(image, (1,1))
return tf.nn.conv2d(image, gauss_kernel, strides=[1, 1, 1, 1], padding="VALID")
#Track average MSEs
adjusted_mse_counter += 1
avg = tf.get_variable(
name=f"avg-{adjusted_mse_counter}",
shape=img1.get_shape(),
initializer=3*tf.ones(img1.get_shape()))
squared_errors = (img1 - img2)**2
update_avg = tf.assign(avg, 0.999*avg + 0.001*squared_errors)
with tf.control_dependencies([update_avg]):
#Errors for px with systematically higher MSEs are increased
scale = blur(avg)
scale /= tf.reduce_mean(scale)
mse = tf.reduce_mean( scale*squared_errors )
return mse
alrc_counter = 0
def alrc(loss, num_stddev=3, decay=0.999, mu1_start=25, mu2_start=30**2):
global alrc_counter; alrc_counter += 1
    #Variables to track the first two raw moments of the loss
mu = tf.get_variable(
f"mu-{alrc_counter}",
initializer=tf.constant(mu1_start, dtype=tf.float32))
mu2 = tf.get_variable(
f"mu2-{alrc_counter}",
initializer=tf.constant(mu2_start, dtype=tf.float32))
#Use capped loss for moment updates to limit the effect of extreme losses on the threshold
sigma = tf.sqrt(mu2 - mu**2+1.e-8)
loss = tf.where(loss < mu+num_stddev*sigma,
loss,
loss/tf.stop_gradient(loss/(mu+num_stddev*sigma)))
#Update moments
with tf.control_dependencies([mu.assign(decay*mu+(1-decay)*loss), mu2.assign(decay*mu2+(1-decay)*loss**2)]):
return tf.identity(loss)
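#Usage sketch (added comment, not part of the original script): alrc implements
#adaptive loss clipping: values above mu + num_stddev*sigma are divided by a
#stop-gradient factor so their effective magnitude is capped at the threshold,
#and the running moments mu, mu2 are updated, e.g.
#  loss = alrc(loss)
#capper_fn below simply forwards to alrc.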
capper_counter = 0
def capper_fn(x):
return alrc(x)
global capper_counter; capper_counter += 1
mu = tf.get_variable(f"mu-{capper_counter}", initializer=tf.constant(25, dtype=tf.float32))
mu2 = tf.get_variable(f"mu2-{capper_counter}", initializer=tf.constant(30**2, dtype=tf.float32))
def cap(x):
sigma = tf.sqrt(mu2 - mu**2+1.e-8)
capped_x = tf.cond(x < mu+3*sigma, lambda: x, lambda: x/tf.stop_gradient(x/(mu+3*sigma)))
return capped_x
x = cap(x)
with tf.control_dependencies([mu.assign(0.999*mu+0.001*x), mu2.assign(0.999*mu2+0.001*x**2)]):
return tf.cond(x <= 1, lambda: x, lambda: tf.sqrt(x + 1.e-8))
def generator_architecture(inputs, small_inputs, mask, small_mask, norm_decay, init_pass):
"""Generates fake data to try and fool the discrimator"""
with tf.variable_scope("Network", reuse=not init_pass):
def gaussian_noise(x, sigma=0.3, deterministic=False, name=''):
with tf.variable_scope(name):
if deterministic:
return x
else:
noise = tf.random_normal(shape=tf.shape(x), mean=0.0, stddev=sigma, dtype=tf.float32)
return x + noise
concat_axis = 3
def int_shape(x):
return list(map(int, x.get_shape()))
mu_counter = 0
def mean_only_batch_norm(input, decay=norm_decay, reuse_counter=None, init=init_pass):
mu = tf.reduce_mean(input, keepdims=True)
shape = int_shape(mu)
if not reuse_counter and init_pass: #Variable not being reused
nonlocal mu_counter
mu_counter += 1
running_mean = tf.get_variable("mu"+str(mu_counter),
dtype=tf.float32,
initializer=tf.constant(np.zeros(shape, dtype=np.float32)),
trainable=False)
else:
running_mean = tf.get_variable("mu"+str(mu_counter))
running_mean = decay*running_mean + (1-decay)*mu
mean_only_norm = input - running_mean
return mean_only_norm
def _actv_func(x, slope=0.01):
x = tf.nn.leaky_relu(x, slope)
return x
def get_var_maybe_avg(var_name, ema, **kwargs):
''' utility for retrieving polyak averaged params '''
v = tf.get_variable(var_name, **kwargs)
if ema is not None:
v = ema.average(v)
return v
def get_vars_maybe_avg(var_names, ema, **kwargs):
''' utility for retrieving polyak averaged params '''
vars = []
for vn in var_names:
vars.append(get_var_maybe_avg(vn, ema, **kwargs))
return vars
def mean_only_batch_norm_impl(x, pop_mean, b, is_conv_out=True, deterministic=False,
decay=norm_decay, name='meanOnlyBatchNormalization'):
'''
input comes in which is t=(g*V/||V||)*x
deterministic : separates training and testing phases
'''
with tf.variable_scope(name):
if deterministic:
# testing phase, return the result with the accumulated batch mean
return x - pop_mean + b
else:
# compute the current minibatch mean
if is_conv_out:
# using convolutional layer as input
m, _ = tf.nn.moments(x, [0,1,2])
else:
# using fully connected layer as input
m, _ = tf.nn.moments(x, [0])
# update minibatch mean variable
pop_mean_op = tf.assign(pop_mean, tf.scalar_mul(0.99, pop_mean) + tf.scalar_mul(1-0.99, m))
with tf.control_dependencies([pop_mean_op]):
return x - m + b
def batch_norm_impl(x,is_conv_out=True, deterministic=False, decay=norm_decay, name='BatchNormalization'):
with tf.variable_scope(name):
scale = tf.get_variable('scale',shape=x.get_shape()[-1],
dtype=tf.float32,initializer=tf.ones_initializer(),trainable=True)
beta = tf.get_variable('beta',shape=x.get_shape()[-1],
dtype=tf.float32,initializer=tf.zeros_initializer(),trainable=True)
pop_mean = tf.get_variable('pop_mean',shape=x.get_shape()[-1],
dtype=tf.float32,initializer=tf.zeros_initializer(), trainable=False)
pop_var = tf.get_variable('pop_var',shape=x.get_shape()[-1],
dtype=tf.float32,initializer=tf.ones_initializer(), trainable=False)
if deterministic:
return tf.nn.batch_normalization(x,pop_mean,pop_var,beta,scale,0.001)
else:
if is_conv_out:
batch_mean, batch_var = tf.nn.moments(x,[0,1,2])
else:
batch_mean, batch_var = tf.nn.moments(x,[0])
pop_mean_op = tf.assign(pop_mean, pop_mean * 0.99 + batch_mean * (1 - 0.99))
pop_var_op = tf.assign(pop_var, pop_var * 0.99 + batch_var * (1 - 0.99))
with tf.control_dependencies([pop_mean_op, pop_var_op]):
return tf.nn.batch_normalization(x, batch_mean, batch_var, beta, scale, 0.001)
conv2d_counter = 0
def conv2d(x, num_filters, stride=1, filter_size=3, pad='SAME', nonlinearity=_actv_func, init_scale=1., init=init_pass,
use_weight_normalization=True, use_batch_normalization=False, mean_only_norm=False,
deterministic=False, slope=0.01):
filter_size = [filter_size,filter_size]
stride = [stride,stride]
'''
deterministic : used for batch normalizations (separates the training and testing phases)
'''
nonlocal conv2d_counter
conv2d_counter += 1
name = 'conv'+str(conv2d_counter)
with tf.variable_scope(name):
V = tf.get_variable('V', shape=filter_size+[int(x.get_shape()[-1]),num_filters], dtype=tf.float32,
initializer=tf.random_normal_initializer(0, 0.05), trainable=True)
                if use_batch_normalization is False: # not using bias term when doing batch normalization, to avoid indefinite growth of the bias, according to the BN2015 paper
b = tf.get_variable('b', shape=[num_filters], dtype=tf.float32,
initializer=tf.constant_initializer(0.), trainable=True)
if mean_only_norm:
pop_mean = tf.get_variable('meanOnlyBatchNormalization/pop_mean',shape=[num_filters],
dtype=tf.float32, initializer=tf.zeros_initializer(),trainable=False)
if use_weight_normalization:
g = tf.get_variable('g', shape=[num_filters], dtype=tf.float32,
initializer=tf.constant_initializer(1.), trainable=True)
if init:
v_norm = tf.nn.l2_normalize(V,[0,1,2])
x = tf.nn.conv2d(x, v_norm, strides=[1] + stride + [1],padding=pad)
m_init, v_init = tf.nn.moments(x, [0,1,2])
scale_init=init_scale/tf.sqrt(v_init + 1e-08)
g = g.assign(scale_init)
b = b.assign(-m_init*scale_init)
x = tf.reshape(scale_init,[1,1,1,num_filters])*(x-tf.reshape(m_init,[1,1,1,num_filters]))
else:
W = tf.reshape(g, [1, 1, 1, num_filters]) * tf.nn.l2_normalize(V, [0, 1, 2])
if mean_only_norm: # use weight-normalization combined with mean-only-batch-normalization
x = tf.nn.conv2d(x,W,strides=[1]+stride+[1],padding=pad)
x = mean_only_batch_norm_impl(x,pop_mean,b,is_conv_out=True, deterministic=deterministic)
else:
# use just weight-normalization
x = tf.nn.bias_add(tf.nn.conv2d(x, W, [1] + stride + [1], pad), b)
elif use_batch_normalization:
x = tf.nn.conv2d(x,V,[1]+stride+[1],pad)
x = batch_norm_impl(x,is_conv_out=True,deterministic=deterministic)
else:
x = tf.nn.bias_add(tf.nn.conv2d(x,V,strides=[1]+stride+[1],padding=pad),b)
# apply nonlinearity
if nonlinearity is not None:
x = nonlinearity(x, slope)
return x
deconv2d_counter = 0
def deconv2d(x, num_filters, stride=1, filter_size=3, pad='SAME', nonlinearity=_actv_func,
init_scale=1., init=init_pass,
use_weight_normalization=True, use_batch_normalization=False, mean_only_norm=True,
deterministic=False, name='', slope=0.01):
filter_size = [filter_size,filter_size]
stride = [stride,stride]
'''
deterministic : used for batch normalizations (separates the training and testing phases)
'''
nonlocal deconv2d_counter
deconv2d_counter += 1
name = 'deconv'+str(deconv2d_counter)
xs = int_shape(x)
if pad=='SAME':
target_shape = [xs[0], xs[1]*stride[0], xs[2]*stride[1], num_filters]
else:
target_shape = [xs[0], xs[1]*stride[0] + filter_size[0]-1, xs[2]*stride[1] + filter_size[1]-1, num_filters]
with tf.variable_scope(name):
V = tf.get_variable('V', shape=filter_size+[num_filters,int(x.get_shape()[-1])], dtype=tf.float32,
initializer=tf.random_normal_initializer(0, 0.05), trainable=True)
#V = tf.get_variable('V', shape=filter_size+[int(x.get_shape()[-1]), num_filters], dtype=tf.float32,
# initializer=tf.random_normal_initializer(0, 0.05), trainable=True)
                if use_batch_normalization is False: # not using bias term when doing batch normalization, to avoid indefinite growth of the bias, according to the BN2015 paper
b = tf.get_variable('b', shape=[num_filters], dtype=tf.float32,
initializer=tf.constant_initializer(0.), trainable=True)
if mean_only_norm:
pop_mean = tf.get_variable('meanOnlyBatchNormalization/pop_mean',shape=[num_filters], dtype=tf.float32, initializer=tf.zeros_initializer(),trainable=False)
if use_weight_normalization:
g = tf.get_variable('g', shape=[num_filters], dtype=tf.float32,
initializer=tf.constant_initializer(1.), trainable=True)
if init:
v_norm = tf.nn.l2_normalize(V,[0,1,2])
x = tf.nn.conv2d_transpose(x, v_norm, target_shape, strides=[1] + stride + [1],padding=pad)
m_init, v_init = tf.nn.moments(x, [0,1,2])
scale_init=init_scale/tf.sqrt(v_init + 1e-08)
g = g.assign(scale_init)
b = b.assign(-m_init*scale_init)
x = tf.reshape(scale_init,[1,1,1,num_filters])*(x-tf.reshape(m_init,[1,1,1,num_filters]))
else:
W = tf.reshape(g, [1, 1, num_filters, 1]) * tf.nn.l2_normalize(V, [0, 1, 2])
if mean_only_norm: # use weight-normalization combined with mean-only-batch-normalization
x = tf.nn.conv2d_transpose(x,W,target_shape,strides=[1]+stride+[1],padding=pad)
x = mean_only_batch_norm_impl(x,pop_mean,b,is_conv_out=True, deterministic=deterministic)
else:
# use just weight-normalization
x = tf.nn.bias_add(tf.nn.conv2d(x, W, [1] + stride + [1], pad), b)
elif use_batch_normalization:
x = tf.nn.conv2d(x,V,[1]+stride+[1],pad)
x = batch_norm_impl(x,is_conv_out=True,deterministic=deterministic)
else:
x = tf.nn.bias_add(tf.nn.conv2d(x,V,strides=[1]+stride+[1],padding=pad),b)
# apply nonlinearity
if nonlinearity is not None:
x = nonlinearity(x, slope)
return x
def xception_middle_block(input, features):
main_flow = conv2d(
x=input,
num_filters=features,
stride=1)
main_flow = conv2d(
x=main_flow,
num_filters=features,
stride=1)
main_flow = conv2d(
x=main_flow,
num_filters=features,
stride=1)
return main_flow + input
def init_batch_norm(x):
batch_mean, batch_var = tf.nn.moments(x,[0])
return (x - batch_mean) / np.sqrt( batch_var + 0.001 )
def network_in_network(input, nin_features_out, mask=None):
if use_mask:
concatenation = tf.concat(values=[input, mask], axis=concat_axis)
else:
concatenation = input
with tf.variable_scope("Inner"):
nin = conv2d(concatenation, 64, 1,
filter_size=5,
mean_only_norm=True,
use_weight_normalization=not use_mask, slope=0.1)
residuals = False
if residuals:
nin = conv2d(nin, nin_features1, 2, slope=0.1)
nin1 = nin
nin = conv2d(nin, nin_features2, 2, slope=0.1)
nin2 = nin
nin = conv2d(nin, nin_features3, 2, slope=0.1)
nin3 = nin
nin = conv2d(nin, nin_features4, 2, slope=0.1)
for _ in range(num_global_enhancer_blocks):
nin = xception_middle_block(nin, nin_features4)
nin = deconv2d(nin, nin_features3, 2)
nin += nin3
nin = deconv2d(nin, nin_features2, 2)
nin += nin2
nin = deconv2d(nin, nin_features1, 2)
nin += nin1
nin = deconv2d(nin, nin_features_out, 2)
else:
nin = conv2d(nin, nin_features1, 2)
nin = conv2d(nin, nin_features2, 2)
nin = conv2d(nin, nin_features3, 2)
nin = conv2d(nin, nin_features4, 2)
for _ in range(num_global_enhancer_blocks):
nin = xception_middle_block(nin, nin_features4)
nin = deconv2d(nin, nin_features3, 2)
nin = deconv2d(nin, nin_features2, 2)
nin = deconv2d(nin, nin_features1, 2)
nin = deconv2d(nin, nin_features_out, 2)
with tf.variable_scope("Trainer"):
inner = conv2d(nin, 64, 1)
inner = conv2d(inner, 1, 1, mean_only_norm=False, nonlinearity=None)
return nin, inner
##Model building
if not init_pass:
input = inputs
small_input = small_inputs
else:
input = tf.random_uniform(shape=int_shape(inputs), minval=-0.8, maxval=0.8)
input *= mask
small_input = tf.image.resize_images(input, (cropsize//2, cropsize//2))
with tf.variable_scope("Inner"):
if not use_mask:
nin, inner = network_in_network(small_input, gen_features1)
else:
nin, inner = network_in_network(small_input, gen_features1, small_mask)
with tf.variable_scope("Outer"):
if use_mask:
concatenation = tf.concat(values=[input, mask], axis=concat_axis)
else:
concatenation = input
enc = conv2d(x=concatenation,
num_filters=gen_features0,
stride=1,
filter_size=5,
mean_only_norm=not use_mask, slope=0.1)
enc = conv2d(enc, gen_features1, 2, slope=0.1)
enc = enc + nin
for _ in range(num_local_enhancer_blocks):
enc = xception_middle_block(enc, gen_features2)
enc = deconv2d(enc, gen_features3, 2)
enc = conv2d(enc, gen_features3, 1)
outer = conv2d(enc, 1, 1, mean_only_norm=False, nonlinearity=None)
return inner, outer
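#Note (added comment, not part of the original script): generator_architecture
#returns two single-channel images: `inner`, the half-resolution output of the
#network-in-network "Trainer" head, and `outer`, the full-resolution output of
#the outer enhancer.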
def discriminator_architecture(inputs, second_input=None, phase=False, params=None,
gen_loss=0., reuse=False):
"""Three discriminators to discriminate between two data discributions"""
with tf.variable_scope("GAN/Discr", reuse=reuse):
def int_shape(x):
return list(map(int, x.get_shape()))
#phase = mode == tf.estimator.ModeKeys.TRAIN #phase is true during training
concat_axis = 3
def _instance_norm(net, train=phase):
batch, rows, cols, channels = [i.value for i in net.get_shape()]
var_shape = [channels]
mu, sigma_sq = tf.nn.moments(net, [1,2], keep_dims=True)
shift = tf.Variable(tf.zeros(var_shape), trainable=False)
scale = tf.Variable(tf.ones(var_shape), trainable=False)
epsilon = 1.e-3
normalized = (net - mu) / (sigma_sq + epsilon)**(.5)
return scale*normalized + shift
def instance_then_activ(input):
batch_then_activ = _instance_norm(input)
batch_then_activ = tf.nn.relu(batch_then_activ)
return batch_then_activ
##Reusable blocks
def _batch_norm_fn(input):
batch_norm = tf.contrib.layers.batch_norm(
input,
epsilon=0.001,
decay=0.999,
center=True,
scale=True,
is_training=phase,
fused=True,
zero_debias_moving_mean=False,
renorm=False)
return batch_norm
def batch_then_activ(input): #Changed to instance norm for stability
batch_then_activ = input#_instance_norm(input)
batch_then_activ = tf.nn.leaky_relu(batch_then_activ, alpha=0.2)
return batch_then_activ
def conv_block_not_sep(input, filters, kernel_size=3, phase=phase, batch_and_activ=True):
"""
Convolution -> batch normalisation -> leaky relu
phase defaults to true, meaning that the network is being trained
"""
conv_block = slim.conv2d(
inputs=input,
num_outputs=filters,
kernel_size=kernel_size,
padding="SAME",
activation_fn=None)
if batch_and_activ:
conv_block = batch_then_activ(conv_block)
return conv_block
def conv_block(input, filters, phase=phase):
"""
Convolution -> batch normalisation -> leaky relu
phase defaults to true, meaning that the network is being trained
"""
conv_block = strided_conv_block(input, filters, 1, 1)
return conv_block
count = 0
def discr_conv_block(input, filters, stride, rate=1, phase=phase, kernel_size=3, actv=True):
nonlocal count
count += 1
w = tf.get_variable("kernel"+str(count), shape=[kernel_size, kernel_size, input.get_shape()[-1], filters])
b = tf.get_variable("bias"+str(count), [filters], initializer=tf.constant_initializer(0.0))
x = tf.nn.conv2d(input=input, filter=spectral_norm(w, count=count),
strides=[1, stride, stride, 1], padding='VALID') + b
if actv:
x = batch_then_activ(x)
return x
def residual_conv(input, filters):
residual = slim.conv2d(
inputs=input,
num_outputs=filters,
kernel_size=1,
stride=2,
padding="SAME",
activation_fn=None)
residual = batch_then_activ(residual)
return residual
def xception_encoding_block(input, features):
cnn = conv_block(
input=input,
filters=features)
cnn = conv_block(
input=cnn,
filters=features)
cnn = strided_conv_block(
input=cnn,
filters=features,
stride=2)
residual = residual_conv(input, features)
cnn += residual
return cnn
def xception_encoding_block_diff(input, features_start, features_end):
cnn = conv_block(
input=input,
filters=features_start)
cnn = conv_block(
input=cnn,
filters=features_start)
cnn = strided_conv_block(
input=cnn,
filters=features_end,
stride=2)
residual = residual_conv(input, features_end)
cnn += residual
return cnn
def xception_middle_block(input, features):
main_flow = strided_conv_block(
input=input,
filters=features,
stride=1)
main_flow = strided_conv_block(
input=main_flow,
filters=features,
stride=1)
main_flow = strided_conv_block(
input=main_flow,
filters=features,
stride=1)
return main_flow + input
def shared_flow(input, layers):
shared = xception_encoding_block_diff(input, features2, features3)
layers.append(shared)
shared = xception_encoding_block_diff(shared, features3, features4)
layers.append(shared)
shared = xception_encoding_block(shared, features5)
layers.append(shared)
shared = xception_middle_block(shared, features5)
layers.append(shared)
shared = xception_middle_block(shared, features5)
layers.append(shared)
shared = xception_middle_block(shared, features5)
layers.append(shared)
shared = xception_middle_block(shared, features5)
layers.append(shared)
return shared, layers
def terminating_fc(input):
fc = tf.reduce_mean(input, [1,2])
fc = tf.reshape(fc, (-1, features5))
fc = tf.contrib.layers.fully_connected(inputs=fc,
num_outputs=1,
activation_fn=None)
return fc
def max_pool(input, size=2, stride=2):
pool = tf.contrib.layers.max_pool2d(inputs=input,
kernel_size=size,
stride=stride,
padding='SAME')
return pool
testing_scale = 1
features1 = 64 // testing_scale
features2 = 128 // testing_scale
features3 = 256 // testing_scale
features4 = 512 // testing_scale
def discriminate(x):
"""Discriminator architecture"""
x = discr_conv_block(x, features1, 2, 1, kernel_size=4)
x = discr_conv_block(x, features2, 2, 1, kernel_size=4)
x = discr_conv_block(x, features3, 2, 1, kernel_size=4)
#x = discr_conv_block(x, features3, 1, 1, kernel_size=4)
x = discr_conv_block(x, features4, 2, 1, kernel_size=4)
x = tf.reduce_sum(x, axis=[1,2,3])
#shape = int_shape(x)
#x = tf.reshape(x, (-1, shape[1]*shape[2]*shape[3]))
#x = tf.contrib.layers.fully_connected(
# inputs=x, num_outputs=1, biases_initializer=None, activation_fn=None)
return x
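#discriminate is fully convolutional: four stride-2, spectrally normalised convolutions
#followed by a sum over the remaining spatial positions and channels, giving one
#unbounded score per image. The commented-out lines show an alternative dense head.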
'''Model building'''
with tf.variable_scope("small", reuse=reuse) as small_scope:
small = inputs[0]
small = discriminate(small)
with tf.variable_scope("medium", reuse=reuse) as medium_scope:
medium = inputs[1]
medium = discriminate(medium)
with tf.variable_scope("large", reuse=reuse) as large_scope:
large = inputs[2]
large = discriminate(large)
discriminations = []
for x in [small, medium, large]:
clipped = x#tf.clip_by_value(x, clip_value_min=0, clip_value_max=1000) #5*l2_norm
discriminations.append( clipped )
return discriminations
def experiment(feature, ground_truth, mask, learning_rate_ph, discr_lr_ph, beta1_ph,
discr_beta1_ph, norm_decay, train_outer_ph, ramp_ph, initialize):
def pad(tensor, size):
d1_pad = size[0]
d2_pad = size[1]
paddings = tf.constant([[0, 0], [d1_pad, d1_pad], [d2_pad, d2_pad], [0, 0]], dtype=tf.int32)
padded = tf.pad(tensor, paddings, mode="REFLECT")
return padded
def gaussian_kernel(size: int,
mean: float,
std: float,
):
"""Makes 2D gaussian Kernel for convolution."""
d = tf.distributions.Normal(mean, std)
vals = d.prob(tf.range(start = -size, limit = size + 1, dtype = tf.float32))
gauss_kernel = tf.einsum('i,j->ij', vals, vals)
return gauss_kernel / tf.reduce_sum(gauss_kernel)
def blur(image):
gauss_kernel = gaussian_kernel( 2, 0., 2.5 )
#Expand dimensions of `gauss_kernel` for `tf.nn.conv2d` signature
gauss_kernel = gauss_kernel[:, :, tf.newaxis, tf.newaxis]
#Convolve
image = pad(image, (2,2))
return tf.nn.conv2d(image, gauss_kernel, strides=[1, 1, 1, 1], padding="VALID")
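#blur convolves each image with a 5x5 Gaussian kernel (size=2 gives 2*2+1 taps per axis,
#std 2.5); reflect-padding by 2 pixels beforehand keeps the output the same size as the input.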
def get_multiscale_crops(input, multiscale_channels=1):
"""Assumes square inputs"""
input = pad(input, (2*discr_size, 2*discr_size)) #Extra padding to reduce periodic artefacts
s = int_shape(input)
small = tf.random_crop(
input,
size=(batch_size, discr_size, discr_size, multiscale_channels))
small = tf.image.resize_images(small, (discr_size, discr_size))
medium = tf.random_crop(
input,
size=(batch_size, 2*discr_size, 2*discr_size, multiscale_channels))
medium = tf.image.resize_images(medium, (discr_size, discr_size))
large = tf.random_crop(
input,
size=(batch_size, 4*discr_size, 4*discr_size, multiscale_channels))
large = tf.image.resize_images(large, (discr_size, discr_size))
return small, medium, large
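#Because the generator output, ground truth and X_hat are concatenated along the channel
#axis before cropping, each random crop covers the same region of all three. Crops are
#taken at discr_size, 2x and 4x that size, and each is resized back to discr_size so the
#three discriminator scales receive equally sized inputs.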
#Generator
feature = tf.reshape(feature, [-1, cropsize, cropsize, channels])
feature_small = tf.image.resize_images(feature, (cropsize//2, cropsize//2))
truth = tf.reshape(ground_truth, [-1, cropsize, cropsize, channels])
truth_small = tf.image.resize_images(truth, (cropsize//2, cropsize//2))
small_mask = tf.image.resize_images(mask, (cropsize//2, cropsize//2))
if initialize:
print("Started initialization")
_, _ = generator_architecture(
feature, feature_small, mask, small_mask, norm_decay, init_pass=True)
print("Initialized")
output_inner, output_outer = generator_architecture(
feature, feature_small, mask, small_mask, norm_decay, init_pass=False)
print("Architecture ready")
#Blurred images
blur_truth_small = blur(truth_small)
blur_output_inner = blur(output_inner)
blur_truth = blur(truth)
blur_output_outer = blur(output_outer)
#Trainable parameters
model_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="Network")
model_params_inner = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="Network/Inner/Inner")
model_params_trainer = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="Network/Inner/Trainer")
model_params_outer = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="Network/Outer")
##Discriminators
#Intermediate image for gradient penalty calculation
epsilon = tf.random_uniform(
shape=[2, 1, 1, 1, 1],
minval=0.,
maxval=1.)
X_hat_outer = (1-epsilon[0])*truth + epsilon[0]*output_outer
X_hat_inner = (1-epsilon[1])*blur_truth_small + epsilon[1]*output_inner
discr_inputs_outer = [output_outer, truth, X_hat_outer]
discr_inputs_inner = [output_inner, blur_truth_small, X_hat_inner]
#Crop images at multiple scales at the same places for each scale
concat_outer = tf.concat(discr_inputs_outer, axis=3)
concat_inner = tf.concat(discr_inputs_inner, axis=3)
num_channels_outer = len(discr_inputs_outer)
num_channels_inner = len(discr_inputs_inner)
multiscale_crops_outer = get_multiscale_crops(concat_outer, multiscale_channels=num_channels_outer)
multiscale_crops_inner = get_multiscale_crops(concat_inner, multiscale_channels=num_channels_inner)
multiscale_crops_outer = [tf.unstack(crop, axis=3) for crop in multiscale_crops_outer]
multiscale_crops_inner = [tf.unstack(crop, axis=3) for crop in multiscale_crops_inner]
#Sort crops into categories
shape = (batch_size, discr_size, discr_size, channels)
crops_set_outer = []
for crops in multiscale_crops_outer:
crops_set_outer.append( [tf.reshape(unstacked, shape) for unstacked in crops] )
crops_set_inner = []
for crops in multiscale_crops_inner:
crops_set_inner.append( [tf.reshape(unstacked, shape) for unstacked in crops] )
#Get intermediate representations
multiscale_xhat_outer = [m[2] for m in crops_set_outer]
multiscale_xhat_inner = [m[2] for m in crops_set_inner]
#Concatenate so the crops can be processed as a single batch
multiscale_outer = []
for crops in crops_set_outer:
multiscale_outer.append( tf.concat(crops, axis=0) )
multiscale_inner = []
for crops in crops_set_inner:
multiscale_inner.append( tf.concat(crops, axis=0) )
_discrimination_outer = discriminator_architecture( multiscale_outer )
_discrimination_inner = discriminator_architecture( multiscale_inner, reuse=True )
model_params_discr_small = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="GAN/Discr/small")
model_params_discr_medium = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="GAN/Discr/medium")
model_params_discr_large = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="GAN/Discr/large")
model_params_discrs = [model_params_discr_small,
model_params_discr_medium,
model_params_discr_large]
#Separate batch into discrimination categories
discr_of_output_outer = [d[0] for d in _discrimination_outer]
discr_of_truth = [d[1] for d in _discrimination_outer]
discr_of_X_hat_outer = [d[2] for d in _discrimination_outer]
discr_of_output_inner = [d[0] for d in _discrimination_inner]
discr_of_truth_small = [d[1] for d in _discrimination_inner]
discr_of_X_hat_inner = [d[2] for d in _discrimination_inner]
pred_real_outer = 0.
pred_fake_outer = 0.
avg_d_grads_outer = 0.
d_losses_outer = []
pred_real_inner = 0.
pred_fake_inner = 0.
avg_d_grads_inner = 0.
d_losses_inner = []
wass_weight = 1.
gradient_penalty_weight = 10.
l2_inner_weight = 5.e-5
l2_outer_weight = 5.e-5
def get_gradient_penalty(_discr_of_X_hat, _multiscale_xhat):
grad_D_X_hat = tf.gradients(_discr_of_X_hat, [_multiscale_xhat])[0]
red_idx = [i for i in range(2, _multiscale_xhat.shape.ndims)]
slopes = tf.sqrt(1.e-8+tf.reduce_sum(tf.square(grad_D_X_hat), axis=red_idx))
gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2)
return gradient_penalty
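#WGAN-GP style penalty: the critic's gradient with respect to the interpolated sample
#X_hat is pushed towards unit norm (the 1.e-8 term keeps the sqrt differentiable at zero).
#In the loss construction below the penalty calls are currently commented out and
#replaced with 0.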
##Losses and train ops
wass_loss_for_gen_outer = 0.
wass_loss_for_gen_inner = 0.
wass_loss_for_discr_outer = 0.
wass_loss_for_discr_inner = 0.
for i in range(3): #Discrimination is on 3 scales
#wasserstein_loss_outer = discr_of_output_outer[i] - discr_of_truth[i]
#wasserstein_loss_inner = discr_of_output_inner[i] - discr_of_truth_small[i]
#wass_loss_for_discr_outer += wasserstein_loss_outer
#wass_loss_for_discr_inner += wasserstein_loss_inner
#wass_loss_for_gen_outer += -discr_of_output_outer[i]
#wass_loss_for_gen_inner += -discr_of_output_inner[i]
gradient_penalty_outer = 0.#get_gradient_penalty(discr_of_X_hat_outer[i], multiscale_xhat_outer[i])
gradient_penalty_inner = 0.#get_gradient_penalty(discr_of_X_hat_inner[i], multiscale_xhat_inner[i])
wasserstein_loss_outer = tf.pow(discr_of_truth[i]-1., 2) + tf.pow(discr_of_output_outer[i], 2)
wasserstein_loss_inner = tf.pow(discr_of_truth_small[i]-1., 2) + tf.pow(discr_of_output_inner[i], 2)
wass_loss_for_discr_outer += wasserstein_loss_outer
wass_loss_for_discr_inner += wasserstein_loss_inner
wass_loss_for_gen_outer += tf.pow(discr_of_output_outer[i]-1., 2)
wass_loss_for_gen_inner += tf.pow(discr_of_output_inner[i]-1, 2)
pred_real_outer += discr_of_truth[i]
pred_fake_outer += discr_of_output_outer[i]
avg_d_grads_outer += gradient_penalty_outer
pred_real_inner += discr_of_truth_small[i]
pred_fake_inner += discr_of_output_inner[i]
avg_d_grads_inner += gradient_penalty_inner
d_loss_outer = wass_weight*wasserstein_loss_outer + gradient_penalty_weight*gradient_penalty_outer
d_loss_inner = wass_weight*wasserstein_loss_inner + gradient_penalty_weight*gradient_penalty_inner
d_losses_outer.append(d_loss_outer)
d_losses_inner.append(d_loss_inner)
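#Despite the 'wass' names, the active terms above are least-squares (LSGAN-style) losses:
#the discriminator pushes real scores towards 1 and generated scores towards 0, while the
#generator pushes the scores of its outputs towards 1. The original Wasserstein terms are
#kept in the comments above.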
mse_inner = 200*adjusted_mse(blur_truth_small, output_inner)
mse_inner = capper_fn(mse_inner)
#mse_inner = 2.*tf.cond( mse_inner < 1, lambda: mse_inner, lambda: tf.sqrt(mse_inner+1.e-8) )
#mse_inner = tf.minimum(mse_inner, 50)
mse_outer = 200*adjusted_mse(blur_truth, output_outer)
mse0 = tf.reduce_mean( (blur_truth - output_outer)**2 )
mse_outer = capper_fn(mse_outer)
#mse_outer = 2.*tf.cond( mse_outer < 1, lambda: mse_outer, lambda: tf.sqrt(mse_outer+1.e-8) )
#mse_outer = tf.minimum(mse_outer, 50) #Safeguard against error spikes
mse_outer_together = 200*adjusted_mse(blur_truth, blur_output_outer)
mse_outer_together = capper_fn(mse_outer_together)
#mse_outer_together = 2.*tf.cond( mse_outer < 1, lambda: mse_outer, lambda: tf.sqrt(mse_outer+1.e-8) )
#mse_inner = 10*tf.reduce_mean(tf.abs( blur_truth_small - blur_output_inner ))
#mse_outer = 10*tf.reduce_mean(tf.abs( blur_truth - blur_output_outer ))
loss = mse_outer_together + wass_loss_for_gen_outer
loss_inner = mse_inner
loss_outer = mse_outer
train_ops_discr = []
for i in range(3):
d_loss = tf.cond( train_outer_ph, lambda: d_losses_outer[i], lambda: d_losses_inner[i] )
d_train_op = tf.train.AdamOptimizer(discr_lr_ph, 0.9).minimize(
d_loss, var_list=model_params_discrs[i])
train_ops_discr.append(d_train_op)
#Provision inner network with an ancillary loss tower
train_op_trainer = tf.train.AdamOptimizer(learning_rate_ph, 0.9).minimize(
2*loss_inner, var_list=model_params_trainer)
train_op_inner_start = tf.train.AdamOptimizer(learning_rate_ph, 0.9).minimize(
loss_inner+loss_outer, var_list=model_params_inner)
train_op_inner_end = tf.train.AdamOptimizer(learning_rate_ph, 0.9).minimize(
loss_inner+loss, var_list=model_params_inner)
train_op_outer_start = tf.train.AdamOptimizer(learning_rate_ph, 0.9).minimize(
loss_outer, var_list=model_params_outer)
train_op_outer_end = tf.train.AdamOptimizer(learning_rate_ph, 0.9).minimize(
loss, var_list=model_params_outer)
start_train_ops = [train_op_inner_start, train_op_outer_start, train_op_trainer]
end_train_ops = [train_op_inner_end, train_op_outer_end, train_op_trainer]
errors = tf.to_double((100*blur_truth - 100*output_outer)**2)
return {'start_train_ops': start_train_ops,
'end_train_ops': end_train_ops,
'train_ops_discr': train_ops_discr,
'output_inner': output_inner,
'output_outer': output_outer,
'mse_inner': mse_inner,
'mse_outer': mse_outer,
'wass_loss_inner': wass_loss_for_gen_inner,
'wass_loss_outer': wass_loss_for_gen_outer,
'wass_loss_d_inner': wass_loss_for_discr_inner,
'wass_loss_d_outer': wass_loss_for_discr_outer,
'errors': errors,
"mse0": mse0
}
def flip_rotate(img):
"""Applies a random flip || rotation to the image, possibly leaving it unchanged"""
choice = np.random.randint(0, 8)
if choice == 0:
return img
if choice == 1:
return np.rot90(img, 1)
if choice == 2:
return np.rot90(img, 2)
if choice == 3:
return np.rot90(img, 3)
if choice == 4:
return np.flip(img, 0)
if choice == 5:
return np.flip(img, 1)
if choice == 6:
return np.flip(np.rot90(img, 1), 0)
if choice == 7:
return np.flip(np.rot90(img, 1), 1)
def load_image(addr, resize_size=cropsize, img_type=np.float32):
"""Read an image and make sure it is of the correct type. Optionally resize it"""
#addr = "Z:/Jeffrey-Ede/models/stem-random-walk-nin-20-1/truth-1000.tif"
try:
img = imread(addr, mode='F')
except:
img = np.zeros((cropsize,cropsize))
print("Image read failed")
if resize_size and resize_size != cropsize:
img = cv2.resize(img, (resize_size, resize_size), interpolation=cv2.INTER_AREA)
return img.astype(img_type)
def scale0to1(img):
"""Rescale image between 0 and 1"""
img = img.astype(np.float32)
min = np.min(img)
max = np.max(img)
if np.absolute(min-max) < 1.e-6:
img.fill(0.5)
else:
img = (img-min) / (max-min)
return img.astype(np.float32)
def norm_img(img):
min = np.min(img)
max = np.max(img)
if np.absolute(min-max) < 1.e-6:
img.fill(0.)
else:
a = 0.5*(min+max)
b = 0.5*(max-min)
img = (img-a) / b
return img.astype(np.float32)
def preprocess(img):
img[np.isnan(img)] = 0.
img[np.isinf(img)] = 0.
img = norm_img(img)
return img
def gen_random_walk(channel_width, channel_height=cropsize, amplitude=1, beta1=0., shift=0., steps=10):
walk = np.zeros((int(np.ceil(channel_width+shift)), channel_height))
halfway = (channel_width-1)/2
center = halfway+shift
size = int(np.ceil(channel_width+shift))
mom = 0.
y = 0.
for i in range(channel_height):
y1 = y
#Get new position and adjust momentum
step_y = random.randint(0, 1)
if step_y == 1:
mom = beta1*mom + (1-beta1)*amplitude*(1 + np.random.normal())
y += mom
else:
y = amplitude*(-1 + np.random.normal())
if y < -halfway:
y = -halfway
mom = -mom
elif y > halfway:
y = halfway
mom = -mom
#Move to position in steps
y2 = y
scale = np.sqrt(1+(y2-y1)**2)
for j in range(steps):
x = (j+1)/steps
y = (y2-y1)*x + y1
y_idx = center+y
if y_idx != np.ceil(y_idx):
if int(y_idx) < size:
walk[int(y_idx), i] += scale*(np.ceil(y_idx) - y_idx)/steps
if int(y_idx)+1 < size:
walk[int(y_idx)+1, i] += scale*(1.-(np.ceil(y_idx) - y_idx))/steps
else:
walk[int(y_idx), i] = scale*1
return walk, size
#def make_mask(use_frac, amp, steps):
# channel_size = (2+np.sqrt(4-4*4*use_frac)) / (2*use_frac)
# num_channels = cropsize / channel_size
# mask = np.zeros( (cropsize, cropsize) )
# for i in range( int(num_channels) ):
# shift = i*channel_size - np.floor(i*channel_size)
# walk, size = gen_random_walk(channel_width=channel_size, amplitude=amp, beta1=0.5, shift=shift, steps=steps)
# lower_idx = np.floor(i*channel_size)
# upper_idx = int(lower_idx)+size
# if upper_idx < cropsize:
# mask[int(lower_idx):upper_idx, :] = walk
# else:
# diff = int(upper_idx)-int(cropsize)
# mask[int(lower_idx):int(upper_idx)-diff, :] = walk[0:(size-diff), :]
# return mask
def make_mask(use_frac):
mask = inspiral(use_frac, cropsize)
return mask
def fill(data, invalid=None):
"""
Replace the value of invalid 'data' cells (indicated by 'invalid')
by the value of the nearest valid data cell
Input:
data: numpy array of any dimension
invalid: a binary array of same shape as 'data'. True cells set where data
value should be replaced.
If None (default), use: invalid = np.isnan(data)
Output:
Return a filled array.
"""
#import numpy as np
#import scipy.ndimage as nd
if invalid is None: invalid = np.isnan(data)
ind = nd.distance_transform_edt(invalid, return_distances=False, return_indices=True)
return data[tuple(ind)]
def gen_lq(img0):
img = norm_img(cv2.GaussianBlur(img0,(5,5), 2.5))
steps = 25
use_frac = 1/16
amp = 5.
mask = make_mask(use_frac)
#mask = mask.clip(0., 1.)
#print(np.sum(mask)/(512**2))
select = mask > 0
#Combine with uniform noise where low detection time makes data less meaningful
detection = mask*img0#mask * ( mask*img0 + 2*(1-mask)*np.random.rand(*img0.shape)*img )
lq = -np.ones(img.shape)
lq[select] = detection[select]
lq = scale0to1(lq)
lq = fill(lq, invalid=np.logical_not(mask.astype(np.bool)))
#Changed img to img0 halfway through training
return img0.astype(np.float32), lq.astype(np.float32), mask.astype(np.float32)
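#gen_lq builds a partial-scan training pair: a spiral mask covering roughly 1/16 of the
#pixels selects the 'detected' values, the remaining pixels are filled with the value of
#the nearest detected pixel, and the (truth, low-quality, mask) triple is returned.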
def inspiral(coverage, side, num_steps=10_000):
"""Duration spent at each location as a particle falls in a magnetic
field. Trajectory chosen so that the duration density is (approx.)
evenly distributed. Trajectory is calculated stepwise.
Args:
coverage: Average amount of time spent at a random pixel
side: Sidelength of square image that the motion is
inscribed on.
Returns:
Amounts of time spent at each pixel on a square image as a charged
particle inspirals.
"""
#Use size that is larger than the image
size = int(np.ceil(np.sqrt(2)*side))
#Maximum radius of motion
R = size/2
#Get constant in equation of motion
k = 1/ (2*np.pi*coverage)
#Maximum theta that is in the image
theta_max = R / k
#Equispaced steps
theta = np.arange(0, theta_max, theta_max/num_steps)
r = k * theta
#Convert to cartesian, with (0,0) at the center of the image
x = r*np.cos(theta) + R
y = r*np.sin(theta) + R
#Draw spiral
z = np.empty((x.size + y.size,), dtype=x.dtype)
z[0::2] = x
z[1::2] = y
z = list(z)
img = Image.new('F', (size,size), "black")
img_draw = ImageDraw.Draw(img)
img_draw = img_draw.line(z)
img = np.asarray(img)
img = img[size//2-side//2:size//2+side//2+side%2,
size//2-side//2:size//2+side//2+side%2]
#Blur path
#img = cv2.GaussianBlur(img,(3,3),0)
return img
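#The spiral uses r = k*theta with k = 1/(2*pi*coverage), so successive turns are
#1/coverage pixels apart and the expected dwell per pixel is approximately 'coverage'.
#The path is rasterised by drawing the polyline onto a float image and cropping the
#central side x side region.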
def record_parser(record):
"""Parse files and generate lower quality images from them."""
img = flip_rotate(preprocess(load_image(record)))
img, lq, mask = gen_lq(img)
if np.sum(np.isfinite(img)) != cropsize**2 or np.sum(np.isfinite(lq)) != cropsize**2:
img = np.zeros((cropsize,cropsize))
lq = mask*img
return lq, img, mask
def reshaper(img1, img2, img3):
img1 = tf.reshape(img1, [cropsize, cropsize, channels])
img2 = tf.reshape(img2, [cropsize, cropsize, channels])
img3 = tf.reshape(img3, [cropsize, cropsize, channels])
return img1, img2, img3
def input_fn(dir, subset, batch_size, num_shards):
"""Create a dataset from a list of filenames and shard batches from it"""
with tf.device('/cpu:0'):
dataset = tf.data.Dataset.list_files(dir+subset+"/"+"*.tif")
dataset = dataset.shuffle(buffer_size=shuffle_buffer_size)
dataset = dataset.repeat(num_epochs)
dataset = dataset.map(
lambda file: tf.py_func(record_parser, [file], [tf.float32, tf.float32, tf.float32]),
num_parallel_calls=num_parallel_calls)
#print(dataset.output_shapes, dataset.output_types)
dataset = dataset.map(reshaper, num_parallel_calls=num_parallel_calls)
#print(dataset.output_shapes, dataset.output_types)
dataset = dataset.batch(batch_size=batch_size)
dataset = dataset.prefetch(buffer_size=prefetch_buffer_size)
iter = dataset.make_one_shot_iterator()
img_batch = iter.get_next()
if num_shards <= 1:
# No GPU available or only 1 GPU.
return [img_batch[0]], [img_batch[1]], [img_batch[2]]
else:
image_batch = tf.unstack(img_batch, num=batch_size, axis=1)
feature_shards = [[] for i in range(num_shards)]
feature_shards_truth = [[] for i in range(num_shards)]
feature_shards_mask = [[] for i in range(num_shards)] #Masks are sharded alongside features and truths
for i in range(batch_size):
idx = i % num_shards
tensors = tf.unstack(image_batch[i], num=3, axis=0) #Three tensors per example: lq, truth, mask
feature_shards[idx].append(tensors[0])
feature_shards_truth[idx].append(tensors[1])
feature_shards_mask[idx].append(tensors[2])
feature_shards = [tf.parallel_stack(x) for x in feature_shards]
feature_shards_truth = [tf.parallel_stack(x) for x in feature_shards_truth]
feature_shards_mask = [tf.parallel_stack(x) for x in feature_shards_mask]
return feature_shards, feature_shards_truth, feature_shards_mask
def disp(img):
cv2.namedWindow('CV_Window', cv2.WINDOW_NORMAL)
cv2.imshow('CV_Window', scale0to1(img))
cv2.waitKey(0)
return
if disp_select:
disp(select)
class RunConfig(tf.contrib.learn.RunConfig):
def uid(self, whitelist=None):
"""
Generates a 'Unique Identifier' based on all internal fields.
Caller should use the uid string to check `RunConfig` instance integrity
in one session use, but should not rely on the implementation details, which
are subject to change.
Args:
whitelist: A list of the string names of the properties uid should not
include. If `None`, defaults to `_DEFAULT_UID_WHITE_LIST`, which
includes most properties the user is allowed to change.
Returns:
A uid string.
"""
if whitelist is None:
whitelist = run_config._DEFAULT_UID_WHITE_LIST
state = {k: v for k, v in self.__dict__.items() if not k.startswith('__')}
# Pop out the keys in whitelist.
for k in whitelist:
state.pop('_' + k, None)
ordered_state = collections.OrderedDict(
sorted(state.items(), key=lambda t: t[0]))
# For class instance without __repr__, some special cares are required.
# Otherwise, the object address will be used.
if '_cluster_spec' in ordered_state:
ordered_state['_cluster_spec'] = collections.OrderedDict(
sorted(ordered_state['_cluster_spec'].as_dict().items(), key=lambda t: t[0]))
return ', '.join(
'%s=%r' % (k, v) for (k, v) in six.iteritems(ordered_state))
def sigmoid(x,shift=0,mult=1):
return 1 / (1 + np.exp(-(x+shift)*mult))
def main(job_dir, data_dir, variable_strategy, num_gpus, log_device_placement,
num_intra_threads, **hparams):
tf.reset_default_graph()
temp = set(tf.all_variables())
with open(log_file, 'a') as log:
log.flush()
# The env variable is on deprecation path, default is set to off.
os.environ['TF_SYNC_ON_FINISH'] = '0'
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
#with tf.device("/cpu:0"):
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) #For batch normalisation windows
with tf.control_dependencies(update_ops):
# Session configuration.
sess_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False,#Once placement is correct, this fills up too much of the cmd window...
intra_op_parallelism_threads=num_intra_threads,
gpu_options=tf.GPUOptions(force_gpu_compatible=True, allow_growth=True))
config = RunConfig(
session_config=sess_config, model_dir=job_dir)
hparams=tf.contrib.training.HParams(
is_chief=config.is_chief,
**hparams)
img, img_truth, img_mask = input_fn(data_dir, 'test', batch_size, num_gpus)
img_val, img_truth_val, img_mask_val = input_fn(data_dir, 'test', batch_size, num_gpus)
with tf.Session(config=sess_config) as sess:
print("Session started")
sess.run(tf.initialize_variables(set(tf.all_variables()) - temp))
temp = set(tf.all_variables())
____img, ____img_truth, ____img_mask = sess.run([img, img_truth, img_mask])
img_ph = [tf.placeholder(tf.float32, shape=i.shape, name='img')
for i in ____img]
img_truth_ph = [tf.placeholder(tf.float32, shape=i.shape, name='img_truth')
for i in ____img_truth]
img_mask_ph = [tf.placeholder(tf.float32, shape=i.shape, name='img_mask')
for i in ____img_truth]
is_training = True
print("Dataflow established")
learning_rate_ph = tf.placeholder(tf.float32, name='learning_rate')
discr_learning_rate_ph = tf.placeholder(tf.float32, name='discr_learning_rate')
beta1_ph = tf.placeholder(tf.float32, shape=(), name='beta1')
discr_beta1_ph = tf.placeholder(tf.float32, shape=(), name='discr_beta1')
norm_decay_ph = tf.placeholder(tf.float32, shape=(), name='norm_decay')
train_outer_ph = tf.placeholder(tf.bool, name='train_outer')
ramp_ph = tf.placeholder(tf.float32, name='ramp')
#########################################################################################
exp_dict = experiment(img_ph[0], img_truth_ph[0], img_mask_ph[0],
learning_rate_ph, discr_learning_rate_ph,
beta1_ph, discr_beta1_ph, norm_decay_ph,
train_outer_ph, ramp_ph, initialize=True)
print("Created experiment")
sess.run( tf.initialize_variables( set(tf.all_variables())-temp),
feed_dict={beta1_ph: np.float32(0.9), discr_beta1_ph: np.float32(0.5)} )
train_writer = tf.summary.FileWriter( logDir, sess.graph )
#print(tf.all_variables())
saver = tf.train.Saver(max_to_keep=1)
#saver.restore(sess, tf.train.latest_checkpoint(model_dir+"model/"))
saver.restore(sess, tf.train.latest_checkpoint(model_dir+"notable_ckpts/"))
counter = 0
val_counter = 0
save_counter = counter
counter_init = counter+1
base_rate = 0.0001
bad_buffer_size = 50
bad_buffer_truth = []
bad_buffer = []
bad_buffer_mask = []
for _ in range(bad_buffer_size):
lq, buffer_img, mask = sess.run([img, img_truth, img_mask])
bad_buffer_truth.append(buffer_img)
bad_buffer.append(lq)
bad_buffer_mask.append(mask)
bad_buffer_prob = 0.2
bad_buffer_beta = 0.99
bad_buffer_thresh = 0.
bad_buffer_tracker = bad_buffer_prob
bad_buffer_tracker_beta = 0.99
bad_buffer_num_uses = 1
#Here our 'natural' statistics are MSEs
nat_stat_mean_beta = 0.99
nat_stat_std_dev_beta = 0.99
nat_stat_mean = 1.5
nat_stat2_mean = 4.
total_iters = 1_000_000
discr_beta1 = 0.5
discr_learning_rate = 0.0001
wass_iter = 1
train_discr_per_gen = 1 #Number of discriminator training ops per generator training op
num_steps_in_lr_decay = 8
mses = []
max_count = 50
total_errors = None
print("Starting training")
while True:
#Train for a couple of hours
time0 = time.time()
while time.time()-time0 < modelSavePeriod:
if not val_counter % val_skip_n:
val_counter = 0
val_counter += 1
if val_counter % val_skip_n: #Only increment on non-validation iterations
if not wass_iter % train_discr_per_gen:
counter += 1
wass_iter = 1
gen_train = True
else:
gen_train = False
wass_iter += 1
if counter < 0.25*total_iters:
rate = 3*base_rate
beta1 = 0.9
elif counter < 0.5*total_iters:
len_iters = 0.25*total_iters
rel_iters = counter - 0.25*total_iters
step = int(num_steps_in_lr_decay*rel_iters/len_iters)
rate = 3*base_rate * (1 - step/num_steps_in_lr_decay)
beta1 = 0.9 - 0.4*step/num_steps_in_lr_decay
#elif counter == total_iters//2:
# saver.save(sess, save_path=model_dir+"model/model", global_step=counter)
# quit()
elif counter < 0.75*total_iters:
rate = base_rate
beta1 = 0.5
elif counter < total_iters:
#Stepped linear decay
rel_iters = counter - 0.75*total_iters
step = int(num_steps_in_lr_decay*rel_iters/(0.25*total_iters))
rate = base_rate * ( 1. - step/num_steps_in_lr_decay )
beta1 = 0.5
if counter in [total_iters//2, total_iters]:
saver.save(sess, save_path=model_dir+"notable_ckpts/model", global_step=counter)
#if counter == total_iters:
quit()
learning_rate = np.float32(rate)
if counter < 0.5*total_iters:
norm_decay = 0.99
else:
norm_decay = 1.
ramp = 1.
train_outer = True
base_dict = { learning_rate_ph: learning_rate,
discr_learning_rate_ph: np.float32(discr_learning_rate),
beta1_ph: np.float32(beta1),
discr_beta1_ph: np.float32(discr_beta1),
norm_decay_ph: np.float32(norm_decay),
train_outer_ph: np.bool(train_outer),
ramp_ph: np.float32(ramp)
}
use_buffer = False#np.random.rand() < bad_buffer_num_uses*bad_buffer_prob
if use_buffer:
idx = np.random.randint(0, bad_buffer_size)
_img = bad_buffer[idx]
_img_truth = bad_buffer_truth[idx]
_img_mask = bad_buffer_mask[idx]
print("From buffer")
else:
_img, _img_truth, _img_mask = sess.run([img, img_truth, img_mask])
#disp(_img_mask[0][0])
dict = base_dict.copy()
dict.update( { img_ph[0]: _img[0], img_truth_ph[0]: _img_truth[0], img_mask_ph[0]: _img_mask[0] } )
if counter < max_count:
print(f"Iter: {counter}")
final_output = sess.run(exp_dict["output_outer"], feed_dict=dict)
Image.fromarray(_img[0].reshape(cropsize, cropsize).astype(np.float32)).save(
model_dir+f"partial_scan-{counter}.tif" )
Image.fromarray((0.5*final_output+0.5).reshape(cropsize, cropsize).astype(np.float32)).save(
model_dir+f"output-{counter}.tif" )
Image.fromarray((0.5*_img_truth[0]+0.5).reshape(cropsize, cropsize).astype(np.float32)).save(
model_dir+f"truth-{counter}.tif" )
Image.fromarray(_img_mask[0].reshape(cropsize, cropsize).astype(np.float32)).save(
model_dir+f"mask-{counter}.tif" )
else:
quit()
#if counter < 0.5*total_iters:
# train_ops = exp_dict['start_train_ops']
#else:
# train_ops = exp_dict['end_train_ops'] if gen_train else []
# train_ops += exp_dict['train_ops_discr']
#other_ops = [exp_dict['mse_inner'], exp_dict['mse_outer'], exp_dict['wass_loss_outer'], exp_dict['wass_loss_d_outer']]
#output_ops = [exp_dict['output_outer']]
#output_size = cropsize
##Save outputs occasionally
#if 0 <= counter <= 1 or not counter % save_result_every_n_batches or (0 <= counter < 10000 and not counter % 1000) or counter == counter_init:
# #Don't train on validation examples
# if not val_counter % val_skip_n:
# results = sess.run( other_ops + output_ops, feed_dict=dict )
# else:
# results = sess.run( other_ops + output_ops + train_ops, feed_dict=dict )
# mse_in = results[0]
# mse = results[1]
# wass_loss = results[2]
# wass_d_loss = results[3]
# output = results[len(other_ops)]
# try:
# save_input_loc = model_dir+"input-"+str(counter)+".tif"
# save_truth_loc = model_dir+"truth-"+str(counter)+".tif"
# save_output_loc = model_dir+"output-"+str(counter)+".tif"
# save_mask_loc = model_dir+"mask-"+str(counter)+".tif"
# Image.fromarray((_img[0]).reshape(cropsize, cropsize).astype(np.float32)).save( save_input_loc )
# Image.fromarray((0.5*_img_truth[0]+0.5).reshape(cropsize, cropsize).astype(np.float32)).save( save_truth_loc )
# Image.fromarray((0.5*output+0.5).reshape(output_size, output_size).astype(np.float32)).save( save_output_loc )
# Image.fromarray((_img_mask[0]).reshape(cropsize, cropsize).astype(np.float32)).save( save_mask_loc )
# except:
# print("Image save failed")
#else:
# #Don't train on validation examples
# if not val_counter % val_skip_n:
# results = sess.run( other_ops, feed_dict=dict )
# else:
# results = sess.run( other_ops + train_ops, feed_dict=dict )
# mse_in = results[0]
# mse = results[1]
# wass_loss = results[2]
# wass_d_loss = results[3]
#nat_stat_mean = (nat_stat_mean_beta*nat_stat_mean +
# (1.-nat_stat_mean_beta)*mse)
#nat_stat2_mean = (nat_stat_std_dev_beta*nat_stat2_mean +
# (1.-nat_stat_std_dev_beta)*mse**2)
#nat_stat_std_dev = np.sqrt(nat_stat2_mean - nat_stat_mean**2)
##Decide whether or not to add to buffer using natural statistics
#if not use_buffer and mse > bad_buffer_thresh:
# idx = np.random.randint(0, bad_buffer_size)
# bad_buffer[idx] = _img
# bad_buffer_truth[idx] = _img_truth
# bad_buffer_mask[idx] = _img_mask
# bad_buffer_tracker = ( bad_buffer_tracker_beta*bad_buffer_tracker +
# (1.-bad_buffer_tracker_beta) )
# print("To buffer")#, bad_buffer_thresh, bad_buffer_prob, bad_buffer_tracker)
#else:
# bad_buffer_tracker = bad_buffer_tracker_beta*bad_buffer_tracker
#if bad_buffer_tracker < bad_buffer_prob:
# step = nat_stat_mean-5*nat_stat_std_dev
# bad_buffer_thresh = bad_buffer_beta*bad_buffer_thresh + (1.-bad_buffer_beta)*step
#if bad_buffer_tracker >= bad_buffer_prob:
# step = nat_stat_mean+5*nat_stat_std_dev
# bad_buffer_thresh = bad_buffer_beta*bad_buffer_thresh + (1.-bad_buffer_beta)*step
#message = "NiN-44, Iter: {}, MSE_in: {}, MSE: {}, Wass G: {}, Wass D: {}, Val: {}".format(
# counter, 3.5/2*mse_in, 3.5/2*mse, wass_loss, wass_d_loss,
# 1 if not val_counter % val_skip_n else 0)
#print(message)
#try:
# log.write(message)
#except:
# print("Write to log failed")
#Save the model
#saver.save(sess, save_path=model_dir+"model/model", global_step=counter)
save_counter = counter
return
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--data-dir',
type=str,
default=data_dir,
help='The directory where the CIFAR-10 input data is stored.')
parser.add_argument(
'--job-dir',
type=str,
default=model_dir,
help='The directory where the model will be stored.')
parser.add_argument(
'--variable-strategy',
choices=['CPU', 'GPU'],
type=str,
default='GPU',
help='Where to locate variable operations')
parser.add_argument(
'--num-gpus',
type=int,
default=num_gpus,
help='The number of gpus used. Uses only CPU if set to 0.')
parser.add_argument(
'--log-device-placement',
action='store_true',
default=True,
help='Whether to log device placement.')
parser.add_argument(
'--num-intra-threads',
type=int,
default=0,
help="""\
Number of threads to use for intra-op parallelism. When training on CPU
set to 0 to have the system pick the appropriate number or alternatively
set it to the number of physical CPU cores.\
""")
parser.add_argument(
'--train-steps',
type=int,
default=80000,
help='The number of steps to use for training.')
parser.add_argument(
'--train-batch-size',
type=int,
default=batch_size,
help='Batch size for training.')
parser.add_argument(
'--eval-batch-size',
type=int,
default=batch_size,
help='Batch size for validation.')
parser.add_argument(
'--momentum',
type=float,
default=0.9,
help='Momentum for MomentumOptimizer.')
parser.add_argument(
'--learning-rate',
type=float,
default=0.1,
help="""\
This is the inital learning rate value. The learning rate will decrease
during training. For more details check the model_fn implementation in
this file.\
""")
parser.add_argument(
'--sync',
action='store_true',
default=False,
help="""\
If present when running in a distributed environment will run on sync mode.\
""")
parser.add_argument(
'--num-inter-threads',
type=int,
default=0,
help="""\
Number of threads to use for inter-op parallelism. If set to 0, the
system will pick an appropriate number.\
""")
parser.add_argument(
'--data-format',
type=str,
default="NHWC",
help="""\
If not set, the data format best for the training device is used.
Allowed values: channels_first (NCHW) channels_last (NHWC).\
""")
parser.add_argument(
'--batch-norm-decay',
type=float,
default=0.997,
help='Decay for batch norm.')
parser.add_argument(
'--batch-norm-epsilon',
type=float,
default=1e-5,
help='Epsilon for batch norm.')
args = parser.parse_args()
if args.num_gpus > 0:
assert tf.test.is_gpu_available(), "Requested GPUs but none found."
if args.num_gpus < 0:
raise ValueError(
'Invalid GPU count: \"--num-gpus\" must be 0 or a positive integer.')
if args.num_gpus == 0 and args.variable_strategy == 'GPU':
raise ValueError('num-gpus=0, CPU must be used as parameter server. Set'
'--variable-strategy=CPU.')
if args.num_gpus != 0 and args.train_batch_size % args.num_gpus != 0:
raise ValueError('--train-batch-size must be multiple of --num-gpus.')
if args.num_gpus != 0 and args.eval_batch_size % args.num_gpus != 0:
raise ValueError('--eval-batch-size must be multiple of --num-gpus.')
main(**vars(args))
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES",
"TF_ENABLE_WINOGRAD_NONFUSED",
"TF_SYNC_ON_FINISH"
] |
[]
|
["CUDA_VISIBLE_DEVICES", "TF_ENABLE_WINOGRAD_NONFUSED", "TF_SYNC_ON_FINISH"]
|
python
| 3 | 0 | |
runtime/eth_test.py
|
#!/usr/bin/python
import sys
import socket
import struct
import time
import logging
import sys,getopt
import os
import random
import numpy
from matplotlib import pyplot as plt
IPADDR = os.environ.get('IP_ADDR')
if IPADDR is None: IPADDR = 'rflab1.lbl.gov' # 128.3.128.122
PORTNUM = 3000
global plot_ena, slow_ena
plot_ena=0
slow_ena=0
def three_bytes(ad):
" encode an integer as three bytes "
adx = struct.pack('!i',ad)
return adx[1:4]
def mem_gate_write_prep(alist, dlist):
" write register through mem_gateway "
p = struct.pack('!I',random.getrandbits(32))
p += struct.pack('!I',random.getrandbits(32))
for ix,ad in enumerate(alist):
# read commands include space for result
# print dlist[ix]
p += '\x00' + three_bytes(ad) + struct.pack('!I',dlist[ix])
#print p.encode('hex')
return p
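# Each request starts with 8 random bytes that the gateware echoes back (checked below as
# a header match); every command that follows is 8 bytes: one control byte (0x00 write,
# 0x10 read), a 3-byte address from three_bytes(), and 4 bytes of data (or padding that
# the read result overwrites).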
def mem_gate_write(s,p):
s.send(p)
r, addr = s.recvfrom(1024) # buffer size is 1024 bytes
#print r.encode('hex')
if (r[0:8] != p[0:8]):
print "header mismatch"
sys.exit(2)
#res=[] # build up result list here
#for ix in range(0, len(alist)):
# rh = (r[12+8*ix:16+8*ix])
# res.append(struct.unpack('!I',rh)[0])
## print "%6.6x: %s"%(alist[ix], rh.encode('hex'))
#return res
def mem_gate_read(s, alist):
" read config_romx "
p = struct.pack('!I',random.getrandbits(32))
p += struct.pack('!I',random.getrandbits(32))
for ad in alist:
# read commands include space for result
p += '\x10' + three_bytes(ad) + 4*' '
s.send(p)
r, addr = s.recvfrom(1024) # buffer size is 1024 bytes
if (r[0:8] != p[0:8]):
print "header mismatch"
sys.exit(2)
ra = r[ 8:12]
if (alist[0] + 0x10000000 != int(ra.encode('hex'),16)):
print 'echo first address %x %x'%(alist[0],int(ra.encode('hex'),16))
res=[] # build up result list here
for ix in range(0, len(alist)):
rv = r[12+8*ix:16+8*ix]
int_value=int(rv.encode('hex'),16)
res.append(int_value)
return res
def decode_lbnl_rom(dat):
" decode content of config_romx "
d = numpy.bitwise_and(dat, 0xff)
if (d[0] == 85):
user_l={1:"ldoolitt",2:"cswanson",3:"kasemir",4:"hengjie",5:"crofford",6:"meddeler",7:"baptiste",8:"llrf_oper",9:"hyaver",10:"dim",11:"begcbp",12:"ghuang",13:"luser",14:"kstefan",15:"cserrano",16:"asalom",17:"du",18:"yangjin",19:"lilima",20:"ernesto"}
user = user_l[d[9]] if d[9] in user_l else "unknown"
board_l={1:"mebt",2:"interim",3:"fcm",4:"avnet",5:"uxo",6:"llrf4",7:"av5t",8:"sp601",9:"sp605",10:"ml505",11:"ml506",12:"fllrf",13:"spec",14:"lx150t",15:"cute_wr",17:"ac701",18:"ml605",19:"kc705",99:"test"}
board = board_l[d[10]] if d[10] in board_l else "unknown"
print "DSP flavor: %d"%d[1]
print "build date: %4.4d-%2.2d-%2.2d"%(d[2]+2000,d[3],d[4])
print "build time: %2.2d:%2.2d UTC"%(d[5],d[6])
print "tool rev: %d.%d"%(d[8]/16,d[8]%16)
print "user: %d (%s)"%(d[9],user)
print "board type: %d (%s)"%(d[10],board)
gs=""
for ix in range(0,20):
gs+=chr(d[12+ix])
print "git commit: %s"%(gs.encode('hex'))
if d[32] == 170:
print "circle_aw: %d"%d[33]
print "mode_count: %d"%d[34]
print "mode_shift: %d"%d[35]
print "n_mech_modes: %d"%d[36]
print "df_scale: %d"%d[37]
print "simple_demo: %d"%d[38]
else:
print "no magic found %d"%d[0]
# circle_count, circle_stat, adc min/max (6 words), tag_now, tag_old, timestamp
def slow_decode(aux):
a = [aux[2*ix]*256+aux[2*ix+1] for ix in range(0,2)] # circle_buf.v
b = [aux[2*ix]*256+aux[2*ix+1] for ix in range(2,8)]
b = [bb if bb<32767 else bb-65536 for bb in b] # 3 x ADC min/max
tag_now = aux[16]
tag_old = aux[17]
c = aux[26:17:-1] # timestamp.v
t = 0
for cc in c: t=t*256+cc
t = t/32 # five false bits at bottom; this converts to actual clock ticks
#if not plot_ena:
if 1:
print a, b, tag_now, tag_old, t
# New!
def acknowledge_buffer(s):
mem_gate_write(s,mem_gate_write_prep([0,0,0,0,0,0x3800,0],[0,0,0,0,0,1,0]));
def read_mem_buf(s):
res=[]
while (not mem_gate_read(s,range(0,32))[0]>>8&1):
print 'circular buffer not ready yet'
time.sleep(0.02)
aux = mem_gate_read(s,range(0x2011,0x2031)) # "slow" readout registers
if slow_ena:
slow_decode(aux)
for index in range(0x4000,0x6000,0x40):
res.extend(mem_gate_read(s,range(index,index+0x40)))
# assume 8 bits selected in ch_keep
acknowledge_buffer(s)
return [res,aux]
def setup_sock():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
s.connect((IPADDR, PORTNUM))
# set up for address decoder in cryomodule.v, not larger.v
rom = mem_gate_read(s, range(0x10000,0x10000+48))
decode_lbnl_rom(rom)
return s
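# setup_sock opens a UDP socket to IPADDR:PORTNUM, then reads the 48-word config ROM at
# address 0x10000 and prints the build information via decode_lbnl_rom on every run.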
# Main procedure
def main(argv):
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
s = setup_sock()
if argv[0] == "config":
exit(0)
if plot_ena:
fig=plt.figure(1)
fig.show()
# send the address/value pairs created by param.py to the hardware
with open('larger_in.dat', 'r') as f:
addr_value=f.read().split('\n')
addr=[]
value=[]
for line in addr_value:
#if line:
# (ad, vd) = map(int, line.split())
# addr.append(ad)
# value.append(vd)
aa=line.split()
if aa:
addr.append(int(aa[0]))
v=int(aa[1])
if (v<0): v += 2**32
value.append(v)
mem_gate_write(s,mem_gate_write_prep(addr,value));
fcnt=0;
while (fcnt < 10 or plot_ena):
#mem_gate_write(s,mem_gate_write_prep(addr,value));
[res,aux]=read_mem_buf(s)
varray=numpy.array([x-65536 if x>32767 else x for x in res]).reshape([1024,8])
# numpy.savetxt("live%d.dat"%fcnt,res,'%6.0f')
fcnt += 1
if plot_ena:
plt.plot(varray)
fig.canvas.draw()
fig.clf()
else:
print "not a plot",fcnt
s.close()
if __name__ == "__main__":
argv = sys.argv[1:]
plot_ena = 'plot' in argv
slow_ena = 'slow' in argv
if plot_ena: from matplotlib import pyplot as plt
main(argv)
|
[] |
[] |
[
"IP_ADDR"
] |
[]
|
["IP_ADDR"]
|
python
| 1 | 0 | |
heterogeneous_graph.py
|
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
import torch
import numpy as np
import pickle
from models.TGDRP import TGDRP
from utils import *
from rdkit import DataStructs,Chem
from rdkit.Chem import AllChem
from scipy.stats import pearsonr
import argparse
dir = './data/similarity_augment/'
dict_dir = './data/similarity_augment/dict/'
with open(dict_dir + "cell_id2idx_dict", 'rb') as f:
cell_id2idx_dict = pickle.load(f)
with open(dict_dir + "drug_name_cell_id_ic50", 'rb') as f:
drug_name_cell_id_ic50 = pickle.load(f)
with open(dict_dir + "drug_idx_cell_idx_ic50", 'rb') as f:
drug_idx_cell_idx_ic50 = pickle.load(f)
with open(dict_dir + "drug_name2smiles_dict", 'rb') as f:
drug_name2smiles_dict = pickle.load(f)
with open(dict_dir + "drug_idx2smiles_dict", 'rb') as f:
drug_idx2smiles_dict = pickle.load(f)
with open(dict_dir + "drug_name2idx_dict", 'rb') as f:
drug_name2idx_dict = pickle.load(f)
with open(dict_dir + "cell_idx2id_dict", 'rb') as f:
cell_idx2id_dict = pickle.load(f)
with open(dict_dir + "drug_idx2name_dict", 'rb') as f:
drug_idx2name_dict = pickle.load(f)
with open(dict_dir + "cell_feature_normalized", 'rb') as f:
cell_feature_normalized = pickle.load(f)
with open(dict_dir + "cell_feature", 'rb') as f:
cell_feature = pickle.load(f)
def arg_parse():
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=42,
help='random seed (default: 42)')
parser.add_argument('--device', type=str, default='cuda:7',
help='device')
parser.add_argument('--knn', type=int, default=5,
help='knn')
parser.add_argument('--batch_size', type=int, default=128,
help='batch size (default: 128)')
parser.add_argument('--lr', type=float, default=0.0001,
help='learning rate (default: 0.0001)')
parser.add_argument('--layer_drug', type=int, default=3, help='layer for drug')
parser.add_argument('--dim_drug', type=int, default=128, help='hidden dim for drug')
parser.add_argument('--layer', type=int, default=2, help='number of GNN layer')
parser.add_argument('--hidden_dim', type=int, default=8, help='hidden dim for cell')
parser.add_argument('--weight_decay', type=float, default=0,
help='weight decay')
parser.add_argument('--dropout_ratio', type=float, default=0.2,
help='dropout ratio')
parser.add_argument('--epochs', type=int, default=300,
help='maximum number of epochs (default: 300)')
parser.add_argument('--patience', type=int, default=10,
help='patience for earlystopping (default: 10)')
parser.add_argument('--edge', type=str, default='PPI_0.95', help='edge for gene graph')
parser.add_argument('--mode', type=str, default='train', help='train or test')
parser.add_argument('--pretrain', type=int, default=0, help='pretrain')
parser.add_argument('--weight_path', type=str, default='',
help='filepath for pretrained weights')
return parser.parse_args(args=[])
def computing_sim_matrix():
if os.path.exists(dict_dir + "cell_sim_matrix") and os.path.exists(dict_dir + "drug_sim_matrix"):
with open(dict_dir+ "cell_sim_matrix", 'rb') as f:
cell_sim_matrix = pickle.load(f)
with open(dict_dir+ "drug_sim_matrix", 'rb') as f:
drug_sim_matrix = pickle.load(f)
return drug_sim_matrix, cell_sim_matrix
drug_sim_matrix = np.zeros((len(drug_name2idx_dict), len(drug_name2idx_dict)))
mi = [Chem.MolFromSmiles(drug_idx2smiles_dict[i]) for i in range(len(drug_name2idx_dict))]
fps = [AllChem.GetMorganFingerprint(x, 4) for x in mi]
for i in range(len(drug_name2idx_dict)):
for j in range(len(drug_name2idx_dict)):
if i != j:
drug_sim_matrix[i][j] = DataStructs.DiceSimilarity(fps[i],fps[j])
cell_sim_matrix = np.zeros((len(cell_id2idx_dict), len(cell_id2idx_dict)))
for i in range(len(cell_id2idx_dict)):
for j in range(len(cell_id2idx_dict)):
if i != j:
cell_sim_matrix[i][j], _ = pearsonr(cell_feature_normalized[i], cell_feature_normalized[j])
if cell_sim_matrix[i][j] < 0:
cell_sim_matrix[i][j] = 0
with open(dict_dir+ "cell_sim_matrix", 'wb') as f:
pickle.dump(cell_sim_matrix, f)
with open(dict_dir+ "drug_sim_matrix", 'wb') as f:
pickle.dump(drug_sim_matrix, f)
return drug_sim_matrix, cell_sim_matrix
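#Drug-drug similarity is the Dice similarity of radius-4 Morgan fingerprints; cell-cell
#similarity is the Pearson correlation of normalised expression features, with negative
#correlations clipped to zero. Both matrices are pickled so they are only computed once.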
def computing_knn(k):
drug_sim_matrix, cell_sim_matrix = computing_sim_matrix()
cell_sim_matrix_new = np.zeros_like(cell_sim_matrix)
for u in range(len(cell_id2idx_dict)):
v = cell_sim_matrix[u].argsort()[-6:-1]
cell_sim_matrix_new[u][v] = cell_sim_matrix[u][v]
drug_sim_matrix_new = np.zeros_like(drug_sim_matrix)
for u in range(len(drug_name2idx_dict)):
v = drug_sim_matrix[u].argsort()[-6:-1]
drug_sim_matrix_new[u][v] = drug_sim_matrix[u][v]
drug_edges = np.argwhere(drug_sim_matrix_new > 0)
cell_edges = np.argwhere(cell_sim_matrix_new > 0)
with open(dir + "edge/drug_cell_edges_{}_knn".format(k), 'wb') as f:
pickle.dump((drug_edges, cell_edges), f)
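#Note: argsort()[-6:-1] keeps 5 neighbours per node regardless of the k argument, which
#is only used to name the output file; the edges are the index pairs where the sparsified
#similarity matrices remain positive.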
if __name__ == '__main__':
computing_knn(5)
|
[] |
[] |
[
"CUDA_DEVICE_ORDER"
] |
[]
|
["CUDA_DEVICE_ORDER"]
|
python
| 1 | 0 | |
vendor/github.com/go-git/go-git/v5/config/config.go
|
// Package config contains the abstraction of multiple config files
package config
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strconv"
"github.com/go-git/go-billy/v5/osfs"
"github.com/go-git/go-git/v5/internal/url"
format "github.com/go-git/go-git/v5/plumbing/format/config"
"github.com/mitchellh/go-homedir"
)
const (
// DefaultFetchRefSpec is the default refspec used for fetch.
DefaultFetchRefSpec = "+refs/heads/*:refs/remotes/%s/*"
// DefaultPushRefSpec is the default refspec used for push.
DefaultPushRefSpec = "refs/heads/*:refs/heads/*"
)
// ConfigStorer generic storage of Config object
type ConfigStorer interface {
Config() (*Config, error)
SetConfig(*Config) error
}
var (
ErrInvalid = errors.New("config invalid key in remote or branch")
ErrRemoteConfigNotFound = errors.New("remote config not found")
ErrRemoteConfigEmptyURL = errors.New("remote config: empty URL")
ErrRemoteConfigEmptyName = errors.New("remote config: empty name")
)
// Scope defines the scope of a config file, such as local, global or system.
type Scope int
// Available ConfigScope's
const (
LocalScope Scope = iota
GlobalScope
SystemScope
)
// Config contains the repository configuration
// https://www.kernel.org/pub/software/scm/git/docs/git-config.html#FILES
type Config struct {
Core struct {
// IsBare if true this repository is assumed to be bare and has no
// working directory associated with it.
IsBare bool
// Worktree is the path to the root of the working tree.
Worktree string
// CommentChar is the character indicating the start of a
// comment for commands like commit and tag
CommentChar string
}
User struct {
// Name is the personal name of the author and the committer of a commit.
Name string
// Email is the email of the author and the committer of a commit.
Email string
}
Author struct {
// Name is the personal name of the author of a commit.
Name string
// Email is the email of the author of a commit.
Email string
}
Committer struct {
// Name is the personal name of the committer of a commit.
Name string
// Email is the email of the committer of a commit.
Email string
}
Pack struct {
// Window controls the size of the sliding window for delta
// compression. The default is 10. A value of 0 turns off
// delta compression entirely.
Window uint
}
Init struct {
// DefaultBranch Allows overriding the default branch name
// e.g. when initializing a new repository or when cloning
// an empty repository.
DefaultBranch string
}
// Remotes list of repository remotes, the key of the map is the name
// of the remote, should equal to RemoteConfig.Name.
Remotes map[string]*RemoteConfig
// Submodules list of repository submodules, the key of the map is the name
// of the submodule, should equal to Submodule.Name.
Submodules map[string]*Submodule
// Branches list of branches, the key is the branch name and should
// equal Branch.Name
Branches map[string]*Branch
// URLs list of url rewrite rules, if repo url starts with URL.InsteadOf value, it will be replaced with the
// key instead.
URLs map[string]*URL
// Raw contains the raw information of a config file. The main goal is
// preserve the parsed information from the original format, to avoid
// dropping unsupported fields.
Raw *format.Config
}
// NewConfig returns a new empty Config.
func NewConfig() *Config {
config := &Config{
Remotes: make(map[string]*RemoteConfig),
Submodules: make(map[string]*Submodule),
Branches: make(map[string]*Branch),
URLs: make(map[string]*URL),
Raw: format.New(),
}
config.Pack.Window = DefaultPackWindow
return config
}
// ReadConfig reads a config file from an io.Reader.
func ReadConfig(r io.Reader) (*Config, error) {
b, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
cfg := NewConfig()
if err = cfg.Unmarshal(b); err != nil {
return nil, err
}
return cfg, nil
}
// LoadConfig loads a config file from a given scope. The returned Config
// contains exclusively information from the given scope. If no config file
// is found for the given scope, an empty one is returned.
func LoadConfig(scope Scope) (*Config, error) {
if scope == LocalScope {
return nil, fmt.Errorf("LocalScope should be read from the a ConfigStorer.")
}
files, err := Paths(scope)
if err != nil {
return nil, err
}
for _, file := range files {
f, err := osfs.Default.Open(file)
if err != nil {
if os.IsNotExist(err) {
continue
}
return nil, err
}
defer f.Close()
return ReadConfig(f)
}
return NewConfig(), nil
}
// Paths returns the config file location for a given scope.
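// For the global scope the candidates are $XDG_CONFIG_HOME/git/config (when the
// variable is set), ~/.gitconfig and ~/.config/git/config, in that order; for the
// system scope the only candidate is /etc/gitconfig. LoadConfig uses the first
// file that exists.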
func Paths(scope Scope) ([]string, error) {
var files []string
switch scope {
case GlobalScope:
xdg := os.Getenv("XDG_CONFIG_HOME")
if xdg != "" {
files = append(files, filepath.Join(xdg, "git/config"))
}
home, err := homedir.Dir()
if err != nil {
return nil, err
}
files = append(files,
filepath.Join(home, ".gitconfig"),
filepath.Join(home, ".config/git/config"),
)
case SystemScope:
files = append(files, "/etc/gitconfig")
}
return files, nil
}
// Validate validates the fields and sets the default values.
func (c *Config) Validate() error {
for name, r := range c.Remotes {
if r.Name != name {
return ErrInvalid
}
if err := r.Validate(); err != nil {
return err
}
}
for name, b := range c.Branches {
if b.Name != name {
return ErrInvalid
}
if err := b.Validate(); err != nil {
return err
}
}
return nil
}
const (
remoteSection = "remote"
submoduleSection = "submodule"
branchSection = "branch"
coreSection = "core"
packSection = "pack"
userSection = "user"
authorSection = "author"
committerSection = "committer"
initSection = "init"
urlSection = "url"
fetchKey = "fetch"
urlKey = "url"
bareKey = "bare"
worktreeKey = "worktree"
commentCharKey = "commentChar"
windowKey = "window"
mergeKey = "merge"
rebaseKey = "rebase"
nameKey = "name"
emailKey = "email"
defaultBranchKey = "defaultBranch"
// DefaultPackWindow holds the number of previous objects used to
// generate deltas. The value 10 is the same used by git command.
DefaultPackWindow = uint(10)
)
// Unmarshal parses a git-config file and stores it.
func (c *Config) Unmarshal(b []byte) error {
r := bytes.NewBuffer(b)
d := format.NewDecoder(r)
c.Raw = format.New()
if err := d.Decode(c.Raw); err != nil {
return err
}
c.unmarshalCore()
c.unmarshalUser()
c.unmarshalInit()
if err := c.unmarshalPack(); err != nil {
return err
}
unmarshalSubmodules(c.Raw, c.Submodules)
if err := c.unmarshalBranches(); err != nil {
return err
}
if err := c.unmarshalURLs(); err != nil {
return err
}
return c.unmarshalRemotes()
}
func (c *Config) unmarshalCore() {
s := c.Raw.Section(coreSection)
if s.Options.Get(bareKey) == "true" {
c.Core.IsBare = true
}
c.Core.Worktree = s.Options.Get(worktreeKey)
c.Core.CommentChar = s.Options.Get(commentCharKey)
}
func (c *Config) unmarshalUser() {
s := c.Raw.Section(userSection)
c.User.Name = s.Options.Get(nameKey)
c.User.Email = s.Options.Get(emailKey)
s = c.Raw.Section(authorSection)
c.Author.Name = s.Options.Get(nameKey)
c.Author.Email = s.Options.Get(emailKey)
s = c.Raw.Section(committerSection)
c.Committer.Name = s.Options.Get(nameKey)
c.Committer.Email = s.Options.Get(emailKey)
}
func (c *Config) unmarshalPack() error {
s := c.Raw.Section(packSection)
window := s.Options.Get(windowKey)
if window == "" {
c.Pack.Window = DefaultPackWindow
} else {
winUint, err := strconv.ParseUint(window, 10, 32)
if err != nil {
return err
}
c.Pack.Window = uint(winUint)
}
return nil
}
func (c *Config) unmarshalRemotes() error {
s := c.Raw.Section(remoteSection)
for _, sub := range s.Subsections {
r := &RemoteConfig{}
if err := r.unmarshal(sub); err != nil {
return err
}
c.Remotes[r.Name] = r
}
// Apply insteadOf url rules
for _, r := range c.Remotes {
r.applyURLRules(c.URLs)
}
return nil
}
func (c *Config) unmarshalURLs() error {
s := c.Raw.Section(urlSection)
for _, sub := range s.Subsections {
r := &URL{}
if err := r.unmarshal(sub); err != nil {
return err
}
c.URLs[r.Name] = r
}
return nil
}
func unmarshalSubmodules(fc *format.Config, submodules map[string]*Submodule) {
s := fc.Section(submoduleSection)
for _, sub := range s.Subsections {
m := &Submodule{}
m.unmarshal(sub)
if m.Validate() == ErrModuleBadPath {
continue
}
submodules[m.Name] = m
}
}
func (c *Config) unmarshalBranches() error {
bs := c.Raw.Section(branchSection)
for _, sub := range bs.Subsections {
b := &Branch{}
if err := b.unmarshal(sub); err != nil {
return err
}
c.Branches[b.Name] = b
}
return nil
}
func (c *Config) unmarshalInit() {
s := c.Raw.Section(initSection)
c.Init.DefaultBranch = s.Options.Get(defaultBranchKey)
}
// Marshal returns Config encoded as a git-config file.
func (c *Config) Marshal() ([]byte, error) {
c.marshalCore()
c.marshalUser()
c.marshalPack()
c.marshalRemotes()
c.marshalSubmodules()
c.marshalBranches()
c.marshalURLs()
c.marshalInit()
buf := bytes.NewBuffer(nil)
if err := format.NewEncoder(buf).Encode(c.Raw); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
func (c *Config) marshalCore() {
s := c.Raw.Section(coreSection)
s.SetOption(bareKey, fmt.Sprintf("%t", c.Core.IsBare))
if c.Core.Worktree != "" {
s.SetOption(worktreeKey, c.Core.Worktree)
}
}
func (c *Config) marshalUser() {
s := c.Raw.Section(userSection)
if c.User.Name != "" {
s.SetOption(nameKey, c.User.Name)
}
if c.User.Email != "" {
s.SetOption(emailKey, c.User.Email)
}
s = c.Raw.Section(authorSection)
if c.Author.Name != "" {
s.SetOption(nameKey, c.Author.Name)
}
if c.Author.Email != "" {
s.SetOption(emailKey, c.Author.Email)
}
s = c.Raw.Section(committerSection)
if c.Committer.Name != "" {
s.SetOption(nameKey, c.Committer.Name)
}
if c.Committer.Email != "" {
s.SetOption(emailKey, c.Committer.Email)
}
}
func (c *Config) marshalPack() {
s := c.Raw.Section(packSection)
if c.Pack.Window != DefaultPackWindow {
s.SetOption(windowKey, fmt.Sprintf("%d", c.Pack.Window))
}
}
func (c *Config) marshalRemotes() {
s := c.Raw.Section(remoteSection)
newSubsections := make(format.Subsections, 0, len(c.Remotes))
added := make(map[string]bool)
for _, subsection := range s.Subsections {
if remote, ok := c.Remotes[subsection.Name]; ok {
newSubsections = append(newSubsections, remote.marshal())
added[subsection.Name] = true
}
}
remoteNames := make([]string, 0, len(c.Remotes))
for name := range c.Remotes {
remoteNames = append(remoteNames, name)
}
sort.Strings(remoteNames)
for _, name := range remoteNames {
if !added[name] {
newSubsections = append(newSubsections, c.Remotes[name].marshal())
}
}
s.Subsections = newSubsections
}
func (c *Config) marshalSubmodules() {
s := c.Raw.Section(submoduleSection)
s.Subsections = make(format.Subsections, len(c.Submodules))
var i int
for _, r := range c.Submodules {
section := r.marshal()
// the submodule section at config is a subset of the .gitmodule file
// we should remove the non-valid options for the config file.
section.RemoveOption(pathKey)
s.Subsections[i] = section
i++
}
}
func (c *Config) marshalBranches() {
s := c.Raw.Section(branchSection)
newSubsections := make(format.Subsections, 0, len(c.Branches))
added := make(map[string]bool)
for _, subsection := range s.Subsections {
if branch, ok := c.Branches[subsection.Name]; ok {
newSubsections = append(newSubsections, branch.marshal())
added[subsection.Name] = true
}
}
branchNames := make([]string, 0, len(c.Branches))
for name := range c.Branches {
branchNames = append(branchNames, name)
}
sort.Strings(branchNames)
for _, name := range branchNames {
if !added[name] {
newSubsections = append(newSubsections, c.Branches[name].marshal())
}
}
s.Subsections = newSubsections
}
func (c *Config) marshalURLs() {
s := c.Raw.Section(urlSection)
s.Subsections = make(format.Subsections, len(c.URLs))
var i int
for _, r := range c.URLs {
section := r.marshal()
// the submodule section at config is a subset of the .gitmodule file
// we should remove the non-valid options for the config file.
s.Subsections[i] = section
i++
}
}
func (c *Config) marshalInit() {
s := c.Raw.Section(initSection)
if c.Init.DefaultBranch != "" {
s.SetOption(defaultBranchKey, c.Init.DefaultBranch)
}
}
// RemoteConfig contains the configuration for a given remote repository.
type RemoteConfig struct {
// Name of the remote
Name string
// URLs are the URLs of a remote repository. The list must be non-empty; fetch
// always uses the first URL, while push uses all of them.
URLs []string
// insteadOfRulesApplied indicates whether the URLs have been modified by insteadOf rules
insteadOfRulesApplied bool
// originalURLs are the urls before applying insteadOf rules
originalURLs []string
// Fetch is the default set of refspecs for the fetch operation
Fetch []RefSpec
// raw is the representation of the subsection, filled in when marshal or
// unmarshal is called
raw *format.Subsection
}
// Validate validates the fields and sets the default values.
func (c *RemoteConfig) Validate() error {
if c.Name == "" {
return ErrRemoteConfigEmptyName
}
if len(c.URLs) == 0 {
return ErrRemoteConfigEmptyURL
}
for _, r := range c.Fetch {
if err := r.Validate(); err != nil {
return err
}
}
if len(c.Fetch) == 0 {
c.Fetch = []RefSpec{RefSpec(fmt.Sprintf(DefaultFetchRefSpec, c.Name))}
}
return nil
}
func (c *RemoteConfig) unmarshal(s *format.Subsection) error {
c.raw = s
fetch := []RefSpec{}
for _, f := range c.raw.Options.GetAll(fetchKey) {
rs := RefSpec(f)
if err := rs.Validate(); err != nil {
return err
}
fetch = append(fetch, rs)
}
c.Name = c.raw.Name
c.URLs = append([]string(nil), c.raw.Options.GetAll(urlKey)...)
c.Fetch = fetch
return nil
}
func (c *RemoteConfig) marshal() *format.Subsection {
if c.raw == nil {
c.raw = &format.Subsection{}
}
c.raw.Name = c.Name
if len(c.URLs) == 0 {
c.raw.RemoveOption(urlKey)
} else {
urls := c.URLs
if c.insteadOfRulesApplied {
urls = c.originalURLs
}
c.raw.SetOption(urlKey, urls...)
}
if len(c.Fetch) == 0 {
c.raw.RemoveOption(fetchKey)
} else {
var values []string
for _, rs := range c.Fetch {
values = append(values, rs.String())
}
c.raw.SetOption(fetchKey, values...)
}
return c.raw
}
func (c *RemoteConfig) IsFirstURLLocal() bool {
return url.IsLocalEndpoint(c.URLs[0])
}
func (c *RemoteConfig) applyURLRules(urlRules map[string]*URL) {
// save original urls
originalURLs := make([]string, len(c.URLs))
copy(originalURLs, c.URLs)
for i, url := range c.URLs {
if matchingURLRule := findLongestInsteadOfMatch(url, urlRules); matchingURLRule != nil {
c.URLs[i] = matchingURLRule.ApplyInsteadOf(c.URLs[i])
c.insteadOfRulesApplied = true
}
}
if c.insteadOfRulesApplied {
c.originalURLs = originalURLs
}
}
|
[
"\"XDG_CONFIG_HOME\""
] |
[] |
[
"XDG_CONFIG_HOME"
] |
[]
|
["XDG_CONFIG_HOME"]
|
go
| 1 | 0 | |
main.py
|
import os
from discord.ext import commands
from dotenv import load_dotenv
from botutil.prefix import PrefixFile
load_dotenv()
prefix_manager = PrefixFile()
async def prefix_load(bot, message):
prefix = await prefix_manager.get(message.guild.id)
return commands.when_mentioned_or(prefix)(bot, message)
bot = commands.Bot(command_prefix=prefix_load)
@bot.event
async def on_ready():
print('init ok')
@bot.event
async def on_command_error(ctx, error):
await ctx.send('そんなコマンド無いで')
@bot.command()
async def hello(ctx):
await ctx.send("アカネチャンやで~")
@bot.command()
async def prefix(ctx, prefix=None):
if ctx.author.bot:
return
if prefix is None:
now_prefix = await prefix_manager.get(ctx.guild.id)
await ctx.send('アカネチャン呼び出す時の命令は "{0}" やで'.format(now_prefix))
else:
result = await prefix_manager.set(ctx.guild.id, prefix)
await ctx.send(
'アカネチャン呼び出す時の命令が "{0}" から "{1}" に変わったで~'
.format(result[0], result[1]))
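# The bot token is read from DISCORD_BOT_TOKEN (loaded from the environment or a .env file via load_dotenv above).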
bot.run(os.environ.get('DISCORD_BOT_TOKEN'))
|
[] |
[] |
[
"DISCORD_BOT_TOKEN"
] |
[]
|
["DISCORD_BOT_TOKEN"]
|
python
| 1 | 0 | |
g8/main.go
|
// The MIT License (MIT)
//
// Copyright (c) 2014 Matt Ho
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package main
import (
"fmt"
"github.com/codegangsta/cli"
"github.com/savaki/go-giter8/git"
"github.com/savaki/properties"
"log"
"os"
"strings"
)
func main() {
app := cli.NewApp()
app.Name = "giter8"
app.Usage = "generate templates using github"
app.Version = "0.1"
app.Commands = []cli.Command{
commandNew,
}
app.Run(os.Args)
}
func check(err error) {
if err != nil {
log.Fatalln(err)
}
}
// helper to determine if path exists
func exists(path string) bool {
_, err := os.Stat(path)
return !os.IsNotExist(err)
}
// exportRepo(git, loyal3/service-template-finatra.g8) => nil
func exportRepo(gitpath, repo string) error {
if exists(Path(repo)) {
return nil
}
user := strings.Split(repo, "/")[0]
client := git.New(gitpath, Path(user))
client.Verbose = Verbose
return client.Export(repo)
}
// Path returns a path under our temporary storage location ($HOME/.go-giter8)
func Path(dirs ...string) string {
subdir := strings.Join(dirs, "/")
return fmt.Sprintf("%s/.go-giter8/%s", os.Getenv("HOME"), subdir)
}
func readFields(repo string) (map[string]string, error) {
// assume giter8 format
path := Path(repo, "src/main/g8/default.properties")
p, err := properties.LoadFile(path, properties.UTF8)
if err != nil {
return map[string]string{}, nil
}
// ask the user for input on each of the fields
fields := map[string]string{}
for _, key := range p.Keys() {
defaultValue := p.GetString(key, "")
fmt.Printf("%s [%s]: ", key, defaultValue)
var value string
fmt.Scanln(&value)
if strings.TrimSpace(value) == "" {
fields[key] = defaultValue
} else {
fields[key] = value
}
}
return fields, nil
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
sauna/reload/__init__.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2011 University of Jyväskylä and Contributors.
#
# All Rights Reserved.
#
# Authors:
# Esa-Matti Suuronen <[email protected]>
# Asko Soukka <[email protected]>
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
"""
sauna.reload
============
Enable sauna.reload's Zope patches and deferred z3c.autoinclude includes
by adding ``zope-conf-additional = %import sauna.reload``
into your buildout's part with *plone.recipe.zope2instance*-recipe::
[instance]
recipe = plone.recipe.zope2instance
zope-conf-additional = %import sauna.reload
"""
import sys
import os
from sauna.reload.forkloop import ForkLoop
from sauna.reload.reloadpaths import ReloadPaths
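# RELOAD_PATH is read as a colon-separated list of paths, each resolved relative to the current working directory.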
reload_paths = ReloadPaths([
os.path.join(os.getcwd(), p)
for p in os.environ.get("RELOAD_PATH", "").split(":")
if p
])
forkloop = ForkLoop()
forkloop.startBootTimer()
# Hook into the PEP 302 loader
from sauna.reload.monkeypatcher import MonkeyPatchingLoader
__loader__ = MonkeyPatchingLoader(sys.modules[__name__])
# Prepopulate platform.uname, before it gets lost in the stack
import platform
try:
uname = platform.uname()
except IOError:
# platform.uname() may throw IOError when called from a forked process
uname = ""
|
[] |
[] |
[
"RELOAD_PATH"
] |
[]
|
["RELOAD_PATH"]
|
python
| 1 | 0 | |
jupyter_notebook_config.py
|
# Configuration file for jupyter-notebook.
import os
from IPython.lib import passwd
c = get_config()
c.NotebookApp.ip = '0.0.0.0'
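# JUPYTER_PORT and JUPYTER_BASEURL override the port (default 8888) and base URL (default "/"), e.g. JUPYTER_PORT=8080 JUPYTER_BASEURL=/lab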
c.NotebookApp.port = int(os.getenv('JUPYTER_PORT', 8888))
c.NotebookApp.open_browser = False
c.NotebookApp.notebook_dir = '/root/volume'
c.NotebookApp.base_url = os.getenv("JUPYTER_BASEURL", "/")
c.NotebookApp.iopub_data_rate_limit = 100000000.0  # (bytes/sec)
c.NotebookApp.rate_limit_window = 10.0  # (secs)
# sets a password if JUPYTER_PASSWORD is set in the environment
if 'JUPYTER_PASSWORD' in os.environ:
#c.NotebookApp.token = passwd(os.environ['JUPYTER_PASSWORD'])
c.NotebookApp.password = passwd(os.environ['JUPYTER_PASSWORD'])
del os.environ['JUPYTER_PASSWORD']
print("jupyter config..")
print(c)
|
[] |
[] |
[
"JUPYTER_BASEURL",
"JUPYTER_PASSWORD",
"JUPYTER_PORT"
] |
[]
|
["JUPYTER_BASEURL", "JUPYTER_PASSWORD", "JUPYTER_PORT"]
|
python
| 3 | 0 | |
mc/debug.go
|
package mc
import (
"log"
"os"
"strings"
)
var (
dlog = log.New(os.Stderr, "mc: ", log.Lmicroseconds|log.Lshortfile)
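// debug logging is enabled when the STTRACE environment variable contains "mc", e.g. STTRACE=mc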
debug = strings.Contains(os.Getenv("STTRACE"), "mc")
)
|
[
"\"STTRACE\""
] |
[] |
[
"STTRACE"
] |
[]
|
["STTRACE"]
|
go
| 1 | 0 | |
go/vt/mysqlctl/mysqld.go
|
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Commands for controlling an external mysql process.
Some commands are issued as exec'd tools, some are handled by connecting via
the mysql protocol.
*/
package mysqlctl
import (
"bufio"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"sync"
"time"
"bytes"
log "github.com/golang/glog"
"github.com/youtube/vitess/go/mysql"
"github.com/youtube/vitess/go/stats"
"github.com/youtube/vitess/go/vt/dbconfigs"
"github.com/youtube/vitess/go/vt/dbconnpool"
vtenv "github.com/youtube/vitess/go/vt/env"
"github.com/youtube/vitess/go/vt/hook"
"github.com/youtube/vitess/go/vt/mysqlctl/mysqlctlclient"
"golang.org/x/net/context"
)
var (
dbaPoolSize = flag.Int("dba_pool_size", 20, "Size of the connection pool for dba connections")
dbaIdleTimeout = flag.Duration("dba_idle_timeout", time.Minute, "Idle timeout for dba connections")
appPoolSize = flag.Int("app_pool_size", 40, "Size of the connection pool for app connections")
appIdleTimeout = flag.Duration("app_idle_timeout", time.Minute, "Idle timeout for app connections")
socketFile = flag.String("mysqlctl_socket", "", "socket file to use for remote mysqlctl actions (empty for local actions)")
mycnfTemplateFile = flag.String("mysqlctl_mycnf_template", "", "template file to use for generating the my.cnf file during server init")
// masterConnectRetry is used in 'SET MASTER' commands
masterConnectRetry = flag.Duration("master_connect_retry", 10*time.Second, "how long to wait in between slave -> master connection attempts. Only precise to the second.")
dbaMysqlStats = stats.NewTimings("MysqlDba")
allprivsMysqlStats = stats.NewTimings("MysqlAllPrivs")
appMysqlStats = stats.NewTimings("MysqlApp")
)
// Mysqld is the object that represents a mysqld daemon running on this server.
type Mysqld struct {
config *Mycnf
dbcfgs *dbconfigs.DBConfigs
dbaPool *dbconnpool.ConnectionPool
appPool *dbconnpool.ConnectionPool
tabletDir string
// mutex protects the fields below.
mutex sync.Mutex
onTermFuncs []func()
cancelWaitCmd chan struct{}
}
// NewMysqld creates a Mysqld object based on the provided configuration
// and connection parameters.
func NewMysqld(config *Mycnf, dbcfgs *dbconfigs.DBConfigs, dbconfigsFlags dbconfigs.DBConfigFlag) *Mysqld {
result := &Mysqld{
config: config,
dbcfgs: dbcfgs,
tabletDir: path.Dir(config.DataDir),
}
// Create and open the connection pool for dba access.
if dbconfigs.DbaConfig&dbconfigsFlags != 0 {
result.dbaPool = dbconnpool.NewConnectionPool("DbaConnPool", *dbaPoolSize, *dbaIdleTimeout)
result.dbaPool.Open(&dbcfgs.Dba, dbaMysqlStats)
}
// Create and open the connection pool for app access.
if dbconfigs.AppConfig&dbconfigsFlags != 0 {
result.appPool = dbconnpool.NewConnectionPool("AppConnPool", *appPoolSize, *appIdleTimeout)
result.appPool.Open(&dbcfgs.App, appMysqlStats)
}
return result
}
// Cnf returns the mysql config for the daemon
func (mysqld *Mysqld) Cnf() *Mycnf {
return mysqld.config
}
// TabletDir returns the main tablet directory.
// It's a method so it can be accessed through the MysqlDaemon interface.
func (mysqld *Mysqld) TabletDir() string {
return mysqld.tabletDir
}
// RunMysqlUpgrade will run the mysql_upgrade program on the current
// install. Will be called only when mysqld is running with no
// network and no grant tables.
func (mysqld *Mysqld) RunMysqlUpgrade() error {
// Execute as remote action on mysqlctld if requested.
if *socketFile != "" {
log.Infof("executing Mysqld.RunMysqlUpgrade() remotely via mysqlctld server: %v", *socketFile)
client, err := mysqlctlclient.New("unix", *socketFile)
if err != nil {
return fmt.Errorf("can't dial mysqlctld: %v", err)
}
defer client.Close()
return client.RunMysqlUpgrade(context.TODO())
}
// Find mysql_upgrade. If not there, we do nothing.
dir, err := vtenv.VtMysqlRoot()
if err != nil {
log.Warningf("VT_MYSQL_ROOT not set, skipping mysql_upgrade step: %v", err)
return nil
}
name, err := binaryPath(dir, "mysql_upgrade")
if err != nil {
log.Warningf("mysql_upgrade binary not present, skipping it: %v", err)
return nil
}
// Since we started mysql with --skip-grant-tables, we should
// be able to run mysql_upgrade without any valid user or
// password. However, mysql_upgrade executes a 'flush
// privileges' right in the middle, and then subsequent
// commands fail if we don't use valid credentials. So let's
// use dba credentials.
params, err := dbconfigs.WithCredentials(&mysqld.dbcfgs.Dba)
if err != nil {
return err
}
cnf, err := mysqld.defaultsExtraFile(¶ms)
if err != nil {
return err
}
defer os.Remove(cnf)
// Run the program, if it fails, we fail. Note in this
// moment, mysqld is running with no grant tables on the local
// socket only, so this doesn't need any user or password.
args := []string{
// --defaults-file=* must be the first arg.
"--defaults-file=" + cnf,
"--force", // Don't complain if it's already been upgraded.
}
cmd := exec.Command(name, args...)
cmd.Env = []string{os.ExpandEnv("LD_LIBRARY_PATH=$VT_MYSQL_ROOT/lib/mysql")}
out, err := cmd.CombinedOutput()
log.Infof("mysql_upgrade output: %s", out)
return err
}
// Start will start the mysql daemon, either by running the
// 'mysqld_start' hook, or by running mysqld_safe in the background.
// If a mysqlctld address is provided in a flag, Start will run
// remotely. When waiting for mysqld to start, we will use
// the dba user.
func (mysqld *Mysqld) Start(ctx context.Context, mysqldArgs ...string) error {
// Execute as remote action on mysqlctld if requested.
if *socketFile != "" {
log.Infof("executing Mysqld.Start() remotely via mysqlctld server: %v", *socketFile)
client, err := mysqlctlclient.New("unix", *socketFile)
if err != nil {
return fmt.Errorf("can't dial mysqlctld: %v", err)
}
defer client.Close()
return client.Start(ctx, mysqldArgs...)
}
if err := mysqld.startNoWait(ctx, mysqldArgs...); err != nil {
return err
}
return mysqld.Wait(ctx)
}
// startNoWait is the internal version of Start, and it doesn't wait.
func (mysqld *Mysqld) startNoWait(ctx context.Context, mysqldArgs ...string) error {
var name string
ts := fmt.Sprintf("Mysqld.Start(%v)", time.Now().Unix())
// try the mysqld start hook, if any
switch hr := hook.NewHook("mysqld_start", mysqldArgs).Execute(); hr.ExitStatus {
case hook.HOOK_SUCCESS:
// hook exists and worked, we can keep going
name = "mysqld_start hook"
case hook.HOOK_DOES_NOT_EXIST:
// hook doesn't exist, run mysqld_safe ourselves
log.Infof("%v: No mysqld_start hook, running mysqld_safe directly", ts)
dir, err := vtenv.VtMysqlRoot()
if err != nil {
return err
}
name, err = binaryPath(dir, "mysqld_safe")
if err != nil {
log.Warningf("%v: trying to launch mysqld instead", err)
name, err = binaryPath(dir, "mysqld")
// If this also fails, return an error.
if err != nil {
return err
}
}
arg := []string{
"--defaults-file=" + mysqld.config.path}
arg = append(arg, mysqldArgs...)
env := []string{os.ExpandEnv("LD_LIBRARY_PATH=$VT_MYSQL_ROOT/lib/mysql")}
cmd := exec.Command(name, arg...)
cmd.Dir = dir
cmd.Env = env
log.Infof("%v %#v", ts, cmd)
stderr, err := cmd.StderrPipe()
if err != nil {
return err
}
stdout, err := cmd.StdoutPipe()
if err != nil {
return err
}
go func() {
scanner := bufio.NewScanner(stderr)
for scanner.Scan() {
log.Infof("%v stderr: %v", ts, scanner.Text())
}
}()
go func() {
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
log.Infof("%v stdout: %v", ts, scanner.Text())
}
}()
err = cmd.Start()
if err != nil {
return err
}
mysqld.mutex.Lock()
mysqld.cancelWaitCmd = make(chan struct{})
go func(cancel <-chan struct{}) {
// Wait regardless of cancel, so we don't generate defunct processes.
err := cmd.Wait()
log.Infof("%v exit: %v", ts, err)
// The process exited. Trigger OnTerm callbacks, unless we were cancelled.
select {
case <-cancel:
default:
mysqld.mutex.Lock()
for _, callback := range mysqld.onTermFuncs {
go callback()
}
mysqld.mutex.Unlock()
}
}(mysqld.cancelWaitCmd)
mysqld.mutex.Unlock()
default:
// hook failed, we report error
return fmt.Errorf("mysqld_start hook failed: %v", hr.String())
}
return nil
}
// Wait returns nil when mysqld is up and accepting connections. It
// will use the dba credentials to try to connect. Use wait() with
// different credentials if needed.
func (mysqld *Mysqld) Wait(ctx context.Context) error {
params, err := dbconfigs.WithCredentials(&mysqld.dbcfgs.Dba)
if err != nil {
return err
}
return mysqld.wait(ctx, params)
}
// wait is the internal version of Wait, that takes credentials.
func (mysqld *Mysqld) wait(ctx context.Context, params mysql.ConnParams) error {
log.Infof("Waiting for mysqld socket file (%v) to be ready...", mysqld.config.SocketFile)
for {
select {
case <-ctx.Done():
return errors.New("deadline exceeded waiting for mysqld socket file to appear: " + mysqld.config.SocketFile)
default:
}
_, statErr := os.Stat(mysqld.config.SocketFile)
if statErr == nil {
// Make sure the socket file isn't stale.
conn, connErr := mysql.Connect(ctx, ¶ms)
if connErr == nil {
conn.Close()
return nil
}
log.Infof("mysqld socket file exists, but can't connect: %v", connErr)
} else if !os.IsNotExist(statErr) {
return fmt.Errorf("can't stat mysqld socket file: %v", statErr)
}
time.Sleep(100 * time.Millisecond)
}
}
// Shutdown will stop the mysqld daemon that is running in the background.
//
// waitForMysqld: should the function block until mysqld has stopped?
// This can actually take a *long* time if the buffer cache needs to be fully
// flushed - on the order of 20-30 minutes.
//
// If a mysqlctld address is provided in a flag, Shutdown will run remotely.
func (mysqld *Mysqld) Shutdown(ctx context.Context, waitForMysqld bool) error {
log.Infof("Mysqld.Shutdown")
// Execute as remote action on mysqlctld if requested.
if *socketFile != "" {
log.Infof("executing Mysqld.Shutdown() remotely via mysqlctld server: %v", *socketFile)
client, err := mysqlctlclient.New("unix", *socketFile)
if err != nil {
return fmt.Errorf("can't dial mysqlctld: %v", err)
}
defer client.Close()
return client.Shutdown(ctx, waitForMysqld)
}
// We're shutting down on purpose. We no longer want to be notified when
// mysqld terminates.
mysqld.mutex.Lock()
if mysqld.cancelWaitCmd != nil {
close(mysqld.cancelWaitCmd)
mysqld.cancelWaitCmd = nil
}
mysqld.mutex.Unlock()
// possibly mysql is already shutdown, check for a few files first
_, socketPathErr := os.Stat(mysqld.config.SocketFile)
_, pidPathErr := os.Stat(mysqld.config.PidFile)
if os.IsNotExist(socketPathErr) && os.IsNotExist(pidPathErr) {
log.Warningf("assuming mysqld already shut down - no socket, no pid file found")
return nil
}
// try the mysqld shutdown hook, if any
h := hook.NewSimpleHook("mysqld_shutdown")
hr := h.Execute()
switch hr.ExitStatus {
case hook.HOOK_SUCCESS:
// hook exists and worked, we can keep going
case hook.HOOK_DOES_NOT_EXIST:
// hook doesn't exist, try mysqladmin
log.Infof("No mysqld_shutdown hook, running mysqladmin directly")
dir, err := vtenv.VtMysqlRoot()
if err != nil {
return err
}
name, err := binaryPath(dir, "mysqladmin")
if err != nil {
return err
}
params, err := dbconfigs.WithCredentials(&mysqld.dbcfgs.Dba)
if err != nil {
return err
}
cnf, err := mysqld.defaultsExtraFile(¶ms)
if err != nil {
return err
}
defer os.Remove(cnf)
args := []string{
"--defaults-extra-file=" + cnf,
"shutdown",
}
env := []string{
os.ExpandEnv("LD_LIBRARY_PATH=$VT_MYSQL_ROOT/lib/mysql"),
}
_, _, err = execCmd(name, args, env, dir, nil)
if err != nil {
return err
}
default:
// hook failed, we report error
return fmt.Errorf("mysqld_shutdown hook failed: %v", hr.String())
}
// Wait for mysqld to really stop. Use the socket and pid files as a
// proxy for that since we can't call wait() in a process we
// didn't start.
if waitForMysqld {
log.Infof("Mysqld.Shutdown: waiting for socket file (%v) and pid file (%v) to disappear",
mysqld.config.SocketFile, mysqld.config.PidFile)
for {
select {
case <-ctx.Done():
return errors.New("gave up waiting for mysqld to stop")
default:
}
_, socketPathErr = os.Stat(mysqld.config.SocketFile)
_, pidPathErr = os.Stat(mysqld.config.PidFile)
if os.IsNotExist(socketPathErr) && os.IsNotExist(pidPathErr) {
return nil
}
time.Sleep(100 * time.Millisecond)
}
}
return nil
}
// execCmd searches the PATH for a command and runs it, logging the output.
// If input is not nil, pipe it to the command's stdin.
func execCmd(name string, args, env []string, dir string, input io.Reader) (cmd *exec.Cmd, output string, err error) {
cmdPath, _ := exec.LookPath(name)
log.Infof("execCmd: %v %v %v", name, cmdPath, args)
cmd = exec.Command(cmdPath, args...)
cmd.Env = env
cmd.Dir = dir
if input != nil {
cmd.Stdin = input
}
out, err := cmd.CombinedOutput()
output = string(out)
if err != nil {
log.Infof("execCmd: %v failed: %v", name, err)
err = fmt.Errorf("%v: %v, output: %v", name, err, output)
}
log.Infof("execCmd: %v output: %v", name, output)
return cmd, output, err
}
// binaryPath does a limited path lookup for a command,
// searching only within sbin and bin in the given root.
func binaryPath(root, binary string) (string, error) {
subdirs := []string{"sbin", "bin"}
for _, subdir := range subdirs {
binPath := path.Join(root, subdir, binary)
if _, err := os.Stat(binPath); err == nil {
return binPath, nil
}
}
return "", fmt.Errorf("%s not found in any of %s/{%s}",
binary, root, strings.Join(subdirs, ","))
}
// InitConfig will create the default directory structure for the mysqld process,
// generate / configure a my.cnf file.
func (mysqld *Mysqld) InitConfig() error {
log.Infof("mysqlctl.InitConfig")
err := mysqld.createDirs()
if err != nil {
log.Errorf("%s", err.Error())
return err
}
root, err := vtenv.VtRoot()
if err != nil {
log.Errorf("%s", err.Error())
return err
}
// Set up config files.
if err = mysqld.initConfig(root, mysqld.config.path); err != nil {
log.Errorf("failed creating %v: %v", mysqld.config.path, err)
return err
}
return nil
}
// Init will create the default directory structure for the mysqld process,
// generate / configure a my.cnf file, install a skeleton database,
// and apply the provided initial SQL file.
func (mysqld *Mysqld) Init(ctx context.Context, initDBSQLFile string) error {
log.Infof("mysqlctl.Init")
err := mysqld.InitConfig()
if err != nil {
log.Errorf("%s", err.Error())
return err
}
// Install data dir.
if err = mysqld.installDataDir(); err != nil {
return err
}
// Start mysqld. We do not use Start, as we have to wait using
// the root user.
if err = mysqld.startNoWait(ctx); err != nil {
log.Errorf("failed starting mysqld (check mysql error log %v for more info): %v", mysqld.config.ErrorLogPath, err)
return err
}
// Wait for mysqld to be ready, using root credentials, as no
// user is created yet.
params := mysql.ConnParams{
Uname: "root",
Charset: "utf8",
UnixSocket: mysqld.config.SocketFile,
}
if err = mysqld.wait(ctx, params); err != nil {
log.Errorf("failed starting mysqld in time (check mysyql error log %v for more info): %v", mysqld.config.ErrorLogPath, err)
return err
}
// Run initial SQL file.
sqlFile, err := os.Open(initDBSQLFile)
if err != nil {
return fmt.Errorf("can't open init_db_sql_file (%v): %v", initDBSQLFile, err)
}
defer sqlFile.Close()
if err := mysqld.executeMysqlScript(¶ms, sqlFile); err != nil {
return fmt.Errorf("can't run init_db_sql_file (%v): %v", initDBSQLFile, err)
}
return nil
}
// MySQL 5.7 GA and up have deprecated mysql_install_db.
// Instead, initialization is built into mysqld.
func useMysqldInitialize(version string) bool {
return strings.Contains(version, "Ver 5.7.") ||
strings.Contains(version, "Ver 8.0.")
}
func (mysqld *Mysqld) installDataDir() error {
mysqlRoot, err := vtenv.VtMysqlRoot()
if err != nil {
return err
}
mysqldPath, err := binaryPath(mysqlRoot, "mysqld")
if err != nil {
return err
}
mysqlBaseDir, err := vtenv.VtMysqlBaseDir()
if err != nil {
return err
}
// Check mysqld version.
_, version, err := execCmd(mysqldPath, []string{"--version"}, nil, mysqlRoot, nil)
if err != nil {
return err
}
if useMysqldInitialize(version) {
log.Infof("Installing data dir with mysqld --initialize-insecure")
args := []string{
"--defaults-file=" + mysqld.config.path,
"--basedir=" + mysqlBaseDir,
"--initialize-insecure", // Use empty 'root'@'localhost' password.
}
if _, _, err = execCmd(mysqldPath, args, nil, mysqlRoot, nil); err != nil {
log.Errorf("mysqld --initialize-insecure failed: %v", err)
return err
}
return nil
}
log.Infof("Installing data dir with mysql_install_db")
args := []string{
"--defaults-file=" + mysqld.config.path,
"--basedir=" + mysqlBaseDir,
}
cmdPath, err := binaryPath(mysqlRoot, "mysql_install_db")
if err != nil {
return err
}
if _, _, err = execCmd(cmdPath, args, nil, mysqlRoot, nil); err != nil {
log.Errorf("mysql_install_db failed: %v", err)
return err
}
return nil
}
func (mysqld *Mysqld) initConfig(root, outFile string) error {
var err error
var configData string
switch hr := hook.NewSimpleHook("make_mycnf").Execute(); hr.ExitStatus {
case hook.HOOK_DOES_NOT_EXIST:
log.Infof("make_mycnf hook doesn't exist, reading template files")
configData, err = mysqld.config.makeMycnf(getMycnfTemplates(root))
case hook.HOOK_SUCCESS:
configData, err = mysqld.config.fillMycnfTemplate(hr.Stdout)
default:
return fmt.Errorf("make_mycnf hook failed(%v): %v", hr.ExitStatus, hr.Stderr)
}
if err != nil {
return err
}
return ioutil.WriteFile(outFile, []byte(configData), 0664)
}
func getMycnfTemplates(root string) []string {
if *mycnfTemplateFile != "" {
return []string{*mycnfTemplateFile}
}
cnfTemplatePaths := []string{
path.Join(root, "config/mycnf/default.cnf"),
path.Join(root, "config/mycnf/master.cnf"),
path.Join(root, "config/mycnf/replica.cnf"),
}
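// EXTRA_MY_CNF, when set, is a colon-separated list of additional my.cnf template paths appended to the defaults above.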
if extraCnf := os.Getenv("EXTRA_MY_CNF"); extraCnf != "" {
parts := strings.Split(extraCnf, ":")
cnfTemplatePaths = append(cnfTemplatePaths, parts...)
}
return cnfTemplatePaths
}
// RefreshConfig attempts to recreate the my.cnf from templates and, if it has
// changed, logs the update and swaps it into place. It keeps a copy of the last
// version in case a fallback is required.
// Should be called from a stable replica; server_id is not regenerated.
func (mysqld *Mysqld) RefreshConfig() error {
log.Info("Checking for updates to my.cnf")
root, err := vtenv.VtRoot()
if err != nil {
return err
}
f, err := ioutil.TempFile(path.Dir(mysqld.config.path), "my.cnf")
if err != nil {
return fmt.Errorf("Could not create temp file: %v", err)
}
defer os.Remove(f.Name())
err = mysqld.initConfig(root, f.Name())
if err != nil {
return fmt.Errorf("Could not initConfig in %v: %v", f.Name(), err)
}
existing, err := ioutil.ReadFile(mysqld.config.path)
if err != nil {
return fmt.Errorf("Could not read existing file %v: %v", mysqld.config.path, err)
}
updated, err := ioutil.ReadFile(f.Name())
if err != nil {
return fmt.Errorf("Could not read updated file %v: %v", f.Name(), err)
}
if bytes.Equal(existing, updated) {
log.Infof("No changes to my.cnf. Continuing.")
return nil
}
backupPath := mysqld.config.path + ".previous"
err = os.Rename(mysqld.config.path, backupPath)
if err != nil {
return fmt.Errorf("Could not back up existing %v: %v", mysqld.config.path, err)
}
err = os.Rename(f.Name(), mysqld.config.path)
if err != nil {
return fmt.Errorf("Could not move %v to %v: %v", f.Name(), mysqld.config.path, err)
}
log.Infof("Updated my.cnf. Backup of previous version available in %v", backupPath)
return nil
}
// ReinitConfig updates the config file as if Mysqld is initializing. At the
// moment it only randomizes ServerID because it's not safe to restore a replica
// from a backup and then give it the same ServerID as before; MySQL can then
// skip transactions in the replication stream with the same server_id.
func (mysqld *Mysqld) ReinitConfig(ctx context.Context) error {
log.Infof("Mysqld.ReinitConfig")
// Execute as remote action on mysqlctld if requested.
if *socketFile != "" {
log.Infof("executing Mysqld.ReinitConfig() remotely via mysqlctld server: %v", *socketFile)
client, err := mysqlctlclient.New("unix", *socketFile)
if err != nil {
return fmt.Errorf("can't dial mysqlctld: %v", err)
}
defer client.Close()
return client.ReinitConfig(ctx)
}
if err := mysqld.config.RandomizeMysqlServerID(); err != nil {
return err
}
root, err := vtenv.VtRoot()
if err != nil {
return err
}
return mysqld.initConfig(root, mysqld.config.path)
}
func (mysqld *Mysqld) createDirs() error {
log.Infof("creating directory %s", mysqld.tabletDir)
if err := os.MkdirAll(mysqld.tabletDir, os.ModePerm); err != nil {
return err
}
for _, dir := range TopLevelDirs() {
if err := mysqld.createTopDir(dir); err != nil {
return err
}
}
for _, dir := range mysqld.config.directoryList() {
log.Infof("creating directory %s", dir)
if err := os.MkdirAll(dir, os.ModePerm); err != nil {
return err
}
// FIXME(msolomon) validate permissions?
}
return nil
}
// createTopDir creates a top level directory under TabletDir.
// However, if a directory of the same name already exists under
// vtenv.VtDataRoot(), it creates a directory named after the tablet
// id under that directory, and then creates a symlink under TabletDir
// that points to the newly created directory. For example, if
// /vt/data is present, it will create the following structure:
// /vt/data/vt_xxxx /vt/vt_xxxx/data -> /vt/data/vt_xxxx
func (mysqld *Mysqld) createTopDir(dir string) error {
vtname := path.Base(mysqld.tabletDir)
target := path.Join(vtenv.VtDataRoot(), dir)
_, err := os.Lstat(target)
if err != nil {
if os.IsNotExist(err) {
topdir := path.Join(mysqld.tabletDir, dir)
log.Infof("creating directory %s", topdir)
return os.MkdirAll(topdir, os.ModePerm)
}
return err
}
linkto := path.Join(target, vtname)
source := path.Join(mysqld.tabletDir, dir)
log.Infof("creating directory %s", linkto)
err = os.MkdirAll(linkto, os.ModePerm)
if err != nil {
return err
}
log.Infof("creating symlink %s -> %s", source, linkto)
return os.Symlink(linkto, source)
}
// Teardown will shutdown the running daemon, and delete the root directory.
func (mysqld *Mysqld) Teardown(ctx context.Context, force bool) error {
log.Infof("mysqlctl.Teardown")
if err := mysqld.Shutdown(ctx, true); err != nil {
log.Warningf("failed mysqld shutdown: %v", err.Error())
if !force {
return err
}
}
var removalErr error
for _, dir := range TopLevelDirs() {
qdir := path.Join(mysqld.tabletDir, dir)
if err := deleteTopDir(qdir); err != nil {
removalErr = err
}
}
return removalErr
}
func deleteTopDir(dir string) (removalErr error) {
fi, err := os.Lstat(dir)
if err != nil {
log.Errorf("error deleting dir %v: %v", dir, err.Error())
removalErr = err
} else if fi.Mode()&os.ModeSymlink != 0 {
target, err := filepath.EvalSymlinks(dir)
if err != nil {
log.Errorf("could not resolve symlink %v: %v", dir, err.Error())
removalErr = err
}
log.Infof("remove data dir (symlinked) %v", target)
if err = os.RemoveAll(target); err != nil {
log.Errorf("failed removing %v: %v", target, err.Error())
removalErr = err
}
}
log.Infof("remove data dir %v", dir)
if err = os.RemoveAll(dir); err != nil {
log.Errorf("failed removing %v: %v", dir, err.Error())
removalErr = err
}
return
}
// executeMysqlScript executes a .sql script from an io.Reader with the mysql
// command line tool. It uses the connParams as is, not adding credentials.
func (mysqld *Mysqld) executeMysqlScript(connParams *mysql.ConnParams, sql io.Reader) error {
dir, err := vtenv.VtMysqlRoot()
if err != nil {
return err
}
name, err := binaryPath(dir, "mysql")
if err != nil {
return err
}
cnf, err := mysqld.defaultsExtraFile(connParams)
if err != nil {
return err
}
defer os.Remove(cnf)
args := []string{
"--defaults-extra-file=" + cnf,
"--batch",
}
env := []string{
"LD_LIBRARY_PATH=" + path.Join(dir, "lib/mysql"),
}
_, _, err = execCmd(name, args, env, dir, sql)
if err != nil {
return err
}
return nil
}
// defaultsExtraFile returns the filename for a temporary config file
// that contains the user, password and socket file to connect to
// mysqld. We write a temporary config file so the password is never
// passed as a command line parameter. Note ioutil.TempFile uses 0600
// as permissions, so only the local user can read the file. The
// returned temporary file should be removed after use, typically in a
// 'defer os.Remove()' statement.
func (mysqld *Mysqld) defaultsExtraFile(connParams *mysql.ConnParams) (string, error) {
var contents string
if connParams.UnixSocket == "" {
contents = fmt.Sprintf(`
[client]
user=%v
password=%v
host=%v
port=%v
`, connParams.Uname, connParams.Pass, connParams.Host, connParams.Port)
} else {
contents = fmt.Sprintf(`
[client]
user=%v
password=%v
socket=%v
`, connParams.Uname, connParams.Pass, connParams.UnixSocket)
}
tmpfile, err := ioutil.TempFile("", "example")
if err != nil {
return "", err
}
name := tmpfile.Name()
if _, err := tmpfile.Write([]byte(contents)); err != nil {
tmpfile.Close()
os.Remove(name)
return "", err
}
if err := tmpfile.Close(); err != nil {
os.Remove(name)
return "", err
}
return name, nil
}
// GetAppConnection returns a connection from the app pool.
// Recycle needs to be called on the result.
func (mysqld *Mysqld) GetAppConnection(ctx context.Context) (*dbconnpool.PooledDBConnection, error) {
return mysqld.appPool.Get(ctx)
}
// GetDbaConnection creates a new DBConnection.
func (mysqld *Mysqld) GetDbaConnection() (*dbconnpool.DBConnection, error) {
return dbconnpool.NewDBConnection(&mysqld.dbcfgs.Dba, dbaMysqlStats)
}
// GetAllPrivsConnection creates a new DBConnection.
func (mysqld *Mysqld) GetAllPrivsConnection() (*dbconnpool.DBConnection, error) {
return dbconnpool.NewDBConnection(&mysqld.dbcfgs.AllPrivs, allprivsMysqlStats)
}
// Close will close this instance of Mysqld. It will wait for all dba
// queries to be finished.
func (mysqld *Mysqld) Close() {
if mysqld.dbaPool != nil {
mysqld.dbaPool.Close()
}
if mysqld.appPool != nil {
mysqld.appPool.Close()
}
}
// OnTerm registers a function to be called if mysqld terminates for any
// reason other than a call to Mysqld.Shutdown(). This only works if mysqld
// was actually started by calling Start() on this Mysqld instance.
func (mysqld *Mysqld) OnTerm(f func()) {
mysqld.mutex.Lock()
defer mysqld.mutex.Unlock()
mysqld.onTermFuncs = append(mysqld.onTermFuncs, f)
}
|
[
"\"EXTRA_MY_CNF\""
] |
[] |
[
"EXTRA_MY_CNF"
] |
[]
|
["EXTRA_MY_CNF"]
|
go
| 1 | 0 | |
contrib/devtools/github-merge.py
|
#!/usr/bin/env python3
# Copyright (c) 2016-2017 Ybtc Core Developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# This script will locally construct a merge commit for a pull request on a
# github repository, inspect it, sign it and optionally push it.
# The following temporary branches are created/overwritten and deleted:
# * pull/$PULL/base (the current master we're merging onto)
# * pull/$PULL/head (the current state of the remote pull request)
# * pull/$PULL/merge (github's merge)
# * pull/$PULL/local-merge (our merge)
# In case of a clean merge that is accepted by the user, the local branch with
# name $BRANCH is overwritten with the merged result, and optionally pushed.
from __future__ import division,print_function,unicode_literals
import os
from sys import stdin,stdout,stderr
import argparse
import hashlib
import subprocess
import json,codecs
try:
from urllib.request import Request,urlopen
except:
from urllib2 import Request,urlopen
# External tools (can be overridden using environment)
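# e.g. run with GIT=/usr/local/bin/git BASH=/usr/local/bin/bash to use alternative binaries (paths are illustrative)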
GIT = os.getenv('GIT','git')
BASH = os.getenv('BASH','bash')
# OS specific configuration for terminal attributes
ATTR_RESET = ''
ATTR_PR = ''
COMMIT_FORMAT = '%h %s (%an)%d'
if os.name == 'posix': # if posix, assume we can use basic terminal escapes
ATTR_RESET = '\033[0m'
ATTR_PR = '\033[1;36m'
COMMIT_FORMAT = '%C(bold blue)%h%Creset %s %C(cyan)(%an)%Creset%C(green)%d%Creset'
def git_config_get(option, default=None):
'''
Get named configuration option from git repository.
'''
try:
return subprocess.check_output([GIT,'config','--get',option]).rstrip().decode('utf-8')
except subprocess.CalledProcessError as e:
return default
def retrieve_pr_info(repo,pull):
'''
Retrieve pull request information from github.
Return None if no title can be found, or an error happens.
'''
try:
req = Request("https://api.github.com/repos/"+repo+"/pulls/"+pull)
result = urlopen(req)
reader = codecs.getreader('utf-8')
obj = json.load(reader(result))
return obj
except Exception as e:
print('Warning: unable to retrieve pull information from github: %s' % e)
return None
def ask_prompt(text):
print(text,end=" ",file=stderr)
stderr.flush()
reply = stdin.readline().rstrip()
print("",file=stderr)
return reply
def get_symlink_files():
files = sorted(subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', 'HEAD']).splitlines())
ret = []
for f in files:
if (int(f.decode('utf-8').split(" ")[0], 8) & 0o170000) == 0o120000:
ret.append(f.decode('utf-8').split("\t")[1])
return ret
def tree_sha512sum(commit='HEAD'):
# request metadata for entire tree, recursively
files = []
blob_by_name = {}
for line in subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', commit]).splitlines():
name_sep = line.index(b'\t')
metadata = line[:name_sep].split() # perms, 'blob', blobid
assert(metadata[1] == b'blob')
name = line[name_sep+1:]
files.append(name)
blob_by_name[name] = metadata[2]
files.sort()
# open connection to git-cat-file in batch mode to request data for all blobs
# this is much faster than launching it per file
p = subprocess.Popen([GIT, 'cat-file', '--batch'], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
overall = hashlib.sha512()
for f in files:
blob = blob_by_name[f]
# request blob
p.stdin.write(blob + b'\n')
p.stdin.flush()
# read header: blob, "blob", size
reply = p.stdout.readline().split()
assert(reply[0] == blob and reply[1] == b'blob')
size = int(reply[2])
# hash the blob data
intern = hashlib.sha512()
ptr = 0
while ptr < size:
bs = min(65536, size - ptr)
piece = p.stdout.read(bs)
if len(piece) == bs:
intern.update(piece)
else:
raise IOError('Premature EOF reading git cat-file output')
ptr += bs
dig = intern.hexdigest()
assert(p.stdout.read(1) == b'\n') # ignore LF that follows blob data
# update overall hash with file hash
overall.update(dig.encode("utf-8"))
overall.update(" ".encode("utf-8"))
overall.update(f)
overall.update("\n".encode("utf-8"))
p.stdin.close()
if p.wait():
raise IOError('Non-zero return value executing git cat-file')
return overall.hexdigest()
def print_merge_details(pull, title, branch, base_branch, head_branch):
print('%s#%s%s %s %sinto %s%s' % (ATTR_RESET+ATTR_PR,pull,ATTR_RESET,title,ATTR_RESET+ATTR_PR,branch,ATTR_RESET))
subprocess.check_call([GIT,'log','--graph','--topo-order','--pretty=format:'+COMMIT_FORMAT,base_branch+'..'+head_branch])
def parse_arguments():
epilog = '''
In addition, you can set the following git configuration variables:
githubmerge.repository (mandatory),
user.signingkey (mandatory),
githubmerge.host (default: [email protected]),
githubmerge.branch (no default),
githubmerge.testcmd (default: none).
'''
parser = argparse.ArgumentParser(description='Utility to merge, sign and push github pull requests',
epilog=epilog)
parser.add_argument('pull', metavar='PULL', type=int, nargs=1,
help='Pull request ID to merge')
parser.add_argument('branch', metavar='BRANCH', type=str, nargs='?',
default=None, help='Branch to merge against (default: githubmerge.branch setting, or base branch for pull, or \'master\')')
return parser.parse_args()
def main():
# Extract settings from git repo
repo = git_config_get('githubmerge.repository')
host = git_config_get('githubmerge.host','[email protected]')
opt_branch = git_config_get('githubmerge.branch',None)
testcmd = git_config_get('githubmerge.testcmd')
signingkey = git_config_get('user.signingkey')
if repo is None:
print("ERROR: No repository configured. Use this command to set:", file=stderr)
print("git config githubmerge.repository <owner>/<repo>", file=stderr)
exit(1)
if signingkey is None:
print("ERROR: No GPG signing key set. Set one using:",file=stderr)
print("git config --global user.signingkey <key>",file=stderr)
exit(1)
host_repo = host+":"+repo # shortcut for push/pull target
# Extract settings from command line
args = parse_arguments()
pull = str(args.pull[0])
# Receive pull information from github
info = retrieve_pr_info(repo,pull)
if info is None:
exit(1)
title = info['title'].strip()
body = info['body'].strip()
# precedence order for destination branch argument:
# - command line argument
# - githubmerge.branch setting
# - base branch for pull (as retrieved from github)
# - 'master'
branch = args.branch or opt_branch or info['base']['ref'] or 'master'
# Initialize source branches
head_branch = 'pull/'+pull+'/head'
base_branch = 'pull/'+pull+'/base'
merge_branch = 'pull/'+pull+'/merge'
local_merge_branch = 'pull/'+pull+'/local-merge'
devnull = open(os.devnull,'w')
try:
subprocess.check_call([GIT,'checkout','-q',branch])
except subprocess.CalledProcessError as e:
print("ERROR: Cannot check out branch %s." % (branch), file=stderr)
exit(3)
try:
subprocess.check_call([GIT,'fetch','-q',host_repo,'+refs/pull/'+pull+'/*:refs/heads/pull/'+pull+'/*'])
except subprocess.CalledProcessError as e:
print("ERROR: Cannot find pull request #%s on %s." % (pull,host_repo), file=stderr)
exit(3)
try:
subprocess.check_call([GIT,'log','-q','-1','refs/heads/'+head_branch], stdout=devnull, stderr=stdout)
except subprocess.CalledProcessError as e:
print("ERROR: Cannot find head of pull request #%s on %s." % (pull,host_repo), file=stderr)
exit(3)
try:
subprocess.check_call([GIT,'log','-q','-1','refs/heads/'+merge_branch], stdout=devnull, stderr=stdout)
except subprocess.CalledProcessError as e:
print("ERROR: Cannot find merge of pull request #%s on %s." % (pull,host_repo), file=stderr)
exit(3)
try:
subprocess.check_call([GIT,'fetch','-q',host_repo,'+refs/heads/'+branch+':refs/heads/'+base_branch])
except subprocess.CalledProcessError as e:
print("ERROR: Cannot find branch %s on %s." % (branch,host_repo), file=stderr)
exit(3)
subprocess.check_call([GIT,'checkout','-q',base_branch])
subprocess.call([GIT,'branch','-q','-D',local_merge_branch], stderr=devnull)
subprocess.check_call([GIT,'checkout','-q','-b',local_merge_branch])
try:
# Go up to the repository's root.
toplevel = subprocess.check_output([GIT,'rev-parse','--show-toplevel']).strip()
os.chdir(toplevel)
# Create unsigned merge commit.
if title:
firstline = 'Merge #%s: %s' % (pull,title)
else:
firstline = 'Merge #%s' % (pull,)
message = firstline + '\n\n'
message += subprocess.check_output([GIT,'log','--no-merges','--topo-order','--pretty=format:%h %s (%an)',base_branch+'..'+head_branch]).decode('utf-8')
message += '\n\nPull request description:\n\n ' + body.replace('\n', '\n ') + '\n'
try:
subprocess.check_call([GIT,'merge','-q','--commit','--no-edit','--no-ff','-m',message.encode('utf-8'),head_branch])
except subprocess.CalledProcessError as e:
print("ERROR: Cannot be merged cleanly.",file=stderr)
subprocess.check_call([GIT,'merge','--abort'])
exit(4)
logmsg = subprocess.check_output([GIT,'log','--pretty=format:%s','-n','1']).decode('utf-8')
if logmsg.rstrip() != firstline.rstrip():
print("ERROR: Creating merge failed (already merged?).",file=stderr)
exit(4)
symlink_files = get_symlink_files()
for f in symlink_files:
print("ERROR: File %s was a symlink" % f)
if len(symlink_files) > 0:
exit(4)
# Put tree SHA512 into the message
try:
first_sha512 = tree_sha512sum()
message += '\n\nTree-SHA512: ' + first_sha512
except subprocess.CalledProcessError as e:
printf("ERROR: Unable to compute tree hash")
exit(4)
try:
subprocess.check_call([GIT,'commit','--amend','-m',message.encode('utf-8')])
except subprocess.CalledProcessError as e:
printf("ERROR: Cannot update message.",file=stderr)
exit(4)
print_merge_details(pull, title, branch, base_branch, head_branch)
print()
# Run test command if configured.
if testcmd:
if subprocess.call(testcmd,shell=True):
print("ERROR: Running %s failed." % testcmd,file=stderr)
exit(5)
# Show the created merge.
diff = subprocess.check_output([GIT,'diff',merge_branch+'..'+local_merge_branch])
subprocess.check_call([GIT,'diff',base_branch+'..'+local_merge_branch])
if diff:
print("WARNING: merge differs from github!",file=stderr)
reply = ask_prompt("Type 'ignore' to continue.")
if reply.lower() == 'ignore':
print("Difference with github ignored.",file=stderr)
else:
exit(6)
else:
# Verify the result manually.
print("Dropping you on a shell so you can try building/testing the merged source.",file=stderr)
print("Run 'git diff HEAD~' to show the changes being merged.",file=stderr)
print("Type 'exit' when done.",file=stderr)
if os.path.isfile('/etc/debian_version'): # Show pull number on Debian default prompt
os.putenv('debian_chroot',pull)
subprocess.call([BASH,'-i'])
second_sha512 = tree_sha512sum()
if first_sha512 != second_sha512:
print("ERROR: Tree hash changed unexpectedly",file=stderr)
exit(8)
# Sign the merge commit.
print_merge_details(pull, title, branch, base_branch, head_branch)
while True:
reply = ask_prompt("Type 's' to sign off on the above merge, or 'x' to reject and exit.").lower()
if reply == 's':
try:
subprocess.check_call([GIT,'commit','-q','--gpg-sign','--amend','--no-edit'])
break
except subprocess.CalledProcessError as e:
print("Error while signing, asking again.",file=stderr)
elif reply == 'x':
print("Not signing off on merge, exiting.",file=stderr)
exit(1)
# Put the result in branch.
subprocess.check_call([GIT,'checkout','-q',branch])
subprocess.check_call([GIT,'reset','-q','--hard',local_merge_branch])
finally:
# Clean up temporary branches.
subprocess.call([GIT,'checkout','-q',branch])
subprocess.call([GIT,'branch','-q','-D',head_branch],stderr=devnull)
subprocess.call([GIT,'branch','-q','-D',base_branch],stderr=devnull)
subprocess.call([GIT,'branch','-q','-D',merge_branch],stderr=devnull)
subprocess.call([GIT,'branch','-q','-D',local_merge_branch],stderr=devnull)
# Push the result.
while True:
reply = ask_prompt("Type 'push' to push the result to %s, branch %s, or 'x' to exit without pushing." % (host_repo,branch)).lower()
if reply == 'push':
subprocess.check_call([GIT,'push',host_repo,'refs/heads/'+branch])
break
elif reply == 'x':
exit(1)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"GIT",
"BASH"
] |
[]
|
["GIT", "BASH"]
|
python
| 2 | 0 | |
app.py
|
import os
import sys
import requests
import re
from contextlib import contextmanager
from flask import Flask, request
from sh import git, soffice
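# GIT_REMOTE must point at the git repository used for backups; a missing value raises KeyError at import time.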
GIT_REMOTE = os.environ['GIT_REMOTE']
app = Flask(__name__)
repo = None
def init():
if os.path.exists('repo'):
if not os.path.isdir('repo/.git'):
sys.stderr.write('repo/ exists, but is not a git repo')
sys.exit(1)
else:
git.clone(GIT_REMOTE, 'repo')
# From http://stackoverflow.com/a/24176022/263998
@contextmanager
def cd(newdir):
prevdir = os.getcwd()
os.chdir(os.path.expanduser(newdir))
try:
yield
finally:
os.chdir(prevdir)
def export_as_ods(access_token, spreadsheet_id):
url = 'https://docs.google.com/feeds/download/spreadsheets/Export?key=' + spreadsheet_id + '&exportFormat=ods'
headers = {
'Authorization': 'Bearer ' + access_token
}
return requests.get(url, headers=headers).content
def convert_ods_to_fods(ods_path):
ods_filename = os.path.basename(ods_path)
dest_filename = re.sub('.ods$', '.fods', ods_filename)
dest_dir = os.path.dirname(ods_path) or '.'
soffice('--headless',
'--convert-to', 'fods',
'--outdir', dest_dir,
ods_path)
return os.path.join(dest_dir, dest_filename)
def write_bytes_to_file(filename, bytes):
f = open(filename, 'wb')
f.write(bytes)
f.close()
return filename
@app.route('/initiate_backup', methods=['POST'])
def backup():
access_token = request.form['access_token']
spreadsheet_id = request.form['spreadsheet_id']
with cd('repo/'):
git.pull()
ods = export_as_ods(access_token, spreadsheet_id)
ods_path = write_bytes_to_file('clubs.ods', ods)
fods_path = convert_ods_to_fods(ods_path)
os.remove(ods_path)
# Only commit and push if any files have changed.
if git('ls-files', '-m'):
git.add(fods_path)
git.commit('-m', 'Update spreadsheet.')
git.push()
return 'Consider it done!'
init()
if __name__ == '__main__':
app.run(debug=True)
|
[] |
[] |
[
"GIT_REMOTE"
] |
[]
|
["GIT_REMOTE"]
|
python
| 1 | 0 | |
app/spotify.py
|
import os
import typing
from multiprocessing import Pipe, Process
import uvicorn
from pyfy import ApiError, ClientCreds, Spotify
from starlette.applications import Starlette
from starlette.responses import JSONResponse
from starlette.routing import Route
def _code_server(connection):
async def homepage(request):
code = request.query_params["code"]
connection.send(code)
connection.close()
return JSONResponse("ok")
app = Starlette(routes=[Route("/", homepage)])
uvicorn.run(app, host="0.0.0.0", port=4444, log_level="error")
def wait_for_code():
parent_conn, child_conn = Pipe()
p = Process(target=_code_server, args=(child_conn,))
p.start()
code = parent_conn.recv()
p.terminate()
return code
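# SPOTIFY_CLIENT_KEY and SPOTIFY_CLIENT_SECRET must be set in the environment; os.getenv returns None for any that are missing.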
def get_spotify_client(hostname) -> Spotify:
client = ClientCreds(
client_id=os.getenv("SPOTIFY_CLIENT_KEY"),
client_secret=os.getenv("SPOTIFY_CLIENT_SECRET"),
scopes=["user-read-currently-playing", "user-read-recently-played"],
redirect_uri=f"http://{hostname}:4444",
)
return Spotify(client_creds=client)
def authorize(spotify: Spotify) -> None:
print(spotify.auth_uri())
code = wait_for_code()
spotify.build_user_creds(grant=code, set_user_creds=True)
def get_last_song(spotify: Spotify) -> typing.Optional[typing.Dict]:
try:
current_song = spotify.currently_playing()
if current_song:
return current_song["item"]
else:
last_tracks = spotify.recently_played_tracks(limit=1)["items"]
return last_tracks[0]["track"] if last_tracks else None
except ApiError:
return None
|
[] |
[] |
[
"SPOTIFY_CLIENT_KEY",
"SPOTIFY_CLIENT_SECRET"
] |
[]
|
["SPOTIFY_CLIENT_KEY", "SPOTIFY_CLIENT_SECRET"]
|
python
| 2 | 0 | |
providers/nomad/nomad.go
|
package nomad
import (
"context"
"fmt"
"io"
"log"
"os"
"strings"
"time"
"github.com/virtual-kubelet/virtual-kubelet/manager"
"github.com/virtual-kubelet/virtual-kubelet/providers"
nomad "github.com/hashicorp/nomad/api"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apitypes "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/remotecommand"
)
// Nomad provider constants
const (
jobNamePrefix = "nomad-virtual-kubelet"
nomadDatacentersAnnotation = "nomad.hashicorp.com/datacenters"
defaultNomadAddress = "127.0.0.1:4646"
defaultNomadDatacenter = "dc1"
defaultNomadRegion = "global"
)
// Provider implements the virtual-kubelet provider interface and communicates with the Nomad API.
type Provider struct {
nomadClient *nomad.Client
resourceManager *manager.ResourceManager
nodeName string
operatingSystem string
nomadAddress string
nomadRegion string
cpu string
memory string
pods string
}
// NewProvider creates a new Provider
func NewProvider(rm *manager.ResourceManager, nodeName, operatingSystem string) (*Provider, error) {
p := Provider{}
p.resourceManager = rm
p.nodeName = nodeName
p.operatingSystem = operatingSystem
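// NOMAD_ADDR and NOMAD_REGION override defaultNomadAddress and defaultNomadRegion when set.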
p.nomadAddress = os.Getenv("NOMAD_ADDR")
p.nomadRegion = os.Getenv("NOMAD_REGION")
if p.nomadAddress == "" {
p.nomadAddress = defaultNomadAddress
}
if p.nomadRegion == "" {
p.nomadRegion = defaultNomadRegion
}
c := nomad.DefaultConfig()
log.Printf("nomad client address: %s", p.nomadAddress)
nomadClient, err := nomad.NewClient(c.ClientConfig(p.nomadRegion, p.nomadAddress, false))
if err != nil {
log.Printf("Unable to create nomad client: %s", err)
return nil, err
}
p.nomadClient = nomadClient
return &p, nil
}
// CreatePod accepts a Pod definition and creates
// a Nomad job
func (p *Provider) CreatePod(ctx context.Context, pod *v1.Pod) error {
log.Printf("CreatePod %q\n", pod.Name)
// Ignore daemonSet Pod
if pod != nil && pod.OwnerReferences != nil && len(pod.OwnerReferences) != 0 && pod.OwnerReferences[0].Kind == "DaemonSet" {
log.Printf("Skip to create DaemonSet pod %q\n", pod.Name)
return nil
}
// Default datacenter name
datacenters := []string{defaultNomadDatacenter}
nomadDatacenters := pod.Annotations[nomadDatacentersAnnotation]
if nomadDatacenters != "" {
datacenters = strings.Split(nomadDatacenters, ",")
}
// Create a list of nomad tasks
nomadTasks := p.createNomadTasks(pod)
taskGroups := p.createTaskGroups(pod.Name, nomadTasks)
job := p.createJob(pod.Name, datacenters, taskGroups)
// Register nomad job
_, _, err := p.nomadClient.Jobs().Register(job, nil)
if err != nil {
return fmt.Errorf("couldn't start nomad job: %q", err)
}
return nil
}
// UpdatePod is a noop, nomad does not support live updates of a pod.
func (p *Provider) UpdatePod(ctx context.Context, pod *v1.Pod) error {
log.Println("Pod Update called: No-op as not implemented")
return nil
}
// DeletePod accepts a Pod definition and deletes a Nomad job.
func (p *Provider) DeletePod(ctx context.Context, pod *v1.Pod) (err error) {
// Deregister job
response, _, err := p.nomadClient.Jobs().Deregister(pod.Name, true, nil)
if err != nil {
return fmt.Errorf("couldn't stop or deregister nomad job: %s: %s", response, err)
}
log.Printf("deregistered nomad job %q response %q\n", pod.Name, response)
return nil
}
// GetPod returns the pod running in the Nomad cluster. returns nil
// if pod is not found.
func (p *Provider) GetPod(ctx context.Context, namespace, name string) (pod *v1.Pod, err error) {
jobID := fmt.Sprintf("%s-%s", jobNamePrefix, name)
// Get nomad job
job, _, err := p.nomadClient.Jobs().Info(jobID, nil)
if err != nil {
return nil, fmt.Errorf("couldn't retrieve nomad job: %s", err)
}
// Get nomad job allocations to get individual task statuses
jobAllocs, _, err := p.nomadClient.Jobs().Allocations(jobID, false, nil)
if err != nil {
return nil, fmt.Errorf("couldn't retrieve nomad job allocations: %s", err)
}
// Change a nomad job into a kubernetes pod
pod, err = p.jobToPod(job, jobAllocs)
if err != nil {
return nil, fmt.Errorf("couldn't convert a nomad job into a pod: %s", err)
}
return pod, nil
}
// GetContainerLogs retrieves the logs of a container by name from the provider.
func (p *Provider) GetContainerLogs(ctx context.Context, namespace, podName, containerName string, tail int) (string, error) {
return "", nil
}
// GetPodFullName as defined in the provider context
func (p *Provider) GetPodFullName(ctx context.Context, namespace string, pod string) string {
return fmt.Sprintf("%s-%s", jobNamePrefix, pod)
}
// ExecInContainer executes a command in a container in the pod, copying data
// between in/out/err and the container's stdin/stdout/stderr.
// TODO: Implementation
func (p *Provider) ExecInContainer(name string, uid apitypes.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error {
log.Printf("ExecInContainer %q\n", container)
return nil
}
// GetPodStatus returns the status of a pod by name that is running as a job
// in the Nomad cluster returns nil if a pod by that name is not found.
func (p *Provider) GetPodStatus(ctx context.Context, namespace, name string) (*v1.PodStatus, error) {
pod, err := p.GetPod(ctx, namespace, name)
if err != nil {
return nil, err
}
return &pod.Status, nil
}
// GetPods returns a list of all pods known to be running in Nomad nodes.
func (p *Provider) GetPods(ctx context.Context) ([]*v1.Pod, error) {
log.Printf("GetPods\n")
jobsList, _, err := p.nomadClient.Jobs().PrefixList(jobNamePrefix)
if err != nil {
return nil, fmt.Errorf("couldn't get job list from nomad: %s", err)
}
var pods = []*v1.Pod{}
for _, job := range jobsList {
// Get nomad job
j, _, err := p.nomadClient.Jobs().Info(job.ID, nil)
if err != nil {
return nil, fmt.Errorf("couldn't retrieve nomad job: %s", err)
}
// Get nomad job allocations to get individual task statuses
jobAllocs, _, err := p.nomadClient.Jobs().Allocations(job.ID, false, nil)
if err != nil {
return nil, fmt.Errorf("couldn't retrieve nomad job allocations: %s", err)
}
// Change a nomad job into a kubernetes pod
pod, err := p.jobToPod(j, jobAllocs)
if err != nil {
return nil, fmt.Errorf("couldn't convert a nomad job into a pod: %s", err)
}
pods = append(pods, pod)
}
return pods, nil
}
// Capacity returns a resource list containing the capacity limits set for Nomad.
func (p *Provider) Capacity(ctx context.Context) v1.ResourceList {
// TODO: Use nomad /nodes api to get a list of nodes in the cluster
// and then use the read node /node/:node_id endpoint to calculate
// the total resources of the cluster to report back to kubernetes.
return v1.ResourceList{
"cpu": resource.MustParse("20"),
"memory": resource.MustParse("100Gi"),
"pods": resource.MustParse("20"),
}
}
// NodeConditions returns a list of conditions (Ready, OutOfDisk, etc), for updates to the node status
// within Kubernetes.
func (p *Provider) NodeConditions(ctx context.Context) []v1.NodeCondition {
// TODO: Make these dynamic.
return []v1.NodeCondition{
{
Type: "Ready",
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: "KubeletReady",
Message: "kubelet is ready.",
},
{
Type: "OutOfDisk",
Status: v1.ConditionFalse,
LastHeartbeatTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: "KubeletHasSufficientDisk",
Message: "kubelet has sufficient disk space available",
},
{
Type: "MemoryPressure",
Status: v1.ConditionFalse,
LastHeartbeatTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: "KubeletHasSufficientMemory",
Message: "kubelet has sufficient memory available",
},
{
Type: "DiskPressure",
Status: v1.ConditionFalse,
LastHeartbeatTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: "KubeletHasNoDiskPressure",
Message: "kubelet has no disk pressure",
},
{
Type: "NetworkUnavailable",
Status: v1.ConditionFalse,
LastHeartbeatTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: "RouteCreated",
Message: "RouteController created a route",
},
}
}
// NodeAddresses returns a list of addresses for the node status
// within Kubernetes.
func (p *Provider) NodeAddresses(ctx context.Context) []v1.NodeAddress {
// TODO: Use nomad api to get a list of node addresses.
return nil
}
// NodeDaemonEndpoints returns NodeDaemonEndpoints for the node status
// within Kubernetes.
func (p *Provider) NodeDaemonEndpoints(ctx context.Context) *v1.NodeDaemonEndpoints {
return &v1.NodeDaemonEndpoints{}
}
// OperatingSystem returns the operating system for this provider.
// This is a noop to default to Linux for now.
func (p *Provider) OperatingSystem() string {
return providers.OperatingSystemLinux
}
|
[
"\"NOMAD_ADDR\"",
"\"NOMAD_REGION\""
] |
[] |
[
"NOMAD_REGION",
"NOMAD_ADDR"
] |
[]
|
["NOMAD_REGION", "NOMAD_ADDR"]
|
go
| 2 | 0 | |
pytorch_lightning/utilities/debugging.py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from collections import Counter
from functools import wraps
from typing import Callable, Any, Optional
def enabled_only(fn: Callable):
    """Decorate a debugger method to run only when the debugger is enabled.
Args:
fn: Function to decorate
"""
@wraps(fn)
def wrapped_fn(self, *args, **kwargs):
if self.enabled:
fn(self, *args, **kwargs)
return wrapped_fn
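# A minimal usage sketch of the decorator above (assumes a Trainer instance
# named `trainer`; decorated tracking calls are silently skipped unless the
# debugger was created with PL_DEV_DEBUG=1 in the environment):
#
#   debugger = InternalDebugger(trainer)
#   debugger.track_logged_metrics_history({"loss": 0.5})  # no-op unless enabled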
class InternalDebugger(object):
def __init__(self, trainer):
self.enabled = os.environ.get('PL_DEV_DEBUG', '0') == '1'
self.trainer = trainer
self.logged_metrics = []
self.pbar_added_metrics = []
self.saved_train_losses = []
self.saved_val_losses = []
self.saved_test_losses = []
self.early_stopping_history = []
self.checkpoint_callback_history = []
self.events = []
self.saved_lr_scheduler_updates = []
self.train_dataloader_calls = []
self.val_dataloader_calls = []
self.test_dataloader_calls = []
self.dataloader_sequence_calls = []
def track_event(
self,
evt_type: str,
evt_value: Any = None,
global_rank: Optional[int] = None,
local_rank: Optional[int] = None,
comment: str = ''
) -> None:
self.events.append({
"timestamp": time.time(),
"event": evt_type,
"value": evt_value,
"global_rank": global_rank,
"local_rank": local_rank,
"comment": comment,
})
def count_events(self, evt_type: str, strict=False) -> int:
count = 0
for evt in self.events:
if strict and evt["event"] == evt_type:
count += 1
elif not strict and evt_type in evt["event"]:
count += 1
return count
@enabled_only
def track_load_dataloader_call(self, name, dataloaders):
loader_counts = len(dataloaders)
lengths = []
for dl in dataloaders:
try:
length = len(dl)
except Exception as e:
length = -1
lengths.append(length)
values = {
'global_step': self.trainer.global_step,
'epoch': self.trainer.current_epoch,
'num_loaders': loader_counts,
'lengths': lengths,
'name': name
}
        # track the call sequence in case we need to verify it later
self.dataloader_sequence_calls.append(values)
if 'train' in name:
self.train_dataloader_calls.append(values)
elif 'val' in name:
self.val_dataloader_calls.append(values)
elif 'test' in name:
self.test_dataloader_calls.append(values)
@enabled_only
def track_logged_metrics_history(self, scalar_metrics):
scalar_metrics['global_step'] = self.trainer.global_step
self.logged_metrics.append(scalar_metrics)
@enabled_only
def track_train_loss_history(self, batch_idx, loss):
loss_dict = {'batch_idx': batch_idx, 'epoch': self.trainer.current_epoch, 'loss': loss.detach()}
self.saved_train_losses.append(loss_dict)
@enabled_only
def track_lr_schedulers_update(self, batch_idx, interval, scheduler_idx, old_lr, new_lr, monitor_key=None):
loss_dict = {
'batch_idx': batch_idx,
'interval': interval,
'scheduler_idx': scheduler_idx,
'epoch': self.trainer.current_epoch,
'monitor_key': monitor_key,
'old_lr': old_lr,
'new_lr': new_lr
}
self.saved_lr_scheduler_updates.append(loss_dict)
@enabled_only
def track_eval_loss_history(self, test_mode, batch_idx, dataloader_idx, output):
loss_dict = {
'sanity_check': self.trainer.running_sanity_check,
'dataloader_idx': dataloader_idx,
'batch_idx': batch_idx,
'epoch': self.trainer.current_epoch,
'output': output
}
if test_mode:
self.saved_test_losses.append(loss_dict)
else:
self.saved_val_losses.append(loss_dict)
@enabled_only
def track_pbar_metrics_history(self, metrics):
metrics['debug_epoch'] = self.trainer.current_epoch
self.pbar_added_metrics.append(metrics)
@enabled_only
def track_early_stopping_history(self, callback, current):
debug_dict = {
'epoch': self.trainer.current_epoch,
'global_step': self.trainer.global_step,
'rank': self.trainer.global_rank,
'current': current,
'best': callback.best_score,
'patience': callback.wait_count
}
self.early_stopping_history.append(debug_dict)
@enabled_only
def track_checkpointing_history(self, filepath):
cb = self.trainer.checkpoint_callback
debug_dict = {
'epoch': self.trainer.current_epoch,
'global_step': self.trainer.global_step,
'monitor': cb.monitor,
'rank': self.trainer.global_rank,
'filepath': filepath
}
self.checkpoint_callback_history.append(debug_dict)
@property
def num_seen_sanity_check_batches(self):
count = len([x for x in self.saved_val_losses if x['sanity_check']])
return count
@property
def num_seen_val_check_batches(self):
counts = Counter()
for x in self.saved_val_losses:
if not x['sanity_check']:
counts.update({x['dataloader_idx']: 1})
return counts
@property
def num_seen_test_check_batches(self):
counts = Counter()
for x in self.saved_test_losses:
if not x['sanity_check']:
counts.update({x['dataloader_idx']: 1})
return counts
|
[] |
[] |
[
"PL_DEV_DEBUG"
] |
[]
|
["PL_DEV_DEBUG"]
|
python
| 1 | 0 | |
tau/core/utils.py
|
import os
import requests
from django.conf import settings
from constance import config
def check_access_token():
url = "https://id.twitch.tv/oauth2/validate"
access_token = config.TWITCH_ACCESS_TOKEN
headers = {"Authorization": f"OAuth {access_token}"}
req = requests.get(url, headers=headers)
data = req.json()
if "status" in data and int(data["status"]) == 401:
return False
else:
return True
def refresh_access_token():
refresh_token = config.TWITCH_REFRESH_TOKEN
client_id = os.environ.get('TWITCH_APP_ID', None)
client_secret = os.environ.get('TWITCH_CLIENT_SECRET', None)
req = requests.post('https://id.twitch.tv/oauth2/token', data={
'client_id': client_id,
'client_secret': client_secret,
'grant_type': 'refresh_token',
'refresh_token': refresh_token
})
data = req.json()
if 'access_token' in data:
config.TWITCH_REFRESH_TOKEN = data['refresh_token']
config.TWITCH_ACCESS_TOKEN = data['access_token']
else:
print('[ERROR] Could not refresh access token.')
def get_all_statuses():
keys = [
'STATUS_WEBSOCKET',
'STATUS_CHANNEL_UPDATE',
'STATUS_CHANNEL_FOLLOW',
'STATUS_CHANNEL_CHEER',
'STATUS_CHANNEL_POINT_REDEMPTION',
'STATUS_CHANNEL_RAID',
'STATUS_CHANNEL_HYPE_TRAIN_BEGIN',
'STATUS_CHANNEL_HYPE_TRAIN_PROGRESS',
'STATUS_CHANNEL_HYPE_TRAIN_END',
]
return [
{'event_type': key, 'old_value': None, 'new_value': getattr(config, key)} for key in keys
]
def setup_ngrok():
# pyngrok will only be installed if it is used.
from pyngrok import ngrok
print('---- Setting up ngrok tunnel ----')
# Get the dev server port (defaults to 8000 for Django, can be overridden with the
# last arg when calling `runserver`)
# addrport = urlparse("https://{}".format(sys.argv[-1]))
# port = addrport.port if addrport.netloc and addrport.port else 8000
port = int(os.environ.get("PORT", 8000))
if os.environ.get("USE_NGROK_TOKEN", 'false').lower() == 'true':
token = os.environ.get("NGROK_TOKEN", None)
ngrok.set_auth_token(token)
# Open an ngrok tunnel to the dev server
public_url = ngrok.connect(port).public_url.replace('http', 'https')
print(f" [Tunnel url: {public_url}]\n")
# Update any base URLs or webhooks to use the public ngrok URL
settings.BASE_URL = public_url
return public_url
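# A minimal usage sketch (assumes the caller guards this behind its own
# opt-in flag, e.g. a hypothetical USE_NGROK environment variable):
#
#   if os.environ.get("USE_NGROK", "false").lower() == "true":
#       public_url = setup_ngrok()
#       # public_url can then be used to (re)register Twitch webhooks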
|
[] |
[] |
[
"PORT",
"TWITCH_APP_ID",
"TWITCH_CLIENT_SECRET",
"USE_NGROK_TOKEN",
"NGROK_TOKEN"
] |
[]
|
["PORT", "TWITCH_APP_ID", "TWITCH_CLIENT_SECRET", "USE_NGROK_TOKEN", "NGROK_TOKEN"]
|
python
| 5 | 0 | |
cmd/minishift/cmd/start_preflight.go
|
/*
Copyright (C) 2017 Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"fmt"
"net"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
"github.com/docker/machine/libmachine/drivers"
configCmd "github.com/minishift/minishift/cmd/minishift/cmd/config"
"github.com/minishift/minishift/pkg/minikube/constants"
validations "github.com/minishift/minishift/pkg/minishift/config"
"github.com/minishift/minishift/pkg/minishift/shell/powershell"
minishiftUtil "github.com/minishift/minishift/pkg/minishift/util"
"github.com/minishift/minishift/pkg/util/github"
"github.com/minishift/minishift/pkg/util/os/atexit"
"github.com/spf13/viper"
cmdUtils "github.com/minishift/minishift/cmd/minishift/cmd/util"
openshiftVersion "github.com/minishift/minishift/pkg/minishift/openshift/version"
stringUtils "github.com/minishift/minishift/pkg/util/strings"
)
const (
StorageDisk = "/mnt/?da1"
)
// preflightChecksBeforeStartingHost is executed before the startHost function.
func preflightChecksBeforeStartingHost() {
driverErrorMessage := "See the 'Setting Up the Driver Plug-in' topic (https://docs.openshift.org/latest/minishift/getting-started/setting-up-driver-plugin.html) for more information"
prerequisiteErrorMessage := "See the 'Installing Prerequisites for Minishift' topic (https://docs.openshift.org/latest/minishift/getting-started/installing.html#install-prerequisites) for more information"
preflightCheckSucceedsOrFails(
configCmd.SkipCheckOpenShiftRelease.Name,
checkOriginRelease,
fmt.Sprintf("Checking if requested OpenShift version '%s' is valid", viper.GetString(configCmd.OpenshiftVersion.Name)),
configCmd.WarnCheckOpenShiftRelease.Name,
fmt.Sprintf("%s is not a valid OpenShift version", viper.GetString(configCmd.OpenshiftVersion.Name)),
)
preflightCheckSucceedsOrFails(
configCmd.SkipCheckOpenShiftVersion.Name,
validateOpenshiftVersion,
fmt.Sprintf("Checking if requested OpenShift version '%s' is supported", viper.GetString(configCmd.OpenshiftVersion.Name)),
configCmd.WarnCheckOpenShiftVersion.Name,
fmt.Sprintf("Minishift does not support OpenShift version %s. "+
"You need to use a version >= %s\n", viper.GetString(configCmd.OpenshiftVersion.Name),
constants.MinimumSupportedOpenShiftVersion),
)
preflightCheckSucceedsOrFails(
configCmd.SkipCheckVMDriver.Name,
checkVMDriver,
fmt.Sprintf("Checking if requested hypervisor '%s' is supported on this platform", viper.GetString(configCmd.VmDriver.Name)),
configCmd.WarnCheckVMDriver.Name,
driverErrorMessage)
switch viper.GetString(configCmd.VmDriver.Name) {
case "xhyve":
preflightCheckSucceedsOrFails(
configCmd.SkipCheckXHyveDriver.Name,
checkXhyveDriver,
"Checking if xhyve driver is installed",
configCmd.WarnCheckXHyveDriver.Name,
driverErrorMessage)
case "kvm":
preflightCheckSucceedsOrFails(
configCmd.SkipCheckKVMDriver.Name,
checkKvmDriver,
"Checking if KVM driver is installed",
configCmd.WarnCheckKVMDriver.Name,
driverErrorMessage)
preflightCheckSucceedsOrFails(
configCmd.SkipCheckKVMDriver.Name,
checkLibvirtInstalled,
"Checking if Libvirt is installed",
configCmd.WarnCheckKVMDriver.Name,
driverErrorMessage)
preflightCheckSucceedsOrFails(
configCmd.SkipCheckKVMDriver.Name,
checkLibvirtDefaultNetworkExists,
"Checking if Libvirt default network is present",
configCmd.WarnCheckKVMDriver.Name,
driverErrorMessage)
preflightCheckSucceedsOrFails(
configCmd.SkipCheckKVMDriver.Name,
checkLibvirtDefaultNetworkActive,
"Checking if Libvirt default network is active",
configCmd.WarnCheckKVMDriver.Name,
driverErrorMessage)
case "hyperv":
preflightCheckSucceedsOrFails(
configCmd.SkipCheckHyperVDriver.Name,
checkHypervDriverInstalled,
"Checking if Hyper-V driver is installed",
configCmd.WarnCheckHyperVDriver.Name,
driverErrorMessage)
preflightCheckSucceedsOrFails(
configCmd.SkipCheckHyperVDriver.Name,
checkHypervDriverSwitch,
"Checking if Hyper-V driver is configured to use a Virtual Switch",
configCmd.WarnCheckHyperVDriver.Name,
driverErrorMessage)
preflightCheckSucceedsOrFails(
configCmd.SkipCheckHyperVDriver.Name,
checkHypervDriverUser,
"Checking if user is a member of the Hyper-V Administrators group",
configCmd.WarnCheckHyperVDriver.Name,
driverErrorMessage)
case "virtualbox":
preflightCheckSucceedsOrFails(
configCmd.SkipCheckVBoxInstalled.Name,
checkVBoxInstalled,
"Checking if VirtualBox is installed",
configCmd.WarnCheckVBoxInstalled.Name,
prerequisiteErrorMessage)
}
preflightCheckSucceedsOrFails(
configCmd.SkipCheckIsoUrl.Name,
checkIsoURL,
"Checking the ISO URL",
configCmd.WarnCheckIsoUrl.Name,
"See the 'Basic Usage' topic (https://docs.openshift.org/latest/minishift/using/basic-usage.html) for more information")
}
// preflightChecksAfterStartingHost is executed after the startHost function.
func preflightChecksAfterStartingHost(driver drivers.Driver) {
preflightCheckSucceedsOrFailsWithDriver(
configCmd.SkipInstanceIP.Name,
checkInstanceIP, driver,
"Checking for IP address",
configCmd.WarnInstanceIP.Name,
"Error determining IP address")
preflightCheckSucceedsOrFailsWithDriver(
configCmd.SkipCheckNetworkPing.Name,
checkIPConnectivity, driver,
"Checking if external host is reachable from the Minishift VM",
configCmd.WarnCheckNetworkPing.Name,
"VM is unable to ping external host")
preflightCheckSucceedsOrFailsWithDriver(
configCmd.SkipCheckNetworkHTTP.Name,
checkHttpConnectivity, driver,
"Checking HTTP connectivity from the VM",
configCmd.WarnCheckNetworkHTTP.Name,
"VM cannot connect to external URL with HTTP")
preflightCheckSucceedsOrFailsWithDriver(
configCmd.SkipCheckStorageMount.Name,
checkStorageMounted, driver,
"Checking if persistent storage volume is mounted",
configCmd.WarnCheckStorageMount.Name,
"Persistent volume storage is not mounted")
preflightCheckSucceedsOrFailsWithDriver(
configCmd.SkipCheckStorageUsage.Name,
checkStorageUsage, driver,
"Checking available disk space",
configCmd.WarnCheckStorageUsage.Name,
"Insufficient disk space on the persistent storage volume")
}
// preflightCheckFunc returns true when the check passes
type preflightCheckFunc func() bool
// preflightCheckWithDriverFunc uses the driver to interact with the VM instance
// and returns true when the check passes
type preflightCheckWithDriverFunc func(driver drivers.Driver) bool
// preflightCheckSucceedsOrFails executes a pre-flight test function and prints
// the returned status in a standardized way. If the test fails and returns
// false, the application exits with errorMessage to describe the cause.
// configNameOverrideIfSkipped allows the test to be skipped, while
// configNameOverrideIfWarning causes a failure to be treated as a warning
// instead.
func preflightCheckSucceedsOrFails(configNameOverrideIfSkipped string, execute preflightCheckFunc, message string, configNameOverrideIfWarning string, errorMessage string) {
fmt.Printf("-- %s ... ", message)
isConfiguredToSkip := viper.GetBool(configNameOverrideIfSkipped)
isConfiguredToWarn := viper.GetBool(configNameOverrideIfWarning)
if isConfiguredToSkip {
fmt.Println("SKIP")
return
}
if execute() {
fmt.Println("OK")
return
}
fmt.Println("FAIL")
errorMessage = fmt.Sprintf(" %s", errorMessage)
if isConfiguredToWarn {
fmt.Println(errorMessage)
} else {
atexit.ExitWithMessage(1, errorMessage)
}
}
// preflightCheckSucceedsOrFailsWithDriver executes a pre-flight test function
// which uses the driver to interact with the VM instance. It prints the
// returned status in a standardized way. If the test fails and returns false,
// the application exits with errorMessage to describe the cause.
// configNameOverrideIfSkipped allows the test to be skipped, while
// configNameOverrideIfWarning causes a failure to be treated as a warning
// instead.
func preflightCheckSucceedsOrFailsWithDriver(configNameOverrideIfSkipped string, execute preflightCheckWithDriverFunc, driver drivers.Driver, message string, configNameOverrideIfWarning string, errorMessage string) {
fmt.Printf("-- %s ... ", message)
isConfiguredToSkip := viper.GetBool(configNameOverrideIfSkipped)
isConfiguredToWarn := viper.GetBool(configNameOverrideIfWarning)
if isConfiguredToSkip {
fmt.Println("SKIP")
return
}
if execute(driver) {
fmt.Println("OK")
return
}
fmt.Println("FAIL")
errorMessage = fmt.Sprintf(" %s", errorMessage)
if isConfiguredToWarn {
fmt.Println(errorMessage)
} else {
atexit.ExitWithMessage(1, errorMessage)
}
}
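// A minimal sketch of wiring up an additional check with the helpers above
// (the Skip/Warn flag names and checkSomething are hypothetical; the real flag
// definitions live in the configCmd package):
//
//	preflightCheckSucceedsOrFails(
//		configCmd.SkipCheckSomething.Name, // hypothetical skip flag
//		checkSomething,                    // func() bool
//		"Checking something",
//		configCmd.WarnCheckSomething.Name, // hypothetical warn flag
//		"Something is not configured correctly")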
// checkXhyveDriver returns true if xhyve driver is available on path and has
// the setuid-bit set
func checkXhyveDriver() bool {
path, err := exec.LookPath("docker-machine-driver-xhyve")
if err != nil {
return false
}
fi, _ := os.Stat(path)
// follow symlinks
if fi.Mode()&os.ModeSymlink != 0 {
path, err = os.Readlink(path)
if err != nil {
return false
}
}
fmt.Println("\n Driver is available at", path)
fmt.Printf(" Checking for setuid bit ... ")
if fi.Mode()&os.ModeSetuid == 0 {
return false
}
return true
}
// checkKvmDriver returns true if KVM driver is available on path
func checkKvmDriver() bool {
path, err := exec.LookPath("docker-machine-driver-kvm")
if err != nil {
return false
}
fi, _ := os.Stat(path)
// follow symlinks
if fi.Mode()&os.ModeSymlink != 0 {
path, err = os.Readlink(path)
if err != nil {
return false
}
}
fmt.Println(fmt.Sprintf("\n Driver is available at %s ... ", path))
fmt.Printf(" Checking driver binary is executable ... ")
if fi.Mode()&0011 == 0 {
return false
}
return true
}
// checkLibvirtInstalled returns true if Libvirt is installed
func checkLibvirtInstalled() bool {
path, err := exec.LookPath("virsh")
if err != nil {
return false
}
fi, _ := os.Stat(path)
if fi.Mode()&os.ModeSymlink != 0 {
path, err = os.Readlink(path)
if err != nil {
return false
}
}
return true
}
// checkLibvirtDefaultNetworkExists returns true if the "default" network is present
func checkLibvirtDefaultNetworkExists() bool {
cmd := exec.Command("virsh", "--connect", "qemu:///system", "net-list")
stdOutStdError, err := cmd.CombinedOutput()
if err != nil {
return false
}
stdOut := fmt.Sprintf("%s", stdOutStdError)
outputSlice := strings.Split(stdOut, "\n")
for _, stdOut = range outputSlice {
stdOut = strings.TrimSpace(stdOut)
match, err := regexp.MatchString("^default\\s", stdOut)
if err != nil {
return false
}
if match {
return true
}
}
return false
}
// checkLibvirtDefaultNetworkActive returns true if the "default" network is active
func checkLibvirtDefaultNetworkActive() bool {
cmd := exec.Command("virsh", "--connect", "qemu:///system", "net-list")
cmd.Env = cmdUtils.ReplaceEnv(os.Environ(), "LC_ALL", "C")
stdOutStdError, err := cmd.CombinedOutput()
if err != nil {
return false
}
stdOut := fmt.Sprintf("%s", stdOutStdError)
outputSlice := strings.Split(stdOut, "\n")
for _, stdOut = range outputSlice {
stdOut = strings.TrimSpace(stdOut)
match, err := regexp.MatchString("^default\\s", stdOut)
if err != nil {
return false
}
if match && strings.Contains(stdOut, "active") {
return true
}
}
return false
}
// checkHypervDriverSwitch returns true if Virtual Switch has been selected
func checkHypervDriverSwitch() bool {
switchEnv := os.Getenv("HYPERV_VIRTUAL_SWITCH")
if switchEnv == "" {
return false
}
return true
}
// checkHypervDriverInstalled returns true if Hyper-V driver is installed
func checkHypervDriverInstalled() bool {
// Check if Hyper-V's Virtual Machine Management Service is installed
_, err := exec.LookPath("vmms.exe")
if err != nil {
return false
}
	// Check if a hypervisor is present; when Hyper-V is installed and enabled, HypervisorPresent reports True.
posh := powershell.New()
checkHypervisorPresent := `@(Get-Wmiobject Win32_ComputerSystem).HypervisorPresent`
stdOut, _ := posh.Execute(checkHypervisorPresent)
if !strings.Contains(stdOut, "True") {
return false
}
return true
}
// checkHypervDriverUser returns true if the user is a member of the Hyper-V Administrators group
func checkHypervDriverUser() bool {
posh := powershell.New()
// Use RID to prevent issues with localized groups: https://github.com/minishift/minishift/issues/1541
// https://support.microsoft.com/en-us/help/243330/well-known-security-identifiers-in-windows-operating-systems
// BUILTIN\Hyper-V Administrators => S-1-5-32-578
	// Hyper-V Administrators group check fails: https://github.com/minishift/minishift/issues/2047
	// Using the SecurityIdentifier overload of IsInRole()
checkIfMemberOfHyperVAdmins :=
`$sid = New-Object System.Security.Principal.SecurityIdentifier("S-1-5-32-578")
@([Security.Principal.WindowsPrincipal][Security.Principal.WindowsIdentity]::GetCurrent()).IsInRole($sid)`
stdOut, _ := posh.Execute(checkIfMemberOfHyperVAdmins)
if !strings.Contains(stdOut, "True") {
return false
}
return true
}
// checkInstanceIP makes sure the instance has an IPv4 address.
// HyperV will issue IPv6 addresses on Internal virtual switch
// https://github.com/minishift/minishift/issues/418
func checkInstanceIP(driver drivers.Driver) bool {
ip, err := driver.GetIP()
if err == nil && net.ParseIP(ip).To4() != nil {
return true
}
return false
}
// checkIPConnectivity checks if the VM has connectivity to the outside network
func checkIPConnectivity(driver drivers.Driver) bool {
ipToPing := viper.GetString(configCmd.CheckNetworkPingHost.Name)
fmt.Printf("\n Pinging %s ... ", ipToPing)
return minishiftUtil.IsIPReachable(driver, ipToPing, false)
}
// checkHttpConnectivity allows to test outside connectivity and possible proxy support
func checkHttpConnectivity(driver drivers.Driver) bool {
urlToRetrieve := viper.GetString(configCmd.CheckNetworkHttpHost.Name)
fmt.Printf("\n Retrieving %s ... ", urlToRetrieve)
return minishiftUtil.IsRetrievable(driver, urlToRetrieve, false)
}
// checkStorageMounted checks if the persistent storage volume, storageDisk, is
// mounted to the VM instance
func checkStorageMounted(driver drivers.Driver) bool {
mounted, _ := isMounted(driver, StorageDisk)
return mounted
}
// checkStorageUsage checks if the persistent storage volume has enough storage
// space available.
func checkStorageUsage(driver drivers.Driver) bool {
_, usedPercentage := getDiskUsage(driver, StorageDisk)
fmt.Printf("%s used ", usedPercentage)
usage, err := strconv.Atoi(stringUtils.GetOnlyNumbers(usedPercentage))
if err != nil {
return false
}
if usage > 80 && usage < 95 {
fmt.Printf("!!! ")
}
if usage < 95 {
return true
}
return false
}
// getDiskUsage returns the size and usage of the given mountpoint on the VM instance
func getDiskUsage(driver drivers.Driver, mountpoint string) (string, string) {
cmd := fmt.Sprintf(
"df -h %s | awk 'FNR > 1 {print $2,$5}'",
mountpoint)
out, err := drivers.RunSSHCommandFromDriver(driver, cmd)
if err != nil {
return "", "ERR"
}
diskDetails := strings.Split(strings.Trim(out, "\n"), " ")
diskSize := diskDetails[0]
diskUsage := diskDetails[1]
return diskSize, diskUsage
}
// isMounted checks if mountpoint is mounted to the VM instance
func isMounted(driver drivers.Driver, mountpoint string) (bool, error) {
cmd := fmt.Sprintf(
"if grep -qs %s /proc/mounts; then echo '1'; else echo '0'; fi",
mountpoint)
out, err := drivers.RunSSHCommandFromDriver(driver, cmd)
if err != nil {
return false, err
}
if strings.Trim(out, "\n") == "0" {
return false, nil
}
return true, nil
}
// checkIsoURL checks the ISO URL and returns true if it is valid
func checkIsoURL() bool {
isoUrl := viper.GetString(configCmd.ISOUrl.Name)
err := validations.IsValidISOUrl(configCmd.ISOUrl.Name, isoUrl)
if err != nil {
return false
}
return true
}
func checkVMDriver() bool {
err := validations.IsValidDriver(configCmd.VmDriver.Name, viper.GetString(configCmd.VmDriver.Name))
if err != nil {
return false
}
return true
}
func validateOpenshiftVersion() bool {
requestedVersion := viper.GetString(configCmd.OpenshiftVersion.Name)
valid, err := openshiftVersion.IsGreaterOrEqualToBaseVersion(requestedVersion, constants.MinimumSupportedOpenShiftVersion)
if err != nil {
return false
}
if !valid {
return false
}
return true
}
// checkOriginRelease returns true if the specified version of OpenShift has been released
func checkOriginRelease() bool {
client := github.Client()
_, _, err := client.Repositories.GetReleaseByTag("openshift", "origin", viper.GetString(configCmd.OpenshiftVersion.Name))
if err != nil {
return false
}
return true
}
|
[
"\"HYPERV_VIRTUAL_SWITCH\""
] |
[] |
[
"HYPERV_VIRTUAL_SWITCH"
] |
[]
|
["HYPERV_VIRTUAL_SWITCH"]
|
go
| 1 | 0 | |
plugins/trivy/cmd/trivy/main.go
|
package main
import (
"context"
"encoding/json"
"io/ioutil"
"os"
"os/exec"
"sort"
"strconv"
"github.com/fairwindsops/insights-plugins/trivy/pkg/image"
"github.com/fairwindsops/insights-plugins/trivy/pkg/models"
"github.com/fairwindsops/insights-plugins/trivy/pkg/util"
)
var maxConcurrentScans = 5
var numberToScan = 10
const outputFile = image.TempDir + "/final-report.json"
func main() {
concurrencyStr := os.Getenv("MAX_CONCURRENT_SCANS")
if concurrencyStr != "" {
var err error
maxConcurrentScans, err = strconv.Atoi(concurrencyStr)
if err != nil {
panic(err)
}
}
numberToScanStr := os.Getenv("MAX_SCANS")
if numberToScanStr != "" {
var err error
numberToScan, err = strconv.Atoi(numberToScanStr)
if err != nil {
panic(err)
}
}
err := util.RunCommand(exec.Command("trivy", "--download-db-only"), "downloading trivy database")
if err != nil {
panic(err)
}
util.CheckEnvironmentVariables()
lastReport := image.GetLastReport()
ctx := context.Background()
images, err := image.GetImages(ctx)
if err != nil {
panic(err)
}
imagesToScan := make([]models.Image, 0)
for _, image := range images {
found := false
for _, report := range lastReport.Images {
if report.Name == image.Name && report.ID == image.ID {
found = true
break
}
}
if !found {
imagesToScan = append(imagesToScan, image)
}
}
imagesToKeep := make([]models.ImageDetailsWithRefs, 0)
sort.Slice(lastReport.Images, func(a, b int) bool {
return lastReport.Images[a].LastScan == nil || lastReport.Images[b].LastScan != nil && lastReport.Images[a].LastScan.Before(*lastReport.Images[b].LastScan)
})
for _, report := range lastReport.Images {
keep := false
for _, image := range images {
if report.Name == image.Name && report.ID == image.ID {
if len(imagesToScan) < numberToScan {
imagesToScan = append(imagesToScan, image)
break
}
keep = true
break
}
}
if keep {
imagesToKeep = append(imagesToKeep, report)
}
}
lastReport.Images = imagesToKeep
if len(imagesToScan) > numberToScan {
imagesToScan = imagesToScan[:numberToScan]
}
allReports := image.ScanImages(imagesToScan, maxConcurrentScans)
finalReport := image.Minimize(allReports, lastReport)
data, err := json.Marshal(finalReport)
if err != nil {
panic(err)
}
err = ioutil.WriteFile(outputFile, data, 0644)
if err != nil {
panic(err)
}
}
|
[
"\"MAX_CONCURRENT_SCANS\"",
"\"MAX_SCANS\""
] |
[] |
[
"MAX_SCANS",
"MAX_CONCURRENT_SCANS"
] |
[]
|
["MAX_SCANS", "MAX_CONCURRENT_SCANS"]
|
go
| 2 | 0 | |
Bicycle_parking/settings/environments/production.py
|
import os
import dj_database_url
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
SECRET_KEY = os.environ.get('SECRET_KEY')
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
S3_BUCKET = os.environ.get('S3_BUCKET')
AWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME')
MAPS_API_KEY = os.environ.get('MAPS_API_KEY')
STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
AWS_S3_CUSTOM_DOMAIN = os.environ.get('STATIC_URL')
STATIC_URL = '/static/'
STATIC_ROOT = 'static'
AWS_S3_OBJECT_PARAMETERS = {
'CacheControl': 'max-age=3000',
}
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# please fill out these settings for your own local machine!
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'bike_parking_toronto',
'USER': os.getenv('BIKE_DB_USER', 'postgres'),
'PASSWORD': os.getenv('BIKE_DB_PW', 'postgres'),
'HOST': os.getenv('BIKE_DB_HOST', 'localhost'),
'PORT': '5432',
}
}
# note: BIKE_DB_* variables replace $DATABASE_URL to support
# multiple database access
# Update database configuration with $DATABASE_URL.
# db_from_env = dj_database_url.config(conn_max_age=500)
# DATABASES['default'].update(db_from_env)
# define the database routers; these objects route requests passed to the django
# routines to update or access a table defined as a model class in python
# to the appropriate database
DATABASE_ROUTERS = ['bicycleparking.Routers.DefaultRouting']
# my_project/settings.py
LOGIN_REDIRECT_URL = '/moderate_unapproved'
|
[] |
[] |
[
"S3_BUCKET",
"AWS_SECRET_ACCESS_KEY",
"STATIC_URL",
"MAPS_API_KEY",
"AWS_STORAGE_BUCKET_NAME",
"SECRET_KEY",
"BIKE_DB_HOST",
"AWS_ACCESS_KEY_ID",
"BIKE_DB_PW",
"BIKE_DB_USER"
] |
[]
|
["S3_BUCKET", "AWS_SECRET_ACCESS_KEY", "STATIC_URL", "MAPS_API_KEY", "AWS_STORAGE_BUCKET_NAME", "SECRET_KEY", "BIKE_DB_HOST", "AWS_ACCESS_KEY_ID", "BIKE_DB_PW", "BIKE_DB_USER"]
|
python
| 10 | 0 | |
contrib/spendfrom/spendfrom.py
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend EZPAYs received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a eazypayzad or eazypayza-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the eazypayza data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/EazyPayZA/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "EazyPayZA")
return os.path.expanduser("~/.eazypayza")
def read_bitcoin_config(dbdir):
"""Read the eazypayza.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "eazypayza.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a eazypayza JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 155662 if testnet else 5567
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the eazypayzad we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(eazypayzad):
info = eazypayzad.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
eazypayzad.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = eazypayzad.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(eazypayzad):
address_summary = dict()
address_to_account = dict()
for info in eazypayzad.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = eazypayzad.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = eazypayzad.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-eazypayza-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(eazypayzad, fromaddresses, toaddress, amount, fee):
all_coins = list_available(eazypayzad)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to eazypayzad.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = eazypayzad.createrawtransaction(inputs, outputs)
signed_rawtx = eazypayzad.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
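# A minimal sketch of the Decimal/json limitation noted above (stock json
# module; illustrative only):
#
#   >>> import json
#   >>> from decimal import Decimal
#   >>> json.dumps({"amount": Decimal("0.1")})
#   TypeError: Decimal('0.1') is not JSON serializable
#   >>> json.dumps({"amount": float(Decimal("0.1"))})
#   '{"amount": 0.1}'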
def compute_amount_in(eazypayzad, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = eazypayzad.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(eazypayzad, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = eazypayzad.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(eazypayzad, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
        if kb > 1 and (total_in-total_out) < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and (total_in-total_out) < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get EZPAYs from")
parser.add_option("--to", dest="to", default=None,
help="address to get send EZPAYs to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of eazypayza.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
eazypayzad = connect_JSON(config)
if options.amount is None:
address_summary = list_available(eazypayzad)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(eazypayzad) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(eazypayzad, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(eazypayzad, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = eazypayzad.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"APPDATA"
] |
[]
|
["APPDATA"]
|
python
| 1 | 0 | |
firewall/windows.go
|
// +build windows
package firewall
import (
"fmt"
"github.com/ingrammicro/concerto/api/types"
"github.com/ingrammicro/concerto/firewall/discovery"
"github.com/ingrammicro/concerto/utils"
)
func driverName() string {
return "windows"
}
func Apply(policy types.Policy) error {
err := flush()
if err != nil {
return err
}
for i, rule := range policy.Rules {
cidr := rule.Cidr
if rule.Cidr == "0.0.0.0/0" {
cidr = "any"
}
ruleCmd := fmt.Sprintf(
"netsh advfirewall firewall add rule name=\"Concerto firewall %d\" dir=in action=allow remoteip=\"%s\" protocol=\"%s\" localport=\"%d-%d\"",
i, cidr, rule.Protocol, rule.MinPort, rule.MaxPort)
utils.RunCmd(ruleCmd)
}
utils.RunCmd("netsh advfirewall set allprofiles state on")
return nil
}
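// As a sketch, a rule {Cidr: "10.0.0.0/8", Protocol: "tcp", MinPort: 80, MaxPort: 443}
// at index 0 would produce roughly:
//
//	netsh advfirewall firewall add rule name="Concerto firewall 0" dir=in action=allow remoteip="10.0.0.0/8" protocol="tcp" localport="80-443"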
func flush() error {
fc, err := discovery.CurrentFirewallRules()
if err != nil {
return err
}
utils.RunCmd("netsh advfirewall set allprofiles state off")
utils.RunCmd("netsh advfirewall set allprofiles firewallpolicy allowinbound,allowoutbound")
//utils.RunCmd("netsh advfirewall firewall delete rule name=all")
for _, r := range fc[0].Rules {
utils.RunCmd(fmt.Sprintf("netsh advfirewall firewall delete rule name=%q", r.Name))
}
return nil
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
AT/train_adv_cifar10_wd.py
|
from __future__ import print_function
import os
import pickle
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torch.optim as optim
from torchvision import datasets, transforms
# from models.wideresnet import *
from models import *
from losses import alp_loss, pgd_loss, trades_loss
from lip.add_lip import bind_lip
from lip.recorder import Recorder
from auto_attack.autoattack import AutoAttack
def train(args, model, device, train_loader, optimizer, recorder, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
# calculate robust loss
loss = LOSS[args.loss](model=model,
x_natural=data,
y=target,
optimizer=optimizer,
step_size=args.step_size,
epsilon=args.epsilon,
perturb_steps=args.num_steps,
beta=args.beta,
loss=args.loss,
distance=args.distance,
m = args.m,
s = args.s)
loss.backward()
lipc, all_lip = model.calc_lip()
recorder.record('lip_sum', lipc)
recorder.record('lip', all_lip)
optimizer.step()
# print progress
# if batch_idx % args.log_interval == 0:
# print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
# epoch, batch_idx * len(data), len(train_loader.dataset),
# 100. * batch_idx / len(train_loader), loss.item()))
def eval_train(model, device, train_loader, recorder):
model.eval()
train_loss = 0
correct = 0
with torch.no_grad():
for data, target in train_loader:
data, target = data.to(device), target.to(device)
output = model(data)
train_loss += F.cross_entropy(output, target, size_average=False).item()
pred = output.max(1, keepdim=True)[1]
correct += pred.eq(target.view_as(pred)).sum().item()
train_loss /= len(train_loader.dataset)
print('Training: loss: {:.4f}, Acc: {}/{} ({:.0f}%)'.format(
train_loss, correct, len(train_loader.dataset),
100. * correct / len(train_loader.dataset)), end=' | ')
training_accuracy = correct / len(train_loader.dataset)
recorder.record('train_acc', training_accuracy)
recorder.record_train(train_loss)
return train_loss, training_accuracy
def eval_test(model, device, test_loader, recorder):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.cross_entropy(output, target, size_average=False).item()
pred = output.max(1, keepdim=True)[1]
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('Test: loss: {:.4f}, Acc: {}/{} ({:.0f}%)'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
test_accuracy = correct / len(test_loader.dataset)
recorder.record('test_acc', test_accuracy)
recorder.record_test(test_loss)
return test_loss, test_accuracy
def adjust_learning_rate(optimizer, epoch):
"""decrease the learning rate"""
lr = args.lr
if epoch >= 75:
lr = args.lr * 0.1
if epoch >= 90:
lr = args.lr * 0.01
if epoch >= 100:
lr = args.lr * 0.001
for param_group in optimizer.param_groups:
param_group['lr'] = lr
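# With the default --lr 0.1 this schedule works out to (illustrative):
#   epochs  1-74 -> 0.1
#   epochs 75-89 -> 0.01
#   epochs 90-99 -> 0.001
#   epochs 100+  -> 0.0001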
def main():
# init model, ResNet18() can be also used here for training
if args.loss == 'alp' or args.loss == 'trades' or args.loss == 'pgd':
print("normalize False")
model = nets[args.model]().to(device)
else:
print("normalize True")
model = nets[args.model](use_FNandWN=True).to(device)
bind_lip(model, norm='1-norm', verbose=False)
recorder = Recorder(f'{name}')
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
for epoch in range(1, args.epochs + 1):
# adjust learning rate for SGD
print(f'Epoch: {epoch:3d}', end=' ')
adjust_learning_rate(optimizer, epoch)
# adversarial training
train(args, model, device, train_loader, optimizer, recorder, epoch)
# evaluation on natural examples
# print('==============')
eval_train(model, device, train_loader, recorder)
eval_test(model, device, test_loader, recorder)
# print('==============')
# save checkpoint
if (epoch >= args.start_freq) and (epoch % args.save_freq == 0):
torch.save(model.state_dict(),
os.path.join(model_dir, f'{name}-epoch{epoch}.pt'))
recorder.step()
torch.save(model.state_dict(), os.path.join(model_dir, f'{name}.pt'))
with open(f'{log_dir}/{name}_record.pkl', 'wb') as file:
pickle.dump(recorder, file)
recorder.draw('lip_sum')
recorder.draw_many('lip')
recorder.draw('train_acc')
recorder.draw('test_acc')
adversary = AutoAttack(model, norm='Linf', eps=8/255, version='standard', verbose=False)
adversary.attacks_to_run = ['apgd-ce', 'apgd-t']
model.eval()
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.to(device), targets.to(device)
# print(inputs.max(), inputs.min())
x_adv, robust_accuracy = adversary.run_standard_evaluation(inputs, targets, bs=128)
print(f'robust_accuracy: {robust_accuracy}')
break
recorder.record('robust_accuracy', robust_accuracy)
with open(f'{log_dir}/{name}_record.pkl', 'wb') as file:
pickle.dump(recorder, file)
if __name__ == '__main__':
nets = {
'vgg': VGG,
'regnet': RegNetX_200MF,
'resnet': ResNet18,
'preact_resnet': PreActResNet18,
'googlenet': GoogLeNet,
'densenet': DenseNet121,
'resnetxt': ResNeXt29_2x64d,
'mobilenet': MobileNet,
'mobilenet2': MobileNetV2,
'dpn': DPN92,
'shefflenet': ShuffleNetG2,
'senet': SENet18,
'shefflenet2': ShuffleNetV2,
'efficientnet': EfficientNetB0
}
models = [key for key, value in nets.items()]
parser = argparse.ArgumentParser(description='Adversarial Training')
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
help='input batch size for training (default: 128)')
parser.add_argument('--test-batch-size', type=int, default=128, metavar='N',
help='input batch size for testing (default: 128)')
parser.add_argument('--epochs', type=int, default=120, metavar='N',
help='number of epochs to train')
parser.add_argument('--weight-decay', '--wd', default=0.,
type=float, metavar='W')
parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
help='learning rate')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--epsilon', default=0.031,
help='perturbation')
parser.add_argument('--num-steps', default=10,
help='perturb number of steps')
parser.add_argument('--step-size', default=0.007,
help='perturb step size')
parser.add_argument('--beta', type = float, default=1.0)
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--snap-epoch', type=int, default=5, metavar='N',
help='how many batches to test')
parser.add_argument('--model', default='resnet', type=str,
choices=models, help='model to use')
parser.add_argument('--save-freq', default=10, type=int, metavar='N',
help='save frequency')
parser.add_argument('--start-freq', default=1, type=int, metavar='N',
help='start point')
parser.add_argument('--loss', default='pgd', type=str,
choices=['pgd', 'pgd_he', 'alp', 'alp_he', 'trades', 'trades_he'])
parser.add_argument('--distance', default='l_inf', type=str, help='distance')
parser.add_argument('--m', default=0.2, type=float, help='angular margin')
parser.add_argument('--s', default=15.0, type=float, help='s value')
parser.add_argument('--gpu_id', default='0', type=str, help='gpu id to use')
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
model_dir = "checkpoint_wd//" + args.loss
if not os.path.exists(model_dir):
os.makedirs(model_dir)
log_dir = './log_wd'
if not os.path.exists(log_dir):
os.makedirs(log_dir)
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 2, 'pin_memory': True} if use_cuda else {}
# setup data loader
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
])
trainset = torchvision.datasets.CIFAR10(root='../data', train=True, download=True, transform=transform_train)
train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, **kwargs)
testset = torchvision.datasets.CIFAR10(root='../data', train=False, download=True, transform=transform_test)
test_loader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch_size, shuffle=False, **kwargs)
testloader = torch.utils.data.DataLoader(testset, batch_size=128*8, shuffle=True, **kwargs)
LOSS= {
'pgd': pgd_loss,
'pgd_he': pgd_loss,
'alp': alp_loss,
'alp_he': alp_loss,
'trades': trades_loss,
'trades_he': trades_loss,
}
for los in ['alp']:
args.loss = los
name = f'{args.model}_{args.loss}_wd'
print(f'Using {name}')
main()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
cmd/abapEnvironmentRunATCCheck_generated.go
|
// Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/splunk"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/SAP/jenkins-library/pkg/validation"
"github.com/spf13/cobra"
)
type abapEnvironmentRunATCCheckOptions struct {
AtcConfig string `json:"atcConfig,omitempty"`
Repositories string `json:"repositories,omitempty"`
CfAPIEndpoint string `json:"cfApiEndpoint,omitempty"`
CfOrg string `json:"cfOrg,omitempty"`
CfServiceInstance string `json:"cfServiceInstance,omitempty"`
CfServiceKeyName string `json:"cfServiceKeyName,omitempty"`
CfSpace string `json:"cfSpace,omitempty"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
Host string `json:"host,omitempty"`
AtcResultsFileName string `json:"atcResultsFileName,omitempty"`
GenerateHTML bool `json:"generateHTML,omitempty"`
}
// AbapEnvironmentRunATCCheckCommand Runs an ATC Check
func AbapEnvironmentRunATCCheckCommand() *cobra.Command {
const STEP_NAME = "abapEnvironmentRunATCCheck"
metadata := abapEnvironmentRunATCCheckMetadata()
var stepConfig abapEnvironmentRunATCCheckOptions
var startTime time.Time
var logCollector *log.CollectorHook
var splunkClient *splunk.Splunk
telemetryClient := &telemetry.Telemetry{}
var createAbapEnvironmentRunATCCheckCmd = &cobra.Command{
Use: STEP_NAME,
Short: "Runs an ATC Check",
Long: `This step is for triggering an [ATC](https://help.sap.com/viewer/65de2977205c403bbc107264b8eccf4b/Cloud/en-US/d8cec788fc104ff9ad9c3757b4dd13d4.html) test run on an SAP Cloud Platform ABAP Environment system.
Please provide either of the following options:
* The host and credentials the Cloud Platform ABAP Environment system itself. The credentials must be configured for the Communication Scenario [SAP_COM_0510](https://help.sap.com/viewer/65de2977205c403bbc107264b8eccf4b/Cloud/en-US/b04a9ae412894725a2fc539bfb1ca055.html).
* The Cloud Foundry parameters (API endpoint, organization, space), credentials, the service instance for the ABAP service and the service key for the Communication Scenario SAP_COM_0510.
* Only provide one of those options with the respective credentials. If all values are provided, the direct communication (via host) has priority.
Regardless of the option you chose, please make sure to provide the configuration the object set (e.g. with Software Components and Packages) that you want to be checked analog to the examples listed on this page.`,
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
GeneralConfig.GitHubAccessTokens = ResolveAccessTokens(GeneralConfig.GitHubTokens)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
log.RegisterSecret(stepConfig.Username)
log.RegisterSecret(stepConfig.Password)
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunkClient = &splunk.Splunk{}
logCollector = &log.CollectorHook{CorrelationID: GeneralConfig.CorrelationID}
log.RegisterHook(logCollector)
}
validation, err := validation.New(validation.WithJSONNamesForStructFields(), validation.WithPredefinedErrorMessages())
if err != nil {
return err
}
if err = validation.ValidateStruct(stepConfig); err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
stepTelemetryData := telemetry.CustomData{}
stepTelemetryData.ErrorCode = "1"
handler := func() {
config.RemoveVaultSecretFiles()
stepTelemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
stepTelemetryData.ErrorCategory = log.GetErrorCategory().String()
stepTelemetryData.PiperCommitHash = GitCommit
telemetryClient.SetData(&stepTelemetryData)
telemetryClient.Send()
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunkClient.Send(telemetryClient.GetData(), logCollector)
}
}
log.DeferExitHandler(handler)
defer handler()
telemetryClient.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunkClient.Initialize(GeneralConfig.CorrelationID,
GeneralConfig.HookConfig.SplunkConfig.Dsn,
GeneralConfig.HookConfig.SplunkConfig.Token,
GeneralConfig.HookConfig.SplunkConfig.Index,
GeneralConfig.HookConfig.SplunkConfig.SendLogs)
}
abapEnvironmentRunATCCheck(stepConfig, &stepTelemetryData)
stepTelemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addAbapEnvironmentRunATCCheckFlags(createAbapEnvironmentRunATCCheckCmd, &stepConfig)
return createAbapEnvironmentRunATCCheckCmd
}
func addAbapEnvironmentRunATCCheckFlags(cmd *cobra.Command, stepConfig *abapEnvironmentRunATCCheckOptions) {
cmd.Flags().StringVar(&stepConfig.AtcConfig, "atcConfig", os.Getenv("PIPER_atcConfig"), "Path to a YAML configuration file for the object set to be checked during ATC run")
cmd.Flags().StringVar(&stepConfig.Repositories, "repositories", os.Getenv("PIPER_repositories"), "Specifies a YAML file containing the repositories configuration")
cmd.Flags().StringVar(&stepConfig.CfAPIEndpoint, "cfApiEndpoint", os.Getenv("PIPER_cfApiEndpoint"), "Cloud Foundry API endpoint")
cmd.Flags().StringVar(&stepConfig.CfOrg, "cfOrg", os.Getenv("PIPER_cfOrg"), "CF org")
cmd.Flags().StringVar(&stepConfig.CfServiceInstance, "cfServiceInstance", os.Getenv("PIPER_cfServiceInstance"), "Parameter of ServiceInstance Name to delete CloudFoundry Service")
cmd.Flags().StringVar(&stepConfig.CfServiceKeyName, "cfServiceKeyName", os.Getenv("PIPER_cfServiceKeyName"), "Parameter of CloudFoundry Service Key to be created")
cmd.Flags().StringVar(&stepConfig.CfSpace, "cfSpace", os.Getenv("PIPER_cfSpace"), "CF Space")
cmd.Flags().StringVar(&stepConfig.Username, "username", os.Getenv("PIPER_username"), "User for either the Cloud Foundry API or the Communication Arrangement for SAP_COM_0510")
cmd.Flags().StringVar(&stepConfig.Password, "password", os.Getenv("PIPER_password"), "Password for either the Cloud Foundry API or the Communication Arrangement for SAP_COM_0510")
cmd.Flags().StringVar(&stepConfig.Host, "host", os.Getenv("PIPER_host"), "Specifies the host address of the SAP Cloud Platform ABAP Environment system")
cmd.Flags().StringVar(&stepConfig.AtcResultsFileName, "atcResultsFileName", `ATCResults.xml`, "Specifies output file name for the results from the ATC run. This file name will also be used for generating the HTML file")
cmd.Flags().BoolVar(&stepConfig.GenerateHTML, "generateHTML", false, "Specifies whether the ATC results should also be generated as an HTML document")
cmd.MarkFlagRequired("username")
cmd.MarkFlagRequired("password")
}
// retrieve step metadata
func abapEnvironmentRunATCCheckMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "abapEnvironmentRunATCCheck",
Aliases: []config.Alias{},
Description: "Runs an ATC Check",
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Secrets: []config.StepSecrets{
{Name: "abapCredentialsId", Description: "Jenkins credentials ID containing user and password to authenticate to the Cloud Platform ABAP Environment system or the Cloud Foundry API", Type: "jenkins", Aliases: []config.Alias{{Name: "cfCredentialsId", Deprecated: false}}},
},
Parameters: []config.StepParameters{
{
Name: "atcConfig",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_atcConfig"),
},
{
Name: "repositories",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_repositories"),
},
{
Name: "cfApiEndpoint",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/apiEndpoint"}},
Default: os.Getenv("PIPER_cfApiEndpoint"),
},
{
Name: "cfOrg",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/org"}},
Default: os.Getenv("PIPER_cfOrg"),
},
{
Name: "cfServiceInstance",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/serviceInstance"}},
Default: os.Getenv("PIPER_cfServiceInstance"),
},
{
Name: "cfServiceKeyName",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/serviceKey"}, {Name: "cloudFoundry/serviceKeyName"}, {Name: "cfServiceKey"}},
Default: os.Getenv("PIPER_cfServiceKeyName"),
},
{
Name: "cfSpace",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/space"}},
Default: os.Getenv("PIPER_cfSpace"),
},
{
Name: "username",
ResourceRef: []config.ResourceReference{
{
Name: "abapCredentialsId",
Param: "username",
Type: "secret",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_username"),
},
{
Name: "password",
ResourceRef: []config.ResourceReference{
{
Name: "abapCredentialsId",
Param: "password",
Type: "secret",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_password"),
},
{
Name: "host",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_host"),
},
{
Name: "atcResultsFileName",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: `ATCResults.xml`,
},
{
Name: "generateHTML",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
Default: false,
},
},
},
Containers: []config.Container{
{Name: "cf", Image: "ppiper/cf-cli:7"},
},
},
}
return theMetaData
}
|
[
"\"PIPER_atcConfig\"",
"\"PIPER_repositories\"",
"\"PIPER_cfApiEndpoint\"",
"\"PIPER_cfOrg\"",
"\"PIPER_cfServiceInstance\"",
"\"PIPER_cfServiceKeyName\"",
"\"PIPER_cfSpace\"",
"\"PIPER_username\"",
"\"PIPER_password\"",
"\"PIPER_host\"",
"\"PIPER_atcConfig\"",
"\"PIPER_repositories\"",
"\"PIPER_cfApiEndpoint\"",
"\"PIPER_cfOrg\"",
"\"PIPER_cfServiceInstance\"",
"\"PIPER_cfServiceKeyName\"",
"\"PIPER_cfSpace\"",
"\"PIPER_username\"",
"\"PIPER_password\"",
"\"PIPER_host\""
] |
[] |
[
"PIPER_cfSpace",
"PIPER_host",
"PIPER_cfApiEndpoint",
"PIPER_password",
"PIPER_username",
"PIPER_cfServiceInstance",
"PIPER_repositories",
"PIPER_cfServiceKeyName",
"PIPER_cfOrg",
"PIPER_atcConfig"
] |
[]
|
["PIPER_cfSpace", "PIPER_host", "PIPER_cfApiEndpoint", "PIPER_password", "PIPER_username", "PIPER_cfServiceInstance", "PIPER_repositories", "PIPER_cfServiceKeyName", "PIPER_cfOrg", "PIPER_atcConfig"]
|
go
| 10 | 0 | |
numba/tests/test_svml.py
|
from __future__ import division, print_function
import math
import numpy as np
import subprocess
import numbers
import importlib
import sys
from .support import TestCase, tag, override_env_config
import numba
from numba.compiler import compile_isolated, Flags
from numba import unittest_support as unittest
needs_svml = unittest.skipUnless(numba.config.USING_SVML,
"SVML tests need SVML to be present")
def math_sin_scalar(x):
return math.sin(x)
def math_sin_loop(n):
ret = np.empty(n, dtype=np.float64)
for x in range(n):
ret[x] = math.sin(np.float64(x))
return ret
@needs_svml
class TestSVML(TestCase):
""" Tests SVML behaves as expected """
# env mutating, must not run in parallel
_numba_parallel_test_ = False
def __init__(self, *args):
self.flags = Flags()
self.flags.set('nrt')
# flags for njit(fastmath=True)
self.fastflags = Flags()
self.fastflags.set('nrt')
self.fastflags.set('fastmath')
super(TestSVML, self).__init__(*args)
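# compile builds the function twice: once with the standard flags (nrt) and once with fastmath enabled, so the two variants can be compared.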
def compile(self, func, *args, **kwargs):
assert not kwargs
sig = tuple([numba.typeof(x) for x in args])
std = compile_isolated(func, sig, flags=self.flags)
fast = compile_isolated(func, sig, flags=self.fastflags)
return std, fast
def copy_args(self, *args):
if not args:
return tuple()
new_args = []
for x in args:
if isinstance(x, np.ndarray):
new_args.append(x.copy('k'))
elif isinstance(x, np.number):
new_args.append(x.copy())
elif isinstance(x, numbers.Number):
new_args.append(x)
else:
raise ValueError('Unsupported argument type encountered')
return tuple(new_args)
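# check compiles pyfunc with and without fastmath, verifies both jitted results match the pure-Python result, then recompiles for the overridden CPU and looks for the given patterns in the generated assembly.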
def check(self, pyfunc, *args, **kwargs):
jitstd, jitfast = self.compile(pyfunc, *args)
std_pattern = kwargs.pop('std_pattern', None)
fast_pattern = kwargs.pop('fast_pattern', None)
cpu_name = kwargs.pop('cpu_name', 'skylake-avx512')
# python result
py_expected = pyfunc(*self.copy_args(*args))
# jit result
jitstd_result = jitstd.entry_point(*self.copy_args(*args))
# fastmath result
jitfast_result = jitfast.entry_point(*self.copy_args(*args))
# assert numerical equality
np.testing.assert_almost_equal(jitstd_result, py_expected, **kwargs)
np.testing.assert_almost_equal(jitfast_result, py_expected, **kwargs)
# look for specific patterns in the asm for a given target
with override_env_config('NUMBA_CPU_NAME', cpu_name), \
override_env_config('NUMBA_CPU_FEATURES', ''):
# recompile for overridden CPU
jitstd, jitfast = self.compile(pyfunc, *args)
if std_pattern:
self.check_svml_presence(jitstd, std_pattern)
if fast_pattern:
self.check_svml_presence(jitfast, fast_pattern)
def check_svml_presence(self, func, pattern):
asm = func.library.get_asm_str()
self.assertIn(pattern, asm)
def test_scalar_context(self):
# SVML will not be used.
pat = '$_sin' if numba.config.IS_OSX else '$sin'
self.check(math_sin_scalar, 7., std_pattern=pat)
self.check(math_sin_scalar, 7., fast_pattern=pat)
def test_svml(self):
# loops both with and without fastmath should use SVML.
# The high accuracy routines are dropped if `fastmath` is set
std = "__svml_sin8_ha,"
fast = "__svml_sin8," # No `_ha`!
self.check(math_sin_loop, 10, std_pattern=std, fast_pattern=fast)
def test_svml_disabled(self):
code = """if 1:
import os
import numpy as np
import math
def math_sin_loop(n):
ret = np.empty(n, dtype=np.float64)
for x in range(n):
ret[x] = math.sin(np.float64(x))
return ret
def check_no_svml():
try:
# ban the use of SVML
os.environ['NUMBA_DISABLE_INTEL_SVML'] = '1'
# delay numba imports to account for env change as
# numba.__init__ picks up SVML and it is too late by
# then to override using `numba.config`
import numba
from numba import config
from numba.tests.support import override_env_config
from numba.compiler import compile_isolated, Flags
# compile for overridden CPU, with and without fastmath
with override_env_config('NUMBA_CPU_NAME', 'skylake-avx512'), \
override_env_config('NUMBA_CPU_FEATURES', ''):
sig = (numba.int32,)
f = Flags()
f.set('nrt')
std = compile_isolated(math_sin_loop, sig, flags=f)
f.set('fastmath')
fast = compile_isolated(math_sin_loop, sig, flags=f)
fns = std, fast
# assert no SVML call is present in the asm
for fn in fns:
asm = fn.library.get_asm_str()
assert '__svml_sin' not in asm
finally:
# not really needed as process is separate
os.environ['NUMBA_DISABLE_INTEL_SVML'] = '0'
config.reload_config()
check_no_svml()
"""
popen = subprocess.Popen(
[sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = popen.communicate()
if popen.returncode != 0:
raise AssertionError(
"process failed with code %s: stderr follows\n%s\n" %
(popen.returncode, err.decode()))
if __name__ == '__main__':
unittest.main()
|
[] |
[] |
[
"NUMBA_DISABLE_INTEL_SVML"
] |
[]
|
["NUMBA_DISABLE_INTEL_SVML"]
|
python
| 1 | 0 | |
test/e2e/main/main_test.go
|
package main
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"os"
"strings"
"testing"
"time"
"github.com/iancoleman/strcase"
ispnv1 "github.com/infinispan/infinispan-operator/api/v1"
v1 "github.com/infinispan/infinispan-operator/api/v1"
"github.com/infinispan/infinispan-operator/api/v2alpha1"
"github.com/infinispan/infinispan-operator/controllers"
cconsts "github.com/infinispan/infinispan-operator/controllers/constants"
"github.com/infinispan/infinispan-operator/pkg/hash"
users "github.com/infinispan/infinispan-operator/pkg/infinispan/security"
kube "github.com/infinispan/infinispan-operator/pkg/kubernetes"
tutils "github.com/infinispan/infinispan-operator/test/e2e/utils"
"gopkg.in/yaml.v2"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
logf "sigs.k8s.io/controller-runtime/pkg/log"
)
var testKube = tutils.NewTestKubernetes(os.Getenv("TESTING_CONTEXT"))
var serviceAccountKube = tutils.NewTestKubernetes("")
var log = logf.Log.WithName("main_test")
func TestMain(m *testing.M) {
tutils.RunOperator(m, testKube)
}
func TestUpdateOperatorPassword(t *testing.T) {
t.Parallel()
// Create a resource without passing any config
spec := tutils.DefaultSpec(testKube)
name := strcase.ToKebab(t.Name())
spec.Name = name
// Register it
spec.Labels = map[string]string{"test-name": t.Name()}
testKube.CreateInfinispan(spec, tutils.Namespace)
defer testKube.CleanNamespaceAndLogOnPanic(tutils.Namespace, spec.Labels)
testKube.WaitForInfinispanPods(1, tutils.SinglePodTimeout, spec.Name, tutils.Namespace)
testKube.WaitForInfinispanCondition(spec.Name, spec.Namespace, ispnv1.ConditionWellFormed)
newPassword := "supersecretoperatorpassword"
secret, err := testKube.Kubernetes.GetSecret(spec.GetAdminSecretName(), spec.Namespace, context.TODO())
tutils.ExpectNoError(err)
_, err = kube.CreateOrPatch(context.TODO(), testKube.Kubernetes.Client, secret, func() error {
secret.Data["password"] = []byte(newPassword)
return nil
})
tutils.ExpectNoError(err)
err = wait.Poll(tutils.DefaultPollPeriod, tutils.SinglePodTimeout, func() (bool, error) {
secret, err = testKube.Kubernetes.GetSecret(spec.GetAdminSecretName(), spec.Namespace, context.TODO())
tutils.ExpectNoError(err)
identities := secret.Data[cconsts.ServerIdentitiesFilename]
pwd, err := users.FindPassword(cconsts.DefaultOperatorUser, identities)
tutils.ExpectNoError(err)
fmt.Printf("Pwd=%s, Identities=%s", string(pwd), string(identities))
return pwd == newPassword, nil
})
tutils.ExpectNoError(err)
}
func TestUpdateEncryptionSecrets(t *testing.T) {
t.Parallel()
// Create a resource without passing any config
spec := tutils.DefaultSpec(testKube)
name := strcase.ToKebab(t.Name())
spec.Name = name
spec.Spec.Replicas = 1
spec.Spec.Security = ispnv1.InfinispanSecurity{
EndpointEncryption: tutils.EndpointEncryption(spec.Name),
}
// Create secret
serverName := tutils.GetServerName(spec)
keystore, truststore, tlsConfig := tutils.CreateKeyAndTruststore(serverName, false)
keystoreSecret := tutils.EncryptionSecretKeystore(spec.Name, tutils.Namespace, keystore)
truststoreSecret := tutils.EncryptionSecretClientTrustore(spec.Name, tutils.Namespace, truststore)
testKube.CreateSecret(keystoreSecret)
defer testKube.DeleteSecret(keystoreSecret)
testKube.CreateSecret(truststoreSecret)
defer testKube.DeleteSecret(truststoreSecret)
// Create Cluster
spec.Labels = map[string]string{"test-name": t.Name()}
testKube.CreateInfinispan(spec, tutils.Namespace)
defer testKube.CleanNamespaceAndLogOnPanic(tutils.Namespace, spec.Labels)
testKube.WaitForInfinispanPods(1, tutils.SinglePodTimeout, spec.Name, tutils.Namespace)
testKube.WaitForInfinispanCondition(spec.Name, spec.Namespace, ispnv1.ConditionWellFormed)
// Ensure that we can connect to the endpoint with TLS
host, client := tutils.HTTPSClientAndHost(spec, tlsConfig, testKube)
checkRestConnection(host, client)
namespacedName := types.NamespacedName{Namespace: spec.Namespace, Name: spec.GetStatefulSetName()}
// Get the cluster's StatefulSet and current generation
ss := appsv1.StatefulSet{}
tutils.ExpectNoError(testKube.Kubernetes.Client.Get(context.TODO(), namespacedName, &ss))
originalGeneration := ss.Status.ObservedGeneration
// Update secret to contain new keystore
newKeystore, newTruststore, newTlsConfig := tutils.CreateKeyAndTruststore(serverName, false)
if bytes.Equal(keystore, newKeystore) || bytes.Equal(truststore, newTruststore) {
panic("Expected new store")
}
keystoreSecret = testKube.GetSecret(keystoreSecret.Name, keystoreSecret.Namespace)
keystoreSecret.Data[controllers.EncryptPkcs12KeystoreName] = newKeystore
testKube.UpdateSecret(keystoreSecret)
truststoreSecret = testKube.GetSecret(truststoreSecret.Name, truststoreSecret.Namespace)
truststoreSecret.Data[cconsts.EncryptTruststoreKey] = newTruststore
testKube.UpdateSecret(truststoreSecret)
// Wait for a new generation to appear
err := wait.Poll(tutils.DefaultPollPeriod, tutils.SinglePodTimeout, func() (done bool, err error) {
tutils.ExpectNoError(testKube.Kubernetes.Client.Get(context.TODO(), namespacedName, &ss))
return ss.Status.ObservedGeneration >= originalGeneration+1, nil
})
tutils.ExpectNoError(err)
// Wait until the current and updated revisions match. This ensures that the rolling upgrade completes
err = wait.Poll(tutils.DefaultPollPeriod, tutils.SinglePodTimeout, func() (done bool, err error) {
tutils.ExpectNoError(testKube.Kubernetes.Client.Get(context.TODO(), namespacedName, &ss))
return ss.Status.CurrentRevision == ss.Status.UpdateRevision, nil
})
tutils.ExpectNoError(err)
// Ensure that we can connect to the endpoint with the new TLS settings
host, client = tutils.HTTPSClientAndHost(spec, newTlsConfig, testKube)
checkRestConnection(host, client)
}
// Test if a single node works correctly
func TestNodeStartup(t *testing.T) {
// Create a resource without passing any config
spec := tutils.DefaultSpec(testKube)
spec.Annotations = make(map[string]string)
spec.Annotations[v1.TargetLabels] = "my-svc-label"
spec.Labels = make(map[string]string)
spec.Labels["my-svc-label"] = "my-svc-value"
os.Setenv(v1.OperatorTargetLabelsEnvVarName, "{\"operator-svc-label\":\"operator-svc-value\"}")
defer os.Unsetenv(v1.OperatorTargetLabelsEnvVarName)
spec.Annotations[v1.PodTargetLabels] = "my-pod-label"
spec.Labels["my-pod-label"] = "my-pod-value"
os.Setenv(v1.OperatorPodTargetLabelsEnvVarName, "{\"operator-pod-label\":\"operator-pod-value\"}")
defer os.Unsetenv(v1.OperatorPodTargetLabelsEnvVarName)
// Register it
spec.Labels = map[string]string{"test-name": t.Name()}
testKube.CreateInfinispan(spec, tutils.Namespace)
defer testKube.CleanNamespaceAndLogOnPanic(tutils.Namespace, spec.Labels)
testKube.WaitForInfinispanPods(1, tutils.SinglePodTimeout, spec.Name, tutils.Namespace)
ispn := testKube.WaitForInfinispanCondition(spec.Name, spec.Namespace, ispnv1.ConditionWellFormed)
pod := corev1.Pod{}
tutils.ExpectNoError(testKube.Kubernetes.Client.Get(context.TODO(), types.NamespacedName{Name: spec.Name + "-0", Namespace: tutils.Namespace}, &pod))
// Checking labels propagation to pods
// from Infinispan CR to pods
if pod.Labels["my-pod-label"] != ispn.Labels["my-pod-label"] {
panic("Infinispan CR labels haven't been propagated to pods")
}
// from operator environment
if tutils.RunLocalOperator == "TRUE" {
// running locally, labels are hardcoded and set by the testsuite
if pod.Labels["operator-pod-label"] != "operator-pod-value" ||
ispn.Labels["operator-pod-label"] != "operator-pod-value" {
panic("Infinispan CR labels haven't been propagated to pods")
}
} else {
// Get the operator namespace from the env if it's different
// from the testsuite one
operatorNS := tutils.OperatorNamespace
if operatorNS == "" {
operatorNS = spec.Namespace
}
// operator deployed on cluster, labels are set by the deployment
if !areOperatorLabelsPropagated(operatorNS, ispnv1.OperatorPodTargetLabelsEnvVarName, pod.Labels) {
panic("Operator labels haven't been propagated to pods")
}
}
svcList := &corev1.ServiceList{}
tutils.ExpectNoError(testKube.Kubernetes.ResourcesList(ispn.Namespace, map[string]string{"infinispan_cr": "test-node-startup"}, svcList, context.TODO()))
if len(svcList.Items) == 0 {
panic("No services found for cluster")
}
for _, svc := range svcList.Items {
// from Infinispan CR to service
if svc.Labels["my-svc-label"] != ispn.Labels["my-svc-label"] {
panic("Infinispan CR labels haven't been propagated to services")
}
// from operator environment
if tutils.RunLocalOperator == "TRUE" {
// running locally, labels are hardcoded and set by the testsuite
if svc.Labels["operator-svc-label"] != "operator-svc-value" ||
ispn.Labels["operator-svc-label"] != "operator-svc-value" {
panic("Labels haven't been propagated to services")
}
} else {
// Get the operator namespace from the env if it's different
// from the testsuite one
operatorNS := tutils.OperatorNamespace
if operatorNS == "" {
operatorNS = spec.Namespace
}
// operator deployed on cluster, labels are set by the deployment
if !areOperatorLabelsPropagated(operatorNS, ispnv1.OperatorTargetLabelsEnvVarName, svc.Labels) {
panic("Operator labels haven't been propagated to services")
}
}
}
}
// areOperatorLabelsPropagated is a helper function that reads the labels from the Infinispan operator pod
// and matches them against the labels map provided by the caller
func areOperatorLabelsPropagated(namespace, varName string, labels map[string]string) bool {
podList := &corev1.PodList{}
tutils.ExpectNoError(testKube.Kubernetes.ResourcesList(namespace, map[string]string{"name": tutils.OperatorName}, podList, context.TODO()))
if len(podList.Items) == 0 {
panic("Cannot get the Infinispan operator pod")
}
labelsAsString := ""
for _, item := range podList.Items[0].Spec.Containers[0].Env {
if item.Name == varName {
labelsAsString = item.Value
}
}
if labelsAsString == "" {
return true
}
opLabels := make(map[string]string)
if json.Unmarshal([]byte(labelsAsString), &opLabels) != nil {
return true
}
for name, value := range opLabels {
if labels[name] != value {
return false
}
}
return true
}
// Run some functions for testing rights not covered by integration tests
func TestRolesSynthetic(t *testing.T) {
_, err := serviceAccountKube.Kubernetes.GetNodeHost(log, context.TODO())
tutils.ExpectNoError(err)
_, err = kube.FindStorageClass("not-present-storage-class", serviceAccountKube.Kubernetes.Client, context.TODO())
if !errors.IsNotFound(err) {
tutils.ExpectNoError(err)
}
}
// Test a single node with ephemeral storage
func TestNodeWithEphemeralStorage(t *testing.T) {
t.Parallel()
// Create a resource without passing any config
spec := tutils.DefaultSpec(testKube)
name := strcase.ToKebab(t.Name())
spec.Name = name
spec.Spec.Service.Container = &ispnv1.InfinispanServiceContainerSpec{EphemeralStorage: true}
// Register it
spec.Labels = map[string]string{"test-name": t.Name()}
testKube.CreateInfinispan(spec, tutils.Namespace)
defer testKube.CleanNamespaceAndLogOnPanic(tutils.Namespace, spec.Labels)
testKube.WaitForInfinispanPods(1, tutils.SinglePodTimeout, spec.Name, tutils.Namespace)
testKube.WaitForInfinispanCondition(spec.Name, spec.Namespace, ispnv1.ConditionWellFormed)
// Making sure no PVCs were created
pvcs := &corev1.PersistentVolumeClaimList{}
err := testKube.Kubernetes.ResourcesList(spec.Namespace, controllers.PodLabels(spec.Name), pvcs, context.TODO())
tutils.ExpectNoError(err)
if len(pvcs.Items) > 0 {
tutils.ExpectNoError(fmt.Errorf("persistent volume claims were found (count = %d) but not expected for ephemeral storage configuration", len(pvcs.Items)))
}
}
// Test if the cluster is working correctly
func TestClusterFormation(t *testing.T) {
t.Parallel()
// Create a resource without passing any config
spec := tutils.DefaultSpec(testKube)
name := strcase.ToKebab(t.Name())
spec.Name = name
spec.Spec.Replicas = 2
// Register it
spec.Labels = map[string]string{"test-name": t.Name()}
testKube.CreateInfinispan(spec, tutils.Namespace)
defer testKube.CleanNamespaceAndLogOnPanic(tutils.Namespace, spec.Labels)
testKube.WaitForInfinispanPods(2, tutils.SinglePodTimeout, spec.Name, tutils.Namespace)
testKube.WaitForInfinispanCondition(spec.Name, spec.Namespace, ispnv1.ConditionWellFormed)
}
// Test if the cluster is working correctly
func TestClusterFormationWithTLS(t *testing.T) {
t.Parallel()
// Create a resource without passing any config
spec := tutils.DefaultSpec(testKube)
name := strcase.ToKebab(t.Name())
spec.Name = name
spec.Spec.Replicas = 2
spec.Spec.Security = ispnv1.InfinispanSecurity{
EndpointEncryption: tutils.EndpointEncryption(spec.Name),
}
// Create secret with server certificates
serverName := tutils.GetServerName(spec)
cert, privKey, tlsConfig := tutils.CreateServerCertificates(serverName)
secret := tutils.EncryptionSecret(spec.Name, tutils.Namespace, privKey, cert)
testKube.CreateSecret(secret)
defer testKube.DeleteSecret(secret)
// Register it
spec.Labels = map[string]string{"test-name": t.Name()}
testKube.CreateInfinispan(spec, tutils.Namespace)
defer testKube.CleanNamespaceAndLogOnPanic(tutils.Namespace, spec.Labels)
testKube.WaitForInfinispanPods(2, tutils.SinglePodTimeout, spec.Name, tutils.Namespace)
testKube.WaitForInfinispanCondition(spec.Name, spec.Namespace, ispnv1.ConditionWellFormed)
// Ensure that we can connect to the endpoint with TLS
host, client := tutils.HTTPSClientAndHost(spec, tlsConfig, testKube)
checkRestConnection(host, client)
}
// Test if the cluster is working correctly
func TestTLSWithExistingKeystore(t *testing.T) {
t.Parallel()
// Create a resource without passing any config
spec := tutils.DefaultSpec(testKube)
name := strcase.ToKebab(t.Name())
spec.Name = name
spec.Spec.Replicas = 1
spec.Spec.Security = ispnv1.InfinispanSecurity{
EndpointEncryption: tutils.EndpointEncryption(spec.Name),
}
// Create secret
serverName := tutils.GetServerName(spec)
keystore, tlsConfig := tutils.CreateKeystore(serverName)
secret := tutils.EncryptionSecretKeystore(spec.Name, tutils.Namespace, keystore)
testKube.CreateSecret(secret)
defer testKube.DeleteSecret(secret)
// Register it
spec.Labels = map[string]string{"test-name": t.Name()}
testKube.CreateInfinispan(spec, tutils.Namespace)
defer testKube.CleanNamespaceAndLogOnPanic(tutils.Namespace, spec.Labels)
testKube.WaitForInfinispanPods(1, tutils.SinglePodTimeout, spec.Name, tutils.Namespace)
testKube.WaitForInfinispanCondition(spec.Name, spec.Namespace, ispnv1.ConditionWellFormed)
// Ensure that we can connect to the endpoint with TLS
host, client := tutils.HTTPSClientAndHost(spec, tlsConfig, testKube)
checkRestConnection(host, client)
}
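// checkRestConnection issues a GET against the default cache manager REST endpoint and panics on any non-OK response.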
func checkRestConnection(hostAddr string, client tutils.HTTPClient) {
url := fmt.Sprintf("%v/rest/v2/cache-managers/default", hostAddr)
rsp, err := client.Get(url, nil)
tutils.ExpectNoError(err)
defer tutils.CloseHttpResponse(rsp)
if rsp.StatusCode != http.StatusOK {
panic(httpError{rsp.StatusCode})
}
}
func TestClientCertValidate(t *testing.T) {
testClientCert(t, func(spec *v1.Infinispan) (authType ispnv1.ClientCertType, keystoreSecret, truststoreSecret *corev1.Secret, tlsConfig *tls.Config) {
authType = ispnv1.ClientCertValidate
serverName := tutils.GetServerName(spec)
keystore, truststore, tlsConfig := tutils.CreateKeyAndTruststore(serverName, false)
keystoreSecret = tutils.EncryptionSecretKeystore(spec.Name, tutils.Namespace, keystore)
truststoreSecret = tutils.EncryptionSecretClientTrustore(spec.Name, tutils.Namespace, truststore)
return
})
}
func TestClientCertValidateNoAuth(t *testing.T) {
testClientCert(t, func(spec *v1.Infinispan) (authType ispnv1.ClientCertType, keystoreSecret, truststoreSecret *corev1.Secret, tlsConfig *tls.Config) {
spec.Spec.Security.EndpointAuthentication = pointer.BoolPtr(false)
authType = ispnv1.ClientCertValidate
serverName := tutils.GetServerName(spec)
keystore, truststore, tlsConfig := tutils.CreateKeyAndTruststore(serverName, false)
keystoreSecret = tutils.EncryptionSecretKeystore(spec.Name, tutils.Namespace, keystore)
truststoreSecret = tutils.EncryptionSecretClientTrustore(spec.Name, tutils.Namespace, truststore)
return
})
}
func TestClientCertAuthenticate(t *testing.T) {
testClientCert(t, func(spec *v1.Infinispan) (authType ispnv1.ClientCertType, keystoreSecret, truststoreSecret *corev1.Secret, tlsConfig *tls.Config) {
authType = ispnv1.ClientCertAuthenticate
serverName := tutils.GetServerName(spec)
keystore, truststore, tlsConfig := tutils.CreateKeyAndTruststore(serverName, true)
keystoreSecret = tutils.EncryptionSecretKeystore(spec.Name, tutils.Namespace, keystore)
truststoreSecret = tutils.EncryptionSecretClientTrustore(spec.Name, tutils.Namespace, truststore)
return
})
}
func TestClientCertValidateWithAuthorization(t *testing.T) {
testClientCert(t, func(spec *v1.Infinispan) (authType ispnv1.ClientCertType, keystoreSecret, truststoreSecret *corev1.Secret, tlsConfig *tls.Config) {
spec.Spec.Security.Authorization = &v1.Authorization{
Enabled: true,
Roles: []ispnv1.AuthorizationRole{
{
Name: "client",
Permissions: []string{"ALL"},
},
},
}
authType = ispnv1.ClientCertValidate
serverName := tutils.GetServerName(spec)
keystore, truststore, tlsConfig := tutils.CreateKeyAndTruststore(serverName, false)
keystoreSecret = tutils.EncryptionSecretKeystore(spec.Name, tutils.Namespace, keystore)
truststoreSecret = tutils.EncryptionSecretClientTrustore(spec.Name, tutils.Namespace, truststore)
return
})
}
func TestClientCertAuthenticateWithAuthorization(t *testing.T) {
testClientCert(t, func(spec *v1.Infinispan) (authType ispnv1.ClientCertType, keystoreSecret, truststoreSecret *corev1.Secret, tlsConfig *tls.Config) {
spec.Spec.Security.Authorization = &v1.Authorization{
Enabled: true,
Roles: []ispnv1.AuthorizationRole{
{
Name: "client",
Permissions: []string{"ALL"},
},
},
}
authType = ispnv1.ClientCertAuthenticate
serverName := tutils.GetServerName(spec)
keystore, truststore, tlsConfig := tutils.CreateKeyAndTruststore(serverName, true)
keystoreSecret = tutils.EncryptionSecretKeystore(spec.Name, tutils.Namespace, keystore)
truststoreSecret = tutils.EncryptionSecretClientTrustore(spec.Name, tutils.Namespace, truststore)
return
})
}
func TestClientCertGeneratedTruststoreAuthenticate(t *testing.T) {
testClientCert(t, func(spec *v1.Infinispan) (authType ispnv1.ClientCertType, keystoreSecret, truststoreSecret *corev1.Secret, tlsConfig *tls.Config) {
authType = ispnv1.ClientCertAuthenticate
serverName := tutils.GetServerName(spec)
keystore, caCert, clientCert, tlsConfig := tutils.CreateKeystoreAndClientCerts(serverName)
keystoreSecret = tutils.EncryptionSecretKeystore(spec.Name, tutils.Namespace, keystore)
truststoreSecret = tutils.EncryptionSecretClientCert(spec.Name, tutils.Namespace, caCert, clientCert)
return
})
}
func TestClientCertGeneratedTruststoreValidate(t *testing.T) {
testClientCert(t, func(spec *v1.Infinispan) (authType ispnv1.ClientCertType, keystoreSecret, truststoreSecret *corev1.Secret, tlsConfig *tls.Config) {
authType = ispnv1.ClientCertValidate
serverName := tutils.GetServerName(spec)
keystore, caCert, _, tlsConfig := tutils.CreateKeystoreAndClientCerts(serverName)
keystoreSecret = tutils.EncryptionSecretKeystore(spec.Name, tutils.Namespace, keystore)
truststoreSecret = tutils.EncryptionSecretClientCert(spec.Name, tutils.Namespace, caCert, nil)
return
})
}
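// testClientCert covers the common client-certificate flow: the initializer produces the keystore/truststore secrets plus a matching client TLS config, the cluster is created with the returned ClientCert mode, and creating a cache over HTTPS verifies that the endpoint accepts the client certificate.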
func testClientCert(t *testing.T, initializer func(*v1.Infinispan) (v1.ClientCertType, *corev1.Secret, *corev1.Secret, *tls.Config)) {
t.Parallel()
spec := tutils.DefaultSpec(testKube)
name := strcase.ToKebab(t.Name())
spec.Name = name
spec.Spec.Replicas = 1
// Create the keystore & truststore for the server with a compatible client tls configuration
authType, keystoreSecret, truststoreSecret, tlsConfig := initializer(spec)
spec.Spec.Security.EndpointEncryption = tutils.EndpointEncryptionClientCert(spec.Name, authType)
testKube.CreateSecret(keystoreSecret)
defer testKube.DeleteSecret(keystoreSecret)
testKube.CreateSecret(truststoreSecret)
defer testKube.DeleteSecret(truststoreSecret)
// Register it
spec.Labels = map[string]string{"test-name": t.Name()}
testKube.CreateInfinispan(spec, tutils.Namespace)
defer testKube.CleanNamespaceAndLogOnPanic(tutils.Namespace, spec.Labels)
testKube.WaitForInfinispanPods(1, tutils.SinglePodTimeout, spec.Name, tutils.Namespace)
testKube.WaitForInfinispanCondition(spec.Name, spec.Namespace, ispnv1.ConditionWellFormed)
// Ensure that we can connect to the endpoint with TLS
host, client := tutils.HTTPSClientAndHost(spec, tlsConfig, testKube)
createCacheAndValidate("test", host, "", client)
}
// Test if spec.container.cpu update is handled
func TestContainerCPUUpdateWithTwoReplicas(t *testing.T) {
var modifier = func(ispn *ispnv1.Infinispan) {
ispn.Spec.Container.CPU = "550m"
}
var verifier = func(ispn *ispnv1.Infinispan, ss *appsv1.StatefulSet) {
limit := resource.MustParse("550m")
request := resource.MustParse("550m")
if limit.Cmp(ss.Spec.Template.Spec.Containers[0].Resources.Limits["cpu"]) != 0 ||
request.Cmp(ss.Spec.Template.Spec.Containers[0].Resources.Requests["cpu"]) != 0 {
panic("CPU field not updated")
}
}
spec := tutils.MinimalSpec
spec.Name = strcase.ToKebab(t.Name())
spec.Labels = map[string]string{"test-name": t.Name()}
genericTestForContainerUpdated(spec, modifier, verifier)
}
// Test if spec.container.memory update is handled
func TestContainerMemoryUpdate(t *testing.T) {
t.Parallel()
var modifier = func(ispn *ispnv1.Infinispan) {
ispn.Spec.Container.Memory = "256Mi"
}
var verifier = func(ispn *ispnv1.Infinispan, ss *appsv1.StatefulSet) {
if resource.MustParse("256Mi") != ss.Spec.Template.Spec.Containers[0].Resources.Requests["memory"] {
panic("Memory field not updated")
}
}
spec := tutils.DefaultSpec(testKube)
spec.Name = strcase.ToKebab(t.Name())
spec.Labels = map[string]string{"test-name": t.Name()}
genericTestForContainerUpdated(*spec, modifier, verifier)
}
func TestContainerJavaOptsUpdate(t *testing.T) {
t.Parallel()
var modifier = func(ispn *ispnv1.Infinispan) {
ispn.Spec.Container.ExtraJvmOpts = "-XX:NativeMemoryTracking=summary"
}
var verifier = func(ispn *ispnv1.Infinispan, ss *appsv1.StatefulSet) {
env := ss.Spec.Template.Spec.Containers[0].Env
for _, value := range env {
if value.Name == "JAVA_OPTIONS" {
if value.Value != "-XX:NativeMemoryTracking=summary" {
panic("JAVA_OPTIONS not updated")
} else {
return
}
}
}
panic("JAVA_OPTIONS not updated")
}
spec := tutils.DefaultSpec(testKube)
spec.Name = strcase.ToKebab(t.Name())
spec.Labels = map[string]string{"test-name": t.Name()}
genericTestForContainerUpdated(*spec, modifier, verifier)
}
func TestEndpointAuthenticationUpdate(t *testing.T) {
t.Parallel()
var modifier = func(ispn *ispnv1.Infinispan) {
ispn.Spec.Security.EndpointAuthentication = pointer.BoolPtr(true)
}
var verifier = func(ispn *ispnv1.Infinispan, ss *appsv1.StatefulSet) {
testKube.WaitForInfinispanCondition(ss.Name, ss.Namespace, ispnv1.ConditionWellFormed)
}
spec := tutils.DefaultSpec(testKube)
spec.Name = strcase.ToKebab(t.Name())
spec.Labels = map[string]string{"test-name": t.Name()}
spec.Spec.Security.EndpointAuthentication = pointer.BoolPtr(false)
genericTestForContainerUpdated(*spec, modifier, verifier)
}
func TestEndpointEncryptionUpdate(t *testing.T) {
t.Parallel()
spec := tutils.DefaultSpec(testKube)
spec.Name = strcase.ToKebab(t.Name())
spec.Labels = map[string]string{"test-name": t.Name()}
spec.Spec.Security = ispnv1.InfinispanSecurity{
EndpointEncryption: &ispnv1.EndpointEncryption{
Type: ispnv1.CertificateSourceTypeNoneNoEncryption,
},
}
// Create secret with server certificates
serverName := tutils.GetServerName(spec)
cert, privKey, tlsConfig := tutils.CreateServerCertificates(serverName)
secret := tutils.EncryptionSecret(spec.Name, tutils.Namespace, privKey, cert)
testKube.CreateSecret(secret)
defer testKube.DeleteSecret(secret)
var modifier = func(ispn *ispnv1.Infinispan) {
ispn.Spec.Security = ispnv1.InfinispanSecurity{
EndpointEncryption: tutils.EndpointEncryption(spec.Name),
}
}
var verifier = func(ispn *ispnv1.Infinispan, ss *appsv1.StatefulSet) {
testKube.WaitForInfinispanCondition(ispn.Name, ispn.Namespace, ispnv1.ConditionWellFormed)
// Ensure that we can connect to the endpoint with TLS
host, client := tutils.HTTPSClientAndHost(spec, tlsConfig, testKube)
checkRestConnection(host, client)
}
genericTestForContainerUpdated(*spec, modifier, verifier)
}
// genericTestForContainerUpdated creates the cluster, waits for it to form, and then verifies the StatefulSet update produced by the modifier
func genericTestForContainerUpdated(ispn ispnv1.Infinispan, modifier func(*ispnv1.Infinispan), verifier func(*ispnv1.Infinispan, *appsv1.StatefulSet)) {
testKube.CreateInfinispan(&ispn, tutils.Namespace)
defer testKube.CleanNamespaceAndLogOnPanic(tutils.Namespace, ispn.Labels)
testKube.WaitForInfinispanPods(int(ispn.Spec.Replicas), tutils.SinglePodTimeout, ispn.Name, tutils.Namespace)
testKube.WaitForInfinispanCondition(ispn.Name, ispn.Namespace, ispnv1.ConditionWellFormed)
verifyStatefulSetUpdate(ispn, modifier, verifier)
}
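// verifyStatefulSetUpdate applies the modifier to the Infinispan CR, waits for the StatefulSet generation to advance and for the rolling upgrade to complete, then runs the verifier against the updated StatefulSet.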
func verifyStatefulSetUpdate(ispn ispnv1.Infinispan, modifier func(*ispnv1.Infinispan), verifier func(*ispnv1.Infinispan, *appsv1.StatefulSet)) {
// Get the associated StatefulSet
ss := appsv1.StatefulSet{}
// Get the current generation
tutils.ExpectNoError(testKube.Kubernetes.Client.Get(context.TODO(), types.NamespacedName{Namespace: ispn.Namespace, Name: ispn.GetStatefulSetName()}, &ss))
generation := ss.Status.ObservedGeneration
tutils.ExpectNoError(testKube.UpdateInfinispan(&ispn, func() {
modifier(&ispn)
}))
// Wait for a new generation to appear
err := wait.Poll(tutils.DefaultPollPeriod, tutils.SinglePodTimeout, func() (done bool, err error) {
tutils.ExpectNoError(testKube.Kubernetes.Client.Get(context.TODO(), types.NamespacedName{Namespace: ispn.Namespace, Name: ispn.Name}, &ss))
return ss.Status.ObservedGeneration >= generation+1, nil
})
tutils.ExpectNoError(err)
// Wait until the current and updated revisions match;
// this ensures that the rolling upgrade completes
err = wait.Poll(tutils.DefaultPollPeriod, tutils.SinglePodTimeout, func() (done bool, err error) {
tutils.ExpectNoError(testKube.Kubernetes.Client.Get(context.TODO(), types.NamespacedName{Namespace: ispn.Namespace, Name: ispn.Name}, &ss))
return ss.Status.CurrentRevision == ss.Status.UpdateRevision, nil
})
tutils.ExpectNoError(err)
// Check that the update has been propagated
verifier(&ispn, &ss)
}
func TestCacheService(t *testing.T) {
t.Parallel()
testCacheService(t.Name())
}
func testCacheService(testName string) {
spec := tutils.DefaultSpec(testKube)
spec.Name = strcase.ToKebab(testName)
spec.Spec.Service.Type = ispnv1.ServiceTypeCache
spec.Spec.Expose = tutils.ExposeServiceSpec(testKube)
spec.Labels = map[string]string{"test-name": testName}
testKube.CreateInfinispan(spec, tutils.Namespace)
defer testKube.CleanNamespaceAndLogOnPanic(tutils.Namespace, spec.Labels)
testKube.WaitForInfinispanPods(1, tutils.SinglePodTimeout, spec.Name, tutils.Namespace)
ispn := testKube.WaitForInfinispanCondition(spec.Name, spec.Namespace, ispnv1.ConditionWellFormed)
hostAddr, client := tutils.HTTPClientAndHost(ispn, testKube)
cacheName := "default"
waitForCacheToBeCreated(cacheName, hostAddr, client)
key := "test"
value := "test-operator"
keyURL := fmt.Sprintf("%v/%v", cacheURL(cacheName, hostAddr), key)
putViaRoute(keyURL, value, client)
actual := getViaRoute(keyURL, client)
if actual != value {
panic(fmt.Errorf("unexpected actual returned: %v (value %v)", actual, value))
}
}
// TestPermanentCache creates a permanent cache, then stops/starts
// the cluster and checks that the cache is still there
func TestPermanentCache(t *testing.T) {
t.Parallel()
name := strcase.ToKebab(t.Name())
cacheName := "test"
// Define function for the generic stop/start test procedure
var createPermanentCache = func(ispn *ispnv1.Infinispan) {
hostAddr, client := tutils.HTTPClientAndHost(ispn, testKube)
createCacheAndValidate(cacheName, hostAddr, "PERMANENT", client)
}
var usePermanentCache = func(ispn *ispnv1.Infinispan) {
hostAddr, client := tutils.HTTPClientAndHost(ispn, testKube)
key := "test"
value := "test-operator"
keyURL := fmt.Sprintf("%v/%v", cacheURL(cacheName, hostAddr), key)
putViaRoute(keyURL, value, client)
actual := getViaRoute(keyURL, client)
if actual != value {
panic(fmt.Errorf("unexpected actual returned: %v (value %v)", actual, value))
}
deleteCache(cacheName, hostAddr, client)
}
genericTestForGracefulShutdown(name, createPermanentCache, usePermanentCache)
}
// TestCheckDataSurviveToShutdown creates a cache with a file-store, then stops/starts
// the cluster and checks that the cache and the data are still there
func TestCheckDataSurviveToShutdown(t *testing.T) {
t.Parallel()
name := strcase.ToKebab(t.Name())
cacheName := "test"
template := `<infinispan><cache-container><distributed-cache name ="` + cacheName +
`"><persistence><file-store/></persistence></distributed-cache></cache-container></infinispan>`
key := "test"
value := "test-operator"
// Define function for the generic stop/start test procedure
var createCacheWithFileStore = func(ispn *ispnv1.Infinispan) {
hostAddr, client := tutils.HTTPClientAndHost(ispn, testKube)
createCacheWithXMLTemplate(cacheName, hostAddr, template, client)
keyURL := fmt.Sprintf("%v/%v", cacheURL(cacheName, hostAddr), key)
putViaRoute(keyURL, value, client)
}
var useCacheWithFileStore = func(ispn *ispnv1.Infinispan) {
hostAddr, client := tutils.HTTPClientAndHost(ispn, testKube)
keyURL := fmt.Sprintf("%v/%v", cacheURL(cacheName, hostAddr), key)
actual := getViaRoute(keyURL, client)
if actual != value {
panic(fmt.Errorf("unexpected actual returned: %v (value %v)", actual, value))
}
deleteCache(cacheName, hostAddr, client)
}
genericTestForGracefulShutdown(name, createCacheWithFileStore, useCacheWithFileStore)
}
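// genericTestForGracefulShutdown creates a single-node cluster, applies the modifier (e.g. creating a permanent cache), performs a graceful shutdown and restart, and then runs the verifier to check that the change survived.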
func genericTestForGracefulShutdown(clusterName string, modifier func(*ispnv1.Infinispan), verifier func(*ispnv1.Infinispan)) {
// Create a resource without passing any config
// Register it
spec := tutils.DefaultSpec(testKube)
spec.Name = clusterName
spec.Labels = map[string]string{"test-name": clusterName}
testKube.CreateInfinispan(spec, tutils.Namespace)
defer testKube.CleanNamespaceAndLogOnPanic(tutils.Namespace, spec.Labels)
testKube.WaitForInfinispanPods(1, tutils.SinglePodTimeout, spec.Name, tutils.Namespace)
ispn := testKube.WaitForInfinispanCondition(spec.Name, spec.Namespace, ispnv1.ConditionWellFormed)
// Do something that needs to be permanent
modifier(ispn)
// Delete the cluster
testKube.GracefulShutdownInfinispan(spec)
testKube.GracefulRestartInfinispan(spec, 1, tutils.SinglePodTimeout)
// Do something that checks that permanent changes are there again
verifier(ispn)
}
func TestExternalService(t *testing.T) {
t.Parallel()
name := strcase.ToKebab(t.Name())
// Create a resource without passing any config
spec := ispnv1.Infinispan{
TypeMeta: tutils.InfinispanTypeMeta,
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: ispnv1.InfinispanSpec{
Container: ispnv1.InfinispanContainerSpec{
CPU: tutils.CPU,
Memory: tutils.Memory,
},
Replicas: 1,
Expose: tutils.ExposeServiceSpec(testKube),
},
}
// Register it
spec.Labels = map[string]string{"test-name": t.Name()}
testKube.CreateInfinispan(&spec, tutils.Namespace)
defer testKube.CleanNamespaceAndLogOnPanic(tutils.Namespace, spec.Labels)
testKube.WaitForInfinispanPods(1, tutils.SinglePodTimeout, spec.Name, tutils.Namespace)
ispn := testKube.WaitForInfinispanCondition(spec.Name, spec.Namespace, ispnv1.ConditionWellFormed)
hostAddr, client := tutils.HTTPClientAndHost(ispn, testKube)
cacheName := "test"
createCacheAndValidate(cacheName, hostAddr, "", client)
defer deleteCache(cacheName, hostAddr, client)
key := "test"
value := "test-operator"
keyURL := fmt.Sprintf("%v/%v", cacheURL(cacheName, hostAddr), key)
putViaRoute(keyURL, value, client)
actual := getViaRoute(keyURL, client)
if actual != value {
panic(fmt.Errorf("unexpected actual returned: %v (value %v)", actual, value))
}
}
// TestExternalServiceWithAuth starts a cluster and checks application
// and management connection with authentication
func TestExternalServiceWithAuth(t *testing.T) {
t.Parallel()
usr := "connectorusr"
pass := "connectorpass"
newpass := "connectornewpass"
identitiesYaml, err := users.CreateIdentitiesFor(usr, pass)
tutils.ExpectNoError(err)
// Create secret with application credentials
secret := corev1.Secret{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Secret",
},
ObjectMeta: metav1.ObjectMeta{
Name: "conn-secret-test",
Namespace: tutils.Namespace,
},
Type: corev1.SecretTypeOpaque,
StringData: map[string]string{cconsts.ServerIdentitiesFilename: string(identitiesYaml)},
}
testKube.CreateSecret(&secret)
defer testKube.DeleteSecret(&secret)
name := strcase.ToKebab(t.Name())
// Create Infinispan
spec := ispnv1.Infinispan{
TypeMeta: tutils.InfinispanTypeMeta,
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: ispnv1.InfinispanSpec{
Security: ispnv1.InfinispanSecurity{EndpointSecretName: "conn-secret-test"},
Container: ispnv1.InfinispanContainerSpec{
CPU: tutils.CPU,
Memory: tutils.Memory,
},
Replicas: 1,
Expose: tutils.ExposeServiceSpec(testKube),
},
}
spec.Labels = map[string]string{"test-name": t.Name()}
testKube.CreateInfinispan(&spec, tutils.Namespace)
defer testKube.CleanNamespaceAndLogOnPanic(tutils.Namespace, spec.Labels)
testKube.WaitForInfinispanPods(1, tutils.SinglePodTimeout, spec.Name, tutils.Namespace)
ispn := testKube.WaitForInfinispanCondition(spec.Name, spec.Namespace, ispnv1.ConditionWellFormed)
schema := testKube.GetSchemaForRest(ispn)
testAuthentication(ispn, schema, usr, pass)
// Update the auth credentials.
identitiesYaml, err = users.CreateIdentitiesFor(usr, newpass)
tutils.ExpectNoError(err)
// Create secret with application credentials
secret1 := corev1.Secret{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Secret",
},
ObjectMeta: metav1.ObjectMeta{
Name: "conn-secret-test-1",
Namespace: tutils.Namespace,
},
Type: corev1.SecretTypeOpaque,
StringData: map[string]string{cconsts.ServerIdentitiesFilename: string(identitiesYaml)},
}
testKube.CreateSecret(&secret1)
defer testKube.DeleteSecret(&secret1)
// Get the associated StatefulSet
ss := appsv1.StatefulSet{}
// Get the current generation
tutils.ExpectNoError(testKube.Kubernetes.Client.Get(context.TODO(), types.NamespacedName{Namespace: spec.Namespace, Name: spec.GetStatefulSetName()}, &ss))
generation := ss.Status.ObservedGeneration
err = testKube.UpdateInfinispan(&spec, func() {
spec.Spec.Security.EndpointSecretName = "conn-secret-test-1"
})
tutils.ExpectNoError(err)
// Wait for a new generation to appear
err = wait.Poll(tutils.DefaultPollPeriod, tutils.SinglePodTimeout, func() (done bool, err error) {
tutils.ExpectNoError(testKube.Kubernetes.Client.Get(context.TODO(), types.NamespacedName{Namespace: spec.Namespace, Name: spec.Name}, &ss))
return ss.Status.ObservedGeneration >= generation+1, nil
})
tutils.ExpectNoError(err)
// Sleep for a while to be sure that the old pods are gone.
// The restart is ongoing and takes more than 10 seconds,
// so this sleep does not introduce any extra delay.
time.Sleep(10 * time.Second)
testKube.WaitForInfinispanPods(1, tutils.SinglePodTimeout, spec.Name, tutils.Namespace)
testKube.WaitForInfinispanCondition(spec.Name, spec.Namespace, ispnv1.ConditionWellFormed)
testAuthentication(ispn, schema, usr, newpass)
}
func testAuthentication(ispn *ispnv1.Infinispan, schema, usr, pass string) {
badClient := tutils.NewHTTPClient("badUser", "badPass", schema)
client := tutils.NewHTTPClient(usr, pass, schema)
hostAddr := testKube.WaitForExternalService(ispn, tutils.RouteTimeout, client)
cacheName := "test"
createCacheBadCreds(cacheName, hostAddr, badClient)
createCacheAndValidate(cacheName, hostAddr, "", client)
defer deleteCache(cacheName, hostAddr, client)
key := "test"
value := "test-operator"
keyURL := fmt.Sprintf("%v/%v", cacheURL(cacheName, hostAddr), key)
putViaRoute(keyURL, value, client)
actual := getViaRoute(keyURL, client)
if actual != value {
panic(fmt.Errorf("unexpected actual returned: %v (value %v)", actual, value))
}
}
func TestAuthenticationDisabled(t *testing.T) {
t.Parallel()
namespace := tutils.Namespace
// Create a resource without passing any config
name := strcase.ToKebab(t.Name())
spec := tutils.DefaultSpec(testKube)
spec.Name = name
spec.Spec.Security.EndpointAuthentication = pointer.BoolPtr(false)
// Create the cluster
spec.Labels = map[string]string{"test-name": t.Name()}
testKube.CreateInfinispan(spec, namespace)
defer testKube.CleanNamespaceAndLogOnPanic(tutils.Namespace, spec.Labels)
testKube.WaitForInfinispanPods(1, tutils.SinglePodTimeout, name, namespace)
testKube.WaitForInfinispanCondition(spec.Name, spec.Namespace, ispnv1.ConditionWellFormed)
// Ensure the identities secret is not created
secret := &corev1.Secret{}
key := types.NamespacedName{
Namespace: namespace,
Name: spec.GetSecretName(),
}
tutils.ExpectNotFound(testKube.Kubernetes.Client.Get(context.TODO(), key, secret))
// Ensure that rest requests do not require authentication
client := tutils.NewHTTPClientNoAuth(testKube.GetSchemaForRest(spec))
hostAddr := testKube.WaitForExternalService(spec, tutils.RouteTimeout, client)
url := fmt.Sprintf("%v/rest/v2/caches", hostAddr)
rsp, err := client.Get(url, nil)
tutils.ExpectNoError(err)
if rsp.StatusCode != http.StatusOK {
panic(httpError{rsp.StatusCode})
}
}
func TestAuthorizationDisabledByDefault(t *testing.T) {
t.Parallel()
name := strcase.ToKebab(t.Name())
ispn := tutils.DefaultSpec(testKube)
ispn.Name = name
ispn.Labels = map[string]string{"test-name": t.Name()}
identities := func() users.Identities {
return users.Identities{
Credentials: []users.Credentials{{
Username: "usr",
Password: "pass",
Roles: []string{"monitor"},
}},
}
}
verify := func(hostAddr string, client tutils.HTTPClient) {
url := fmt.Sprintf("%v/rest/v2/caches", hostAddr)
rsp, err := client.Get(url, nil)
tutils.ExpectNoError(err)
if rsp.StatusCode != http.StatusOK {
panic(httpError{rsp.StatusCode})
}
}
testAuthorization(ispn, identities, verify)
}
func TestAuthorizationWithCustomRoles(t *testing.T) {
t.Parallel()
name := strcase.ToKebab(t.Name())
ispn := tutils.DefaultSpec(testKube)
customRoleName := "custom-role"
ispn.Name = name
ispn.Labels = map[string]string{"test-name": t.Name()}
ispn.Spec.Security.Authorization = &v1.Authorization{
Enabled: true,
Roles: []ispnv1.AuthorizationRole{{
Name: customRoleName,
Permissions: []string{"ALL"},
}},
}
identities := func() users.Identities {
return users.Identities{
Credentials: []users.Credentials{
{
Username: "usr",
Password: "pass",
Roles: []string{customRoleName},
}, {
Username: "monitor-user",
Password: "pass",
Roles: []string{"monitor"},
}, {
// #1296 Add a user with no Roles defined to ensure that IDENTITIES_BATCH works as expected
Username: "usr-no-role",
Password: "pass",
},
},
}
}
verify := func(hostAddr string, client tutils.HTTPClient) {
createCacheAndValidate("succeed-cache", hostAddr, "", client)
schema := testKube.GetSchemaForRest(ispn)
rsp := createCache("fail-cache", hostAddr, "", tutils.NewHTTPClient("monitor-user", "pass", schema))
if rsp.StatusCode != http.StatusForbidden {
panic(httpError{rsp.StatusCode})
}
}
testAuthorization(ispn, identities, verify)
}
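// testAuthorization creates an identities secret from the supplied credentials, starts the cluster with that secret, and runs the verify callback with an HTTP client authenticated as the first credential.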
func testAuthorization(ispn *v1.Infinispan, createIdentities func() users.Identities, verify func(string, tutils.HTTPClient)) {
namespace := tutils.Namespace
secretName := ispn.Name + "-id-secret"
ispn.Spec.Security.EndpointSecretName = secretName
identities := createIdentities()
identitiesYaml, err := yaml.Marshal(identities)
tutils.ExpectNoError(err)
// Create secret with application credentials
secret := corev1.Secret{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Secret",
},
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: namespace,
},
Type: corev1.SecretTypeOpaque,
StringData: map[string]string{cconsts.ServerIdentitiesFilename: string(identitiesYaml)},
}
testKube.CreateSecret(&secret)
defer testKube.DeleteSecret(&secret)
// Create the cluster
testKube.CreateInfinispan(ispn, namespace)
defer testKube.CleanNamespaceAndLogOnPanic(tutils.Namespace, ispn.Labels)
testKube.WaitForInfinispanPods(1, tutils.SinglePodTimeout, ispn.Name, namespace)
testKube.WaitForInfinispanCondition(ispn.Name, ispn.Namespace, ispnv1.ConditionWellFormed)
schema := testKube.GetSchemaForRest(ispn)
user := identities.Credentials[0].Username
pass := identities.Credentials[0].Password
client := tutils.NewHTTPClient(user, pass, schema)
hostAddr := testKube.WaitForExternalService(ispn, tutils.RouteTimeout, client)
// Verify authorization works as expected
verify(hostAddr, client)
}
func TestExternalDependenciesHttp(t *testing.T) {
if os.Getenv("NO_NGINX") != "" {
t.Skip("Skipping test, no Nginx available.")
}
webServerConfig := prepareWebServer()
defer testKube.DeleteResource(tutils.Namespace, labels.SelectorFromSet(map[string]string{"app": tutils.WebServerName}), webServerConfig, tutils.SinglePodTimeout)
namespace := tutils.Namespace
spec := tutils.DefaultSpec(testKube)
name := strcase.ToKebab(t.Name())
spec.Name = name
spec.Labels = map[string]string{"test-name": t.Name()}
spec.Spec.Dependencies = &ispnv1.InfinispanExternalDependencies{
Artifacts: []ispnv1.InfinispanExternalArtifacts{
{Url: fmt.Sprintf("http://%s:%d/task01-1.0.0.jar", tutils.WebServerName, tutils.WebServerPortNumber)},
{Url: fmt.Sprintf("http://%s:%d/task02-1.0.0.zip", tutils.WebServerName, tutils.WebServerPortNumber)},
},
}
// Create the cluster
testKube.CreateInfinispan(spec, tutils.Namespace)
defer testKube.CleanNamespaceAndLogOnPanic(tutils.Namespace, spec.Labels)
testKube.WaitForInfinispanPods(1, tutils.SinglePodTimeout, name, namespace)
ispn := testKube.WaitForInfinispanCondition(spec.Name, spec.Namespace, ispnv1.ConditionWellFormed)
hostAddr, client := tutils.HTTPClientAndHost(ispn, testKube)
validateTaskExecution := func(task, param string, status int, result string) {
url := fmt.Sprintf("%s/rest/v2/tasks/%s?action=exec&param.name=%s", hostAddr, task, param)
resp, err := client.Post(url, "", nil)
tutils.ExpectNoError(err)
defer func(Body io.ReadCloser) {
tutils.ExpectNoError(Body.Close())
}(resp.Body)
if resp.StatusCode != status {
panic(fmt.Sprintf("Unexpected response code %d for the Server Task execution", resp.StatusCode))
}
if resp.StatusCode == http.StatusOK {
body, err := ioutil.ReadAll(resp.Body)
tutils.ExpectNoError(err)
if string(body) != result {
panic(fmt.Sprintf("Unexpected task %s response '%s' from the Server Task", task, string(body)))
}
}
}
for _, task := range []string{"01", "02"} {
validateTaskExecution("task-"+task, "World", http.StatusOK, "Hello World")
}
var externalLibraryAddModify = func(ispn *ispnv1.Infinispan) {
libs := &ispn.Spec.Dependencies.Artifacts
*libs = append(*libs, ispnv1.InfinispanExternalArtifacts{Url: fmt.Sprintf("http://%s:%d/task03-1.0.0.tar.gz", tutils.WebServerName, tutils.WebServerPortNumber)})
}
var externalLibraryAddVerify = func(ispn *ispnv1.Infinispan, ss *appsv1.StatefulSet) {
testKube.WaitForInfinispanCondition(ispn.Name, ispn.Namespace, ispnv1.ConditionWellFormed)
validateTaskExecution("task-03", "World", http.StatusOK, "Hello World")
}
verifyStatefulSetUpdate(*ispn, externalLibraryAddModify, externalLibraryAddVerify)
var externalLibraryHashModify = func(ispn *ispnv1.Infinispan) {
for taskName, taskData := range webServerConfig.BinaryData {
for artifactIndex, artifact := range ispn.Spec.Dependencies.Artifacts {
if strings.Contains(artifact.Url, taskName) {
ispn.Spec.Dependencies.Artifacts[artifactIndex].Hash = fmt.Sprintf("sha1:%s", hash.HashByte(taskData))
}
}
}
}
var externalLibraryHashVerify = func(ispn *ispnv1.Infinispan, ss *appsv1.StatefulSet) {
testKube.WaitForInfinispanCondition(ispn.Name, ispn.Namespace, ispnv1.ConditionWellFormed)
for _, task := range []string{"01", "02", "03"} {
validateTaskExecution("task-"+task, "World", http.StatusOK, "Hello World")
}
}
verifyStatefulSetUpdate(*ispn, externalLibraryHashModify, externalLibraryHashVerify)
var externalLibraryFailHashModify = func(ispn *ispnv1.Infinispan) {
ispn.Spec.Dependencies.Artifacts[1].Hash = fmt.Sprintf("sha1:%s", "failhash")
}
tutils.ExpectNoError(testKube.UpdateInfinispan(ispn, func() {
externalLibraryFailHashModify(ispn)
}))
podList := &corev1.PodList{}
tutils.ExpectNoError(wait.Poll(tutils.DefaultPollPeriod, tutils.SinglePodTimeout, func() (done bool, err error) {
err = testKube.Kubernetes.ResourcesList(ispn.Namespace, controllers.PodLabels(ispn.Name), podList, context.TODO())
if err != nil {
return false, nil
}
for _, pod := range podList.Items {
if kube.InitContainerFailed(pod.Status.InitContainerStatuses) {
return true, nil
}
}
return false, nil
}))
var externalLibraryRemoveModify = func(ispn *ispnv1.Infinispan) {
ispn.Spec.Dependencies = nil
}
var externalLibraryRemoveVerify = func(ispn *ispnv1.Infinispan, ss *appsv1.StatefulSet) {
testKube.WaitForInfinispanCondition(ispn.Name, ispn.Namespace, ispnv1.ConditionWellFormed)
for _, task := range []string{"01", "02", "03"} {
validateTaskExecution("task-"+task, "", http.StatusBadRequest, "")
}
}
verifyStatefulSetUpdate(*ispn, externalLibraryRemoveModify, externalLibraryRemoveVerify)
}
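// prepareWebServer deploys the test web server pod and service that serve the external-library artifacts stored in the ConfigMap loaded from external-libs-config.yaml, and waits for the pod to become ready.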
func prepareWebServer() *corev1.ConfigMap {
webServerConfig := &corev1.ConfigMap{}
testKube.LoadResourceFromYaml("../utils/data/external-libs-config.yaml", webServerConfig)
webServerConfig.Namespace = tutils.Namespace
testKube.Create(webServerConfig)
webServerPodConfig := tutils.WebServerPod(tutils.WebServerName, tutils.Namespace, webServerConfig.Name, tutils.WebServerRootFolder, tutils.WebServerImageName)
tutils.ExpectNoError(controllerutil.SetControllerReference(webServerConfig, webServerPodConfig, tutils.Scheme))
testKube.Create(webServerPodConfig)
webServerService := tutils.WebServerService(tutils.WebServerName, tutils.Namespace)
tutils.ExpectNoError(controllerutil.SetControllerReference(webServerConfig, webServerService, tutils.Scheme))
testKube.Create(webServerService)
testKube.WaitForPods(1, tutils.SinglePodTimeout, &client.ListOptions{Namespace: tutils.Namespace, LabelSelector: labels.SelectorFromSet(map[string]string{"app": tutils.WebServerName})}, nil)
return webServerConfig
}
func cacheURL(cacheName, hostAddr string) string {
return fmt.Sprintf("%v/rest/v2/caches/%s", hostAddr, cacheName)
}
type httpError struct {
status int
}
func (e *httpError) Error() string {
return fmt.Sprintf("unexpected response %v", e.status)
}
func createCache(cacheName, hostAddr string, flags string, client tutils.HTTPClient) *http.Response {
httpURL := cacheURL(cacheName, hostAddr)
headers := map[string]string{}
if flags != "" {
headers["Flags"] = flags
}
resp, err := client.Post(httpURL, "", headers)
tutils.ExpectNoError(err)
return resp
}
func createCacheAndValidate(cacheName, hostAddr string, flags string, client tutils.HTTPClient) {
resp := createCache(cacheName, hostAddr, flags, client)
defer tutils.CloseHttpResponse(resp)
if resp.StatusCode != http.StatusOK {
panic(httpError{resp.StatusCode})
}
}
func createCacheBadCreds(cacheName, hostAddr string, client tutils.HTTPClient) {
defer func() {
data := recover()
if data == nil {
panic("createCacheBadCred should fail, but it doesn't")
}
err := data.(httpError)
if err.status != http.StatusUnauthorized {
panic(err)
}
}()
createCacheAndValidate(cacheName, hostAddr, "", client)
}
func createCacheWithXMLTemplate(cacheName, hostAddr, template string, client tutils.HTTPClient) {
httpURL := cacheURL(cacheName, hostAddr)
fmt.Printf("Create cache: %v\n", httpURL)
headers := map[string]string{
"Content-Type": "application/xml;charset=UTF-8",
}
resp, err := client.Post(httpURL, template, headers)
defer tutils.CloseHttpResponse(resp)
tutils.ExpectNoError(err)
// Accept all the 2xx success codes
if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices {
throwHTTPError(resp)
}
}
func deleteCache(cacheName, hostAddr string, client tutils.HTTPClient) {
httpURL := cacheURL(cacheName, hostAddr)
resp, err := client.Delete(httpURL, nil)
tutils.ExpectNoError(err)
if resp.StatusCode != http.StatusOK {
panic(httpError{resp.StatusCode})
}
}
func getViaRoute(url string, client tutils.HTTPClient) string {
resp, err := client.Get(url, nil)
tutils.ExpectNoError(err)
defer func(Body io.ReadCloser) {
tutils.ExpectNoError(Body.Close())
}(resp.Body)
if resp.StatusCode != http.StatusOK {
throwHTTPError(resp)
}
bodyBytes, err := ioutil.ReadAll(resp.Body)
tutils.ExpectNoError(err)
return string(bodyBytes)
}
func putViaRoute(url, value string, client tutils.HTTPClient) {
headers := map[string]string{
"Content-Type": "text/plain",
}
resp, err := client.Post(url, value, headers)
defer tutils.CloseHttpResponse(resp)
tutils.ExpectNoError(err)
if resp.StatusCode != http.StatusNoContent {
throwHTTPError(resp)
}
}
func waitForCacheToBeCreated(cacheName, hostAddr string, client tutils.HTTPClient) {
err := wait.Poll(tutils.DefaultPollPeriod, tutils.MaxWaitTimeout, func() (done bool, err error) {
httpURL := cacheURL(cacheName, hostAddr)
fmt.Printf("Waiting for cache to be created")
resp, err := client.Get(httpURL, nil)
if err != nil {
return false, err
}
return resp.StatusCode == http.StatusOK, nil
})
tutils.ExpectNoError(err)
}
func throwHTTPError(resp *http.Response) {
errorBytes, _ := ioutil.ReadAll(resp.Body)
panic(fmt.Errorf("unexpected HTTP status code (%d): %s", resp.StatusCode, string(errorBytes)))
}
func TestCacheCR(t *testing.T) {
t.Parallel()
spec := tutils.DefaultSpec(testKube)
name := strcase.ToKebab(t.Name())
spec.Name = name
spec.Labels = map[string]string{"test-name": t.Name()}
testKube.CreateInfinispan(spec, tutils.Namespace)
defer testKube.CleanNamespaceAndLogOnPanic(tutils.Namespace, spec.Labels)
testKube.WaitForInfinispanPods(1, tutils.SinglePodTimeout, spec.Name, tutils.Namespace)
ispn := testKube.WaitForInfinispanCondition(spec.Name, spec.Namespace, ispnv1.ConditionWellFormed)
//Test for CacheCR with TemplateName
cacheCRTemplateName := createCacheWithCR("cache-with-static-template", spec.Namespace, name)
cacheCRTemplateName.Spec.TemplateName = "org.infinispan.DIST_SYNC"
testCacheWithCR(ispn, cacheCRTemplateName)
//Test for CacheCR with TemplateXML
cacheCRTemplateXML := createCacheWithCR("cache-with-xml-template", spec.Namespace, name)
cacheCRTemplateXML.Spec.Template = "<infinispan><cache-container><distributed-cache name=\"cache-with-xml-template\" mode=\"SYNC\"><persistence><file-store/></persistence></distributed-cache></cache-container></infinispan>"
testCacheWithCR(ispn, cacheCRTemplateXML)
}
func testCacheWithCR(ispn *ispnv1.Infinispan, cache *v2alpha1.Cache) {
key := "testkey"
value := "test-operator"
testKube.Create(cache)
hostAddr, client := tutils.HTTPClientAndHost(ispn, testKube)
condition := v2alpha1.CacheCondition{
Type: "Ready",
Status: "True",
}
testKube.WaitForCacheCondition(cache.Spec.Name, cache.Namespace, condition)
waitForCacheToBeCreated(cache.Spec.Name, hostAddr, client)
testBasicCacheUsage(key, value, cache.Spec.Name, hostAddr, client)
defer testKube.DeleteCache(cache)
}
func createCacheWithCR(cacheName string, nameSpace string, clusterName string) *v2alpha1.Cache {
return &v2alpha1.Cache{
TypeMeta: metav1.TypeMeta{
APIVersion: "infinispan.org/v2alpha1",
Kind: "Cache",
},
ObjectMeta: metav1.ObjectMeta{
Name: cacheName,
Namespace: nameSpace,
},
Spec: v2alpha1.CacheSpec{
ClusterName: clusterName,
Name: cacheName,
},
}
}
func TestPodDegradationAfterOOM(t *testing.T) {
t.Parallel()
//Creating Infinispan cluster
ispn := tutils.DefaultSpec(testKube)
name := strcase.ToKebab(t.Name())
ispn.Name = name
ispn.Labels = map[string]string{"test-name": t.Name()}
ispn.Spec.Replicas = 2
ispn.Spec.Container.Memory = "256Mi"
testKube.CreateInfinispan(ispn, tutils.Namespace)
defer testKube.CleanNamespaceAndLogOnPanic(tutils.Namespace, ispn.Labels)
testKube.WaitForInfinispanPods(int(ispn.Spec.Replicas), tutils.SinglePodTimeout, ispn.Name, tutils.Namespace)
testKube.WaitForInfinispanCondition(ispn.Name, ispn.Namespace, ispnv1.ConditionWellFormed)
//Creating cache
cacheName := "failover-cache"
template := `<replicated-cache name ="` + cacheName + `"><encoding media-type="text/plain"/></replicated-cache>`
veryLongValue := GenerateStringWithCharset(100000)
hostAddr, client := tutils.HTTPClientAndHost(ispn, testKube)
createCacheWithXMLTemplate(cacheName, hostAddr, template, client)
//Generate tons of random entries
for key := 1; key < 50000; key++ {
keyURL := fmt.Sprintf("%v/%v", cacheURL(cacheName, hostAddr), key)
headers := map[string]string{"Content-Type": "text/plain"}
_, err := client.Post(keyURL, veryLongValue, headers)
if err != nil {
fmt.Printf("ERROR for key=%d, Description=%s\n", key, err)
break
}
}
//Check if all pods are running and they are not degraded
testKube.WaitForInfinispanPods(int(ispn.Spec.Replicas), tutils.SinglePodTimeout, ispn.Name, tutils.Namespace)
testKube.WaitForInfinispanCondition(ispn.Name, ispn.Namespace, ispnv1.ConditionWellFormed)
//Verify whether the pod restarted for an OOM exception
hasOOMhappened := false
podList := &corev1.PodList{}
tutils.ExpectNoError(testKube.Kubernetes.ResourcesList(tutils.Namespace, controllers.PodLabels(ispn.Name), podList, context.TODO()))
for _, pod := range podList.Items {
status := pod.Status.ContainerStatuses
out:
for _, containerStatuses := range status {
if containerStatuses.LastTerminationState.Terminated != nil {
terminatedPod := containerStatuses.LastTerminationState.Terminated
if terminatedPod.Reason == "OOMKilled" {
hasOOMhappened = true
fmt.Printf("ExitCode='%d' Reason='%s' Message='%s'\n", terminatedPod.ExitCode, terminatedPod.Reason, terminatedPod.Message)
break out
}
}
}
}
if kube.AreAllPodsReady(podList) && hasOOMhappened {
fmt.Println("All pods are ready")
} else if kube.AreAllPodsReady(podList) && !hasOOMhappened {
panic("Test finished without an OutOfMemory occurred")
} else {
panic("One of the pods is degraded")
}
}
func GenerateStringWithCharset(length int) string {
const charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
var seededRand *rand.Rand = rand.New(rand.NewSource(time.Now().UnixNano()))
b := make([]byte, length)
for i := range b {
b[i] = charset[seededRand.Intn(len(charset))]
}
return string(b)
}
// Test custom configuration with cache-container element
func TestUserXmlCustomConfig(t *testing.T) {
t.Parallel()
configMap := newCustomConfigMap(t.Name(), "xml")
testCustomConfig(t, configMap)
}
func TestUserYamlCustomConfig(t *testing.T) {
t.Parallel()
configMap := newCustomConfigMap(t.Name(), "yaml")
testCustomConfig(t, configMap)
}
func TestUserJsonCustomConfig(t *testing.T) {
t.Parallel()
configMap := newCustomConfigMap(t.Name(), "json")
testCustomConfig(t, configMap)
}
func testCustomConfig(t *testing.T, configMap *corev1.ConfigMap) {
testKube.Create(configMap)
defer testKube.DeleteConfigMap(configMap)
// Create a resource without passing any config
ispn := tutils.DefaultSpec(testKube)
ispn.Spec.ConfigMapName = configMap.Name
ispn.Name = strcase.ToKebab(t.Name())
// Register it
ispn.Labels = map[string]string{"test-name": t.Name()}
testKube.CreateInfinispan(ispn, tutils.Namespace)
defer testKube.CleanNamespaceAndLogOnPanic(tutils.Namespace, ispn.Labels)
testKube.WaitForInfinispanPods(1, tutils.SinglePodTimeout, ispn.Name, tutils.Namespace)
testKube.WaitForInfinispanCondition(ispn.Name, ispn.Namespace, ispnv1.ConditionWellFormed)
key := "testkey"
value := "test-operator"
hostAddr, client := tutils.HTTPClientAndHost(ispn, testKube)
testBasicCacheUsage(key, value, t.Name(), hostAddr, client)
}
func testBasicCacheUsage(key, value, cacheName, hostAddr string, client tutils.HTTPClient) {
keyURL := fmt.Sprintf("%v/%v", cacheURL(cacheName, hostAddr), key)
putViaRoute(keyURL, value, client)
actual := getViaRoute(keyURL, client)
if actual != value {
panic(fmt.Errorf("unexpected actual returned: %v (value %v)", actual, value))
}
}
// TestUserCustomConfigWithAuthUpdate tests that user custom config works well with update
// using authentication update to trigger a cluster update
func TestUserCustomConfigWithAuthUpdate(t *testing.T) {
t.Parallel()
configMap := newCustomConfigMap(t.Name(), "xml")
testKube.Create(configMap)
defer testKube.DeleteConfigMap(configMap)
var modifier = func(ispn *ispnv1.Infinispan) {
// testing cache pre update
key := "testkey"
value := "test-operator"
hostAddr, client := tutils.HTTPClientAndHost(ispn, testKube)
testBasicCacheUsage(key, value, t.Name(), hostAddr, client)
ispn.Spec.Security.EndpointAuthentication = pointer.BoolPtr(true)
}
var verifier = func(ispn *ispnv1.Infinispan, ss *appsv1.StatefulSet) {
testKube.WaitForInfinispanCondition(ss.Name, ss.Namespace, ispnv1.ConditionWellFormed)
// testing cache post update
key := "testkey"
value := "test-operator"
hostAddr, client := tutils.HTTPClientAndHost(ispn, testKube)
testBasicCacheUsage(key, value, t.Name(), hostAddr, client)
}
ispn := tutils.DefaultSpec(testKube)
ispn.Name = strcase.ToKebab(t.Name())
ispn.Labels = map[string]string{"test-name": t.Name()}
ispn.Spec.Security.EndpointAuthentication = pointer.BoolPtr(false)
ispn.Spec.ConfigMapName = configMap.Name
genericTestForContainerUpdated(*ispn, modifier, verifier)
}
// TestUserCustomConfigUpdateOnNameChange tests that user custom config works well with user config update
func TestUserCustomConfigUpdateOnNameChange(t *testing.T) {
t.Parallel()
configMap := newCustomConfigMap(t.Name(), "xml")
testKube.Create(configMap)
defer testKube.DeleteConfigMap(configMap)
configMapChanged := newCustomConfigMap(t.Name()+"Changed", "xml")
testKube.Create(configMapChanged)
defer testKube.DeleteConfigMap(configMapChanged)
var modifier = func(ispn *ispnv1.Infinispan) {
// testing cache pre update
key := "testkey"
value := "test-operator"
hostAddr, client := tutils.HTTPClientAndHost(ispn, testKube)
testBasicCacheUsage(key, value, t.Name(), hostAddr, client)
ispn.Spec.ConfigMapName = configMapChanged.Name
}
var verifier = func(ispn *ispnv1.Infinispan, ss *appsv1.StatefulSet) {
testKube.WaitForInfinispanCondition(ss.Name, ss.Namespace, ispnv1.ConditionWellFormed)
// testing cache post update
key := "testkey"
value := "test-operator"
hostAddr, client := tutils.HTTPClientAndHost(ispn, testKube)
testBasicCacheUsage(key, value, t.Name()+"Changed", hostAddr, client)
}
ispn := tutils.DefaultSpec(testKube)
ispn.Name = strcase.ToKebab(t.Name())
ispn.Labels = map[string]string{"test-name": t.Name()}
ispn.Spec.Security.EndpointAuthentication = pointer.BoolPtr(false)
ispn.Spec.ConfigMapName = configMap.Name
genericTestForContainerUpdated(*ispn, modifier, verifier)
}
func TestUserCustomConfigUpdateOnChange(t *testing.T) {
t.Parallel()
configMap := newCustomConfigMap(t.Name(), "xml")
testKube.Create(configMap)
defer testKube.DeleteConfigMap(configMap)
newCacheName := t.Name() + "Updated"
var modifier = func(ispn *ispnv1.Infinispan) {
// testing cache pre update
key := "testkey"
value := "test-operator"
hostAddr, client := tutils.HTTPClientAndHost(ispn, testKube)
testBasicCacheUsage(key, value, t.Name(), hostAddr, client)
configMapUpdated := newCustomConfigMap(newCacheName, "xml")
// Reuse old name to test CM in-place update
configMapUpdated.Name = strcase.ToKebab(t.Name())
testKube.UpdateConfigMap(configMapUpdated)
}
var verifier = func(ispn *ispnv1.Infinispan, ss *appsv1.StatefulSet) {
testKube.WaitForInfinispanCondition(ss.Name, ss.Namespace, ispnv1.ConditionWellFormed)
// testing cache post update
key := "testkey"
value := "test-operator"
hostAddr, client := tutils.HTTPClientAndHost(ispn, testKube)
testBasicCacheUsage(key, value, newCacheName, hostAddr, client)
}
ispn := tutils.DefaultSpec(testKube)
ispn.Name = strcase.ToKebab(t.Name())
ispn.Labels = map[string]string{"test-name": t.Name()}
ispn.Spec.Security.EndpointAuthentication = pointer.BoolPtr(false)
ispn.Spec.ConfigMapName = configMap.Name
genericTestForContainerUpdated(*ispn, modifier, verifier)
}
// TestUserCustomConfigUpdateOnAdd tests that user custom config works well with user config update
func TestUserCustomConfigUpdateOnAdd(t *testing.T) {
t.Parallel()
configMap := newCustomConfigMap(t.Name(), "xml")
testKube.Create(configMap)
defer testKube.DeleteConfigMap(configMap)
var modifier = func(ispn *ispnv1.Infinispan) {
tutils.ExpectNoError(testKube.UpdateInfinispan(ispn, func() {
ispn.Spec.ConfigMapName = configMap.Name
}))
}
var verifier = func(ispn *ispnv1.Infinispan, ss *appsv1.StatefulSet) {
testKube.WaitForInfinispanCondition(ss.Name, ss.Namespace, ispnv1.ConditionWellFormed)
// testing cache post update
key := "testkey"
value := "test-operator"
hostAddr, client := tutils.HTTPClientAndHost(ispn, testKube)
testBasicCacheUsage(key, value, t.Name(), hostAddr, client)
}
ispn := tutils.DefaultSpec(testKube)
ispn.Name = strcase.ToKebab(t.Name())
ispn.Labels = map[string]string{"test-name": t.Name()}
ispn.Spec.Security.EndpointAuthentication = pointer.BoolPtr(false)
genericTestForContainerUpdated(*ispn, modifier, verifier)
}
func newCustomConfigMap(name, format string) *corev1.ConfigMap {
var userCacheContainer string
switch format {
case "xml":
userCacheContainer = `<infinispan
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:infinispan:config:13.0 http://www.infinispan.org/schemas/infinispan-config-13.0.xsd
urn:infinispan:server:13.0 http://www.infinispan.org/schemas/infinispan-server-13.0.xsd"
xmlns="urn:infinispan:config:13.0"
xmlns:server="urn:infinispan:server:13.0">
<cache-container name="default" statistics="true">
<distributed-cache name="` + name + `"/>
</cache-container>
</infinispan>`
case "yaml":
userCacheContainer = `infinispan:
cacheContainer:
name: default
distributedCache:
name: ` + name
case "json":
userCacheContainer = `{ "infinispan": { "cacheContainer": { "name": "default", "distributedCache": { "name": "` + name + `"}}}}`
}
return &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: strcase.ToKebab(name),
Namespace: tutils.Namespace},
Data: map[string]string{"infinispan-config." + format: userCacheContainer},
}
}
|
[
"\"TESTING_CONTEXT\"",
"\"NO_NGINX\""
] |
[] |
[
"NO_NGINX",
"TESTING_CONTEXT"
] |
[]
|
["NO_NGINX", "TESTING_CONTEXT"]
|
go
| 2 | 0 | |
tests/cli/__init__.py
|
import os
from pathlib import Path
from subprocess import run, PIPE, CompletedProcess
from collections import defaultdict
import json
import pytest
import yaml
EXECUTION_TIMEOUT = 2 # seconds
class CLI:
"""Helper class to ease testing of CLI commands"""
def __init__(self, executable_list, tmpdir, print_debug_output=True):
self._executable = executable_list
self.tmpdir = tmpdir
self._tmpfile_auto_increment = defaultdict(int)
# print stdout/err and exit code so that in case of errors we can see
# what happened
self._print_debug_output = print_debug_output
def __call__(self, *args, stdin="", env=None, encoding="UTF-8") -> CompletedProcess:
# copy into a per-call dict so a shared (formerly mutable default) argument
# is never mutated between calls; patch PATH into env if not already set
env = dict(env) if env else {}
env.setdefault("PATH", os.environ["PATH"])
result = run(
["tpl", *[str(arg) for arg in args]],
timeout=EXECUTION_TIMEOUT,
stdout=PIPE,
stderr=PIPE,
input=str(stdin).encode(encoding),
env=env,
cwd=str(self.tmpdir)
)
# Python 3.5 doesn't support the `encoding` argument to `run()`,
# so we have to manually decode the byte strings
result.stdout = result.stdout.decode(encoding)
result.stderr = result.stderr.decode(encoding)
if self._print_debug_output:
self.print_debug_info_for_call(result)
return result
def _print_stream_output(self, call_result: CompletedProcess, stream_name: str):
stream = getattr(call_result, stream_name.lower())
name = stream_name.upper()
print(name + ":", end="")
if len(stream) == 0:
print(" (stream is empty)")
elif stream == "\n":
print(" (stream is empty, containts only one newline)")
elif stream[-1] != "\n":
print(" (does not end in newline)")
else:
print()
print("-" * 24)
print(stream, end="")
# if it doesn't end in a newline add one so the separation doesn't start
# directly after the output
if len(stream) > 0 and stream[-1] != "\n":
print()
print("=" * 24)
def print_debug_info_for_call(self, call_result: CompletedProcess):
print("Command:", call_result.args)
print("Return code:", call_result.returncode)
self._print_stream_output(call_result, "stdout")
self._print_stream_output(call_result, "stderr")
print("Folder hierarchy:")
print(self.folder_tree())
def folder_tree(self, path=None):
if path is None:
path = self.tmpdir
path = Path(str(path))
return "./\n" + "\n".join(self._folder_structure_recursive(path))
def _folder_structure_recursive(self, path: Path):
for item in path.iterdir():
yield "|-- " + item.name
if item.is_dir():
for line in self._folder_structure_recursive(item):
yield "| " + line
def _normalize_filename(self, name):
allowed_chars = (
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"01234567890"
"-_."
)
return "".join([c for c in str(name) if c in allowed_chars][:32])
def unique_file(self, name="") -> Path:
"""Generate a unique filename that can be used in the tmpdir"""
normalized = self._normalize_filename(name)
index = str(self._tmpfile_auto_increment[normalized])
self._tmpfile_auto_increment[normalized] += 1
filename = normalized + "-" + index
if len(normalized) == 0:
filename = index
return Path(str(self.tmpdir), filename)
def path_for_content(self, file_content, encoding="UTF-8", name="") -> Path:
if name == "":
name = file_content # use the first few characters to form a name
file_path = self.unique_file(name)
with file_path.open("wb") as file:
file.write(str(file_content).encode(encoding))
return file_path
def path_for_json(self, content: dict, encoding="UTF-8", name="") -> Path:
if name == "":
name = "json-data"
return self.path_for_content(json.dumps(content), encoding, name)
def path_for_yaml(self, content: dict, encoding="UTF-8", name="") -> Path:
if name == "":
name = "yaml-data"
return self.path_for_content(
yaml.dump(content, default_flow_style=False),
encoding,
name
)
@pytest.fixture
def cli(tmpdir):
yield CLI("tpl", tmpdir)
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
wexpect/wexpect_util.py
|
"""Wexpect is a Windows variant of pexpect https://pexpect.readthedocs.io.
Wexpect is a Python module for spawning child applications and controlling
them automatically.
wexpect_util contains small helper functions and classes that are used by multiple
wexpect modules. The command line argument parsers and the exceptions are placed here.
"""
import argparse
import re
import traceback
import sys
import os
import logging
import signal
# platform does not define VEOF so assume CTRL-D
EOF_CHAR = b'\x04'
SIGNAL_CHARS = {
signal.SIGTERM: b'\x011', # Device control 1
signal.SIGINT: b'\x012', # Device control 2
}
SPAM = 5
logging.addLevelName(SPAM, "SPAM")
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else: # pragma: no cover
raise argparse.ArgumentTypeError('Boolean value expected.')
def spam(self, message, *args, **kws): # pragma: no cover
'''Very verbose debug function.
'''
if self.isEnabledFor(SPAM):
# Yes, logger takes its '*args' as 'args'.
self._log(SPAM, message, args, **kws)
logging.Logger.spam = spam
def init_logger(logger=None): # pragma: no cover
'''Initializes the logger. I won't measure coverage for this debug method.
'''
if logger is None:
logger = logging.getLogger('wexpect')
try:
logger_level = os.environ['WEXPECT_LOGGER_LEVEL']
try:
logger_filename = os.environ['WEXPECT_LOGGER_FILENAME']
except KeyError:
pid = os.getpid()
logger_filename = f'./.wlog/wexpect_{pid}'
logger.setLevel(logger_level)
logger_filename = f'{logger_filename}.log'
os.makedirs(os.path.dirname(logger_filename), exist_ok=True)
fh = logging.FileHandler(logger_filename, 'a', 'utf-8')
formatter = logging.Formatter(
'%(asctime)s - %(filename)s:%(lineno)d - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
except KeyError:
logger.setLevel(logging.ERROR)
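# Illustrative configuration sketch (assumed shell session, not part of this module):
# the logger only activates when WEXPECT_LOGGER_LEVEL is present in the environment.
#
#   set WEXPECT_LOGGER_LEVEL=DEBUG
#   set WEXPECT_LOGGER_FILENAME=./wexpect_debug
#
# With only the level set, output defaults to ./.wlog/wexpect_<pid>.log; a custom
# filename gets the '.log' suffix appended, e.g. ./wexpect_debug.log.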
def split_command_line(command_line, escape_char='^'):
"""This splits a command line into a list of arguments. It splits arguments
on spaces, but handles embedded quotes, doublequotes, and escaped
characters. It's impossible to do this with a regular expression, so I
wrote a little state machine to parse the command line. """
arg_list = []
arg = ''
# Constants to name the states we can be in.
state_basic = 0
state_esc = 1
state_singlequote = 2
state_doublequote = 3
state_whitespace = 4 # The state of consuming whitespace between commands.
state = state_basic
for c in command_line:
if state == state_basic or state == state_whitespace:
if c == escape_char: # Escape the next character
state = state_esc
elif c == r"'": # Handle single quote
state = state_singlequote
elif c == r'"': # Handle double quote
state = state_doublequote
elif c.isspace():
# Add arg to arg_list if we aren't in the middle of whitespace.
if state == state_whitespace:
pass # Do nothing.
else:
arg_list.append(arg)
arg = ''
state = state_whitespace
else:
arg = arg + c
state = state_basic
elif state == state_esc:
arg = arg + c
state = state_basic
elif state == state_singlequote:
if c == r"'":
state = state_basic
else:
arg = arg + c
elif state == state_doublequote:
if c == r'"':
state = state_basic
else:
arg = arg + c
if arg != '':
arg_list.append(arg)
return arg_list
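# Illustrative example (not from the original module) of how the state machine
# splits a command line: double quotes group words, and '^' escapes the next character.
#
#   split_command_line('prog "C:\\Program Files\\app" ^"arg^"')
#   # -> ['prog', 'C:\\Program Files\\app', '"arg"']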
def join_args(args):
"""Joins arguments a command line. It quotes all arguments that contain
spaces or any of the characters ^!$%&()[]{}=;'+,`~"""
commandline = []
for arg in args:
if re.search('[\\^!$%&()[\\]{}=;\'+,`~\\s]', arg):
arg = '"%s"' % arg
commandline.append(arg)
return ' '.join(commandline)
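# Illustrative example (not from the original module): arguments containing
# whitespace or the listed special characters are wrapped in double quotes.
#
#   join_args(['run', 'a b', 'c=d'])
#   # -> 'run "a b" "c=d"'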
class ExceptionPexpect(Exception):
"""Base class for all exceptions raised by this module.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
def get_trace(self):
"""This returns an abbreviated stack trace with lines that only concern
the caller. In other words, the stack trace inside the Wexpect module
is not included. """
tblist = traceback.extract_tb(sys.exc_info()[2])
tblist = [item for item in tblist if self.__filter_not_wexpect(item)]
tblist = traceback.format_list(tblist)
return ''.join(tblist)
def __filter_not_wexpect(self, trace_list_item):
"""This returns True if list item 0 the string 'wexpect.py' in it. """
if trace_list_item[0].find('host.py') == -1:
return True
else:
return False
class EOF(ExceptionPexpect):
"""Raised when EOF is read from a child. This usually means the child has exited.
The user can wait for EOF, which means waiting for the child process to finish executing."""
class TIMEOUT(ExceptionPexpect):
"""Raised when a read time exceeds the timeout. """
init_logger()
|
[] |
[] |
[
"WEXPECT_LOGGER_FILENAME",
"WEXPECT_LOGGER_LEVEL"
] |
[]
|
["WEXPECT_LOGGER_FILENAME", "WEXPECT_LOGGER_LEVEL"]
|
python
| 2 | 0 | |
kentest/asgi.py
|
"""
ASGI config for kentest project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kentest.settings')
application = get_asgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
caddy/caddymain/run.go
|
// Copyright 2015 Light Code Labs, LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddymain
import (
"bufio"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"github.com/google/uuid"
"github.com/klauspost/cpuid"
"github.com/mholt/caddy"
"github.com/mholt/caddy/caddytls"
"github.com/mholt/caddy/telemetry"
"github.com/mholt/certmagic"
lumberjack "gopkg.in/natefinch/lumberjack.v2"
_ "github.com/mholt/caddy/caddyhttp" // plug in the HTTP server type
// This is where other plugins get plugged in (imported)
)
func init() {
caddy.TrapSignals()
setVersion()
flag.BoolVar(&certmagic.Agreed, "agree", false, "Agree to the CA's Subscriber Agreement")
flag.StringVar(&certmagic.CA, "ca", certmagic.CA, "URL to certificate authority's ACME server directory")
flag.BoolVar(&certmagic.DisableHTTPChallenge, "disable-http-challenge", certmagic.DisableHTTPChallenge, "Disable the ACME HTTP challenge")
flag.BoolVar(&certmagic.DisableTLSALPNChallenge, "disable-tls-alpn-challenge", certmagic.DisableTLSALPNChallenge, "Disable the ACME TLS-ALPN challenge")
flag.StringVar(&disabledMetrics, "disabled-metrics", "", "Comma-separated list of telemetry metrics to disable")
flag.StringVar(&conf, "conf", "", "Caddyfile to load (default \""+caddy.DefaultConfigFile+"\")")
flag.StringVar(&cpu, "cpu", "100%", "CPU cap")
flag.StringVar(&envFile, "env", "", "Path to file with environment variables to load in KEY=VALUE format")
flag.BoolVar(&plugins, "plugins", false, "List installed plugins")
flag.StringVar(&certmagic.Email, "email", "", "Default ACME CA account email address")
flag.DurationVar(&certmagic.HTTPTimeout, "catimeout", certmagic.HTTPTimeout, "Default ACME CA HTTP timeout")
flag.StringVar(&logfile, "log", "", "Process log file")
flag.IntVar(&logRollMB, "log-roll-mb", 100, "Roll process log when it reaches this many megabytes (0 to disable rolling)")
flag.BoolVar(&logRollCompress, "log-roll-compress", true, "Gzip-compress rolled process log files")
flag.StringVar(&caddy.PidFile, "pidfile", "", "Path to write pid file")
flag.BoolVar(&caddy.Quiet, "quiet", false, "Quiet mode (no initialization output)")
flag.StringVar(&revoke, "revoke", "", "Hostname for which to revoke the certificate")
flag.StringVar(&serverType, "type", "http", "Type of server to run")
flag.BoolVar(&version, "version", false, "Show version")
flag.BoolVar(&validate, "validate", false, "Parse the Caddyfile but do not start the server")
caddy.RegisterCaddyfileLoader("flag", caddy.LoaderFunc(confLoader))
caddy.SetDefaultCaddyfileLoader("default", caddy.LoaderFunc(defaultLoader))
}
// Run is Caddy's main() function.
func Run() {
flag.Parse()
caddy.AppName = appName
caddy.AppVersion = appVersion
certmagic.UserAgent = appName + "/" + appVersion
// Set up process log before anything bad happens
switch logfile {
case "stdout":
log.SetOutput(os.Stdout)
case "stderr":
log.SetOutput(os.Stderr)
case "":
log.SetOutput(ioutil.Discard)
default:
if logRollMB > 0 {
log.SetOutput(&lumberjack.Logger{
Filename: logfile,
MaxSize: logRollMB,
MaxAge: 14,
MaxBackups: 10,
Compress: logRollCompress,
})
} else {
err := os.MkdirAll(filepath.Dir(logfile), 0755)
if err != nil {
mustLogFatalf("%v", err)
}
f, err := os.OpenFile(logfile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
mustLogFatalf("%v", err)
}
// don't close file; log should be writeable for duration of process
log.SetOutput(f)
}
}
//Load all additional envs as soon as possible
if err := LoadEnvFromFile(envFile); err != nil {
mustLogFatalf("%v", err)
}
// initialize telemetry client
if EnableTelemetry {
err := initTelemetry()
if err != nil {
mustLogFatalf("[ERROR] Initializing telemetry: %v", err)
}
} else if disabledMetrics != "" {
mustLogFatalf("[ERROR] Cannot disable specific metrics because telemetry is disabled")
}
// Check for one-time actions
if revoke != "" {
err := caddytls.Revoke(revoke)
if err != nil {
mustLogFatalf("%v", err)
}
fmt.Printf("Revoked certificate for %s\n", revoke)
os.Exit(0)
}
if version {
fmt.Printf("%s %s (unofficial)\n", appName, appVersion)
if devBuild && gitShortStat != "" {
fmt.Printf("%s\n%s\n", gitShortStat, gitFilesModified)
}
os.Exit(0)
}
if plugins {
fmt.Println(caddy.DescribePlugins())
os.Exit(0)
}
// Set CPU cap
err := setCPU(cpu)
if err != nil {
mustLogFatalf("%v", err)
}
// Executes Startup events
caddy.EmitEvent(caddy.StartupEvent, nil)
// Get Caddyfile input
caddyfileinput, err := caddy.LoadCaddyfile(serverType)
if err != nil {
mustLogFatalf("%v", err)
}
if validate {
err := caddy.ValidateAndExecuteDirectives(caddyfileinput, nil, true)
if err != nil {
mustLogFatalf("%v", err)
}
msg := "Caddyfile is valid"
fmt.Println(msg)
log.Printf("[INFO] %s", msg)
os.Exit(0)
}
// Start your engines
instance, err := caddy.Start(caddyfileinput)
if err != nil {
mustLogFatalf("%v", err)
}
// Begin telemetry (these are no-ops if telemetry disabled)
telemetry.Set("caddy_version", appVersion)
telemetry.Set("num_listeners", len(instance.Servers()))
telemetry.Set("server_type", serverType)
telemetry.Set("os", runtime.GOOS)
telemetry.Set("arch", runtime.GOARCH)
telemetry.Set("cpu", struct {
BrandName string `json:"brand_name,omitempty"`
NumLogical int `json:"num_logical,omitempty"`
AESNI bool `json:"aes_ni,omitempty"`
}{
BrandName: cpuid.CPU.BrandName,
NumLogical: runtime.NumCPU(),
AESNI: cpuid.CPU.AesNi(),
})
if containerized := detectContainer(); containerized {
telemetry.Set("container", containerized)
}
telemetry.StartEmitting()
// Twiddle your thumbs
instance.Wait()
}
// mustLogFatalf wraps log.Fatalf() in a way that ensures the
// output is always printed to stderr so the user can see it
// if the user is still there, even if the process log was not
// enabled. If this process is an upgrade, however, and the user
// might not be there anymore, this just logs to the process
// log and exits.
func mustLogFatalf(format string, args ...interface{}) {
if !caddy.IsUpgrade() {
log.SetOutput(os.Stderr)
}
log.Fatalf(format, args...)
}
// confLoader loads the Caddyfile using the -conf flag.
func confLoader(serverType string) (caddy.Input, error) {
if conf == "" {
return nil, nil
}
if conf == "stdin" {
return caddy.CaddyfileFromPipe(os.Stdin, serverType)
}
var contents []byte
if strings.Contains(conf, "*") {
// Let caddyfile.doImport logic handle the globbed path
contents = []byte("import " + conf)
} else {
var err error
contents, err = ioutil.ReadFile(conf)
if err != nil {
return nil, err
}
}
return caddy.CaddyfileInput{
Contents: contents,
Filepath: conf,
ServerTypeName: serverType,
}, nil
}
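// Illustrative invocations (assumed, not part of this file): a globbed -conf value
// is turned into a synthetic "import" Caddyfile, while a plain path is read directly:
//
//   caddy -conf "conf.d/*.conf"   // loads the input `import conf.d/*.conf`
//   caddy -conf ./Caddyfile       // reads ./Caddyfile from disk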
// defaultLoader loads the Caddyfile from the current working directory.
func defaultLoader(serverType string) (caddy.Input, error) {
contents, err := ioutil.ReadFile(caddy.DefaultConfigFile)
if err != nil {
if os.IsNotExist(err) {
return nil, nil
}
return nil, err
}
return caddy.CaddyfileInput{
Contents: contents,
Filepath: caddy.DefaultConfigFile,
ServerTypeName: serverType,
}, nil
}
// setVersion figures out the version information
// based on variables set by -ldflags.
func setVersion() {
// A development build is one that's not at a tag or has uncommitted changes
devBuild = gitTag == "" || gitShortStat != ""
if buildDate != "" {
buildDate = " " + buildDate
}
// Only set the appVersion if -ldflags was used
if gitNearestTag != "" || gitTag != "" {
if devBuild && gitNearestTag != "" {
appVersion = fmt.Sprintf("%s (+%s%s)",
strings.TrimPrefix(gitNearestTag, "v"), gitCommit, buildDate)
} else if gitTag != "" {
appVersion = strings.TrimPrefix(gitTag, "v")
}
}
}
// setCPU parses string cpu and sets GOMAXPROCS
// according to its value. It accepts either
// a number (e.g. 3) or a percent (e.g. 50%).
// If the percent resolves to less than a single
// GOMAXPROCS, it rounds it up to GOMAXPROCS=1.
func setCPU(cpu string) error {
var numCPU int
availCPU := runtime.NumCPU()
if strings.HasSuffix(cpu, "%") {
// Percent
var percent float32
pctStr := cpu[:len(cpu)-1]
pctInt, err := strconv.Atoi(pctStr)
if err != nil || pctInt < 1 || pctInt > 100 {
return errors.New("invalid CPU value: percentage must be between 1-100")
}
percent = float32(pctInt) / 100
numCPU = int(float32(availCPU) * percent)
if numCPU < 1 {
numCPU = 1
}
} else {
// Number
num, err := strconv.Atoi(cpu)
if err != nil || num < 1 {
return errors.New("invalid CPU value: provide a number or percent greater than 0")
}
numCPU = num
}
if numCPU > availCPU {
numCPU = availCPU
}
runtime.GOMAXPROCS(numCPU)
return nil
}
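// Illustrative results (assuming an 8-core machine, not part of this file):
//
//   setCPU("50%") // GOMAXPROCS(4)
//   setCPU("3")   // GOMAXPROCS(3)
//   setCPU("200") // clamped to GOMAXPROCS(8), since values above NumCPU are capped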
// detectContainer attempts to determine whether the process is
// being run inside a container. References:
// https://tuhrig.de/how-to-know-you-are-inside-a-docker-container/
// https://stackoverflow.com/a/20012536/1048862
// https://gist.github.com/anantkamath/623ce7f5432680749e087cf8cfba9b69
func detectContainer() bool {
if runtime.GOOS != "linux" {
return false
}
file, err := os.Open("/proc/1/cgroup")
if err != nil {
return false
}
defer file.Close()
i := 0
scanner := bufio.NewScanner(file)
for scanner.Scan() {
i++
if i > 1000 {
return false
}
line := scanner.Text()
parts := strings.SplitN(line, ":", 3)
if len(parts) < 3 {
continue
}
if strings.Contains(parts[2], "docker") ||
strings.Contains(parts[2], "lxc") ||
strings.Contains(parts[2], "moby") {
return true
}
}
return false
}
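// Illustrative /proc/1/cgroup contents (assumed, for reference only): inside a
// container the hierarchy paths typically carry a docker/lxc/moby segment, e.g.
//
//   12:pids:/docker/<container-id>
//   11:memory:/lxc/<container-name>
//
// whereas on a plain host they look like "12:pids:/", so the loop returns false.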
// initTelemetry initializes the telemetry engine.
func initTelemetry() error {
uuidFilename := filepath.Join(caddy.AssetsPath(), "uuid")
if customUUIDFile := os.Getenv("CADDY_UUID_FILE"); customUUIDFile != "" {
uuidFilename = customUUIDFile
}
newUUID := func() uuid.UUID {
id := uuid.New()
err := os.MkdirAll(caddy.AssetsPath(), 0700)
if err != nil {
log.Printf("[ERROR] Persisting instance UUID: %v", err)
return id
}
err = ioutil.WriteFile(uuidFilename, []byte(id.String()), 0600) // human-readable as a string
if err != nil {
log.Printf("[ERROR] Persisting instance UUID: %v", err)
}
return id
}
var id uuid.UUID
// load UUID from storage, or create one if we don't have one
if uuidFile, err := os.Open(uuidFilename); os.IsNotExist(err) {
// no UUID exists yet; create a new one and persist it
id = newUUID()
} else if err != nil {
log.Printf("[ERROR] Loading persistent UUID: %v", err)
id = newUUID()
} else {
defer uuidFile.Close()
uuidBytes, err := ioutil.ReadAll(uuidFile)
if err != nil {
log.Printf("[ERROR] Reading persistent UUID: %v", err)
id = newUUID()
} else {
id, err = uuid.ParseBytes(uuidBytes)
if err != nil {
log.Printf("[ERROR] Parsing UUID: %v", err)
id = newUUID()
}
}
}
// parse and check the list of disabled metrics
var disabledMetricsSlice []string
if len(disabledMetrics) > 0 {
if len(disabledMetrics) > 1024 {
// mitigate disk space exhaustion at the collection endpoint
return fmt.Errorf("too many metrics to disable")
}
disabledMetricsSlice = strings.Split(disabledMetrics, ",")
for i, metric := range disabledMetricsSlice {
if metric == "instance_id" || metric == "timestamp" || metric == "disabled_metrics" {
return fmt.Errorf("instance_id, timestamp, and disabled_metrics cannot be disabled")
}
if metric == "" {
disabledMetricsSlice = append(disabledMetricsSlice[:i], disabledMetricsSlice[i+1:]...)
}
}
}
// initialize telemetry
telemetry.Init(id, disabledMetricsSlice)
// if any metrics were disabled, report which ones (so we know how representative the data is)
if len(disabledMetricsSlice) > 0 {
telemetry.Set("disabled_metrics", disabledMetricsSlice)
log.Printf("[NOTICE] The following telemetry metrics are disabled: %s", disabledMetrics)
}
return nil
}
// LoadEnvFromFile loads additional envs if file provided and exists
// Envs in file should be in KEY=VALUE format
func LoadEnvFromFile(envFile string) error {
if envFile == "" {
return nil
}
file, err := os.Open(envFile)
if err != nil {
return err
}
defer file.Close()
envMap, err := ParseEnvFile(file)
if err != nil {
return err
}
for k, v := range envMap {
if err := os.Setenv(k, v); err != nil {
return err
}
}
return nil
}
// ParseEnvFile implements parse logic for environment files
func ParseEnvFile(envInput io.Reader) (map[string]string, error) {
envMap := make(map[string]string)
scanner := bufio.NewScanner(envInput)
var line string
lineNumber := 0
for scanner.Scan() {
line = strings.TrimSpace(scanner.Text())
lineNumber++
// skip lines starting with comment
if strings.HasPrefix(line, "#") {
continue
}
// skip empty line
if len(line) == 0 {
continue
}
fields := strings.SplitN(line, "=", 2)
if len(fields) != 2 {
return nil, fmt.Errorf("Can't parse line %d; line should be in KEY=VALUE format", lineNumber)
}
if strings.Contains(fields[0], " ") {
return nil, fmt.Errorf("Can't parse line %d; KEY contains whitespace", lineNumber)
}
key := fields[0]
val := fields[1]
if key == "" {
return nil, fmt.Errorf("Can't parse line %d; KEY can't be empty string", lineNumber)
}
envMap[key] = val
}
if err := scanner.Err(); err != nil {
return nil, err
}
return envMap, nil
}
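// Illustrative input (assumed file contents, not part of this package): given
//
//   # comments and blank lines are skipped
//   API_TOKEN=abc123
//   GREETING=hello world
//
// ParseEnvFile returns map[string]string{"API_TOKEN": "abc123", "GREETING": "hello world"}.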
const appName = "Caddy"
// Flags that control program flow or startup
var (
serverType string
conf string
cpu string
envFile string
logfile string
logRollMB int
logRollCompress bool
revoke string
version bool
plugins bool
validate bool
disabledMetrics string
)
// Build information obtained with the help of -ldflags
var (
appVersion = "(untracked dev build)" // inferred at startup
devBuild = true // inferred at startup
buildDate string // date -u
gitTag string // git describe --exact-match HEAD 2> /dev/null
gitNearestTag string // git describe --abbrev=0 --tags HEAD
gitCommit string // git rev-parse HEAD
gitShortStat string // git diff-index --shortstat
gitFilesModified string // git diff-index --name-only HEAD
)
// EnableTelemetry defines whether telemetry is enabled in Run.
var EnableTelemetry = true
|
[
"\"CADDY_UUID_FILE\""
] |
[] |
[
"CADDY_UUID_FILE"
] |
[]
|
["CADDY_UUID_FILE"]
|
go
| 1 | 0 | |
noxfile.py
|
"""
*nox*
Parameters for nox.
"""
import nox
from nox_poetry import Session
from nox_poetry import session
from pathlib import Path
from textwrap import dedent
import sys
import shutil
package = "{{cookiecutter.package_name}}"
python_versions = [
"3.9",
"3.8",
"3.7",
]
nox.needs_version = ">= 2021.6.6"
nox.options.sessions = (
"pre-commit",
# "mypy",
# "typeguard",
# "security",
# "tests",
# "examples",
# "docs-build",
)
"""
*activate*
Patches installed pre-commit hook scripts so they run inside the session's virtualenv.
"""
def activate(
session: Session,
):
if session.bin is None:
return
virtualenv = session.env.get("VIRTUAL_ENV")
if virtualenv is None:
return
hookdir = Path(".git") / "hooks"
if not hookdir.is_dir():
return
for hook in hookdir.iterdir():
if hook.name.endswith(".sample") or not hook.is_file():
continue
text = hook.read_text()
bindir = repr(session.bin)[1:-1] # strip quotes
if not (
(Path("A") == Path("a") and bindir.lower() in text.lower())
or (bindir in text)
):
continue
lines = text.splitlines()
if not (lines[0].startswith("#!") and "python" in lines[0].lower()):
continue
header = dedent(
f"""\
import os
os.environ["VIRTUAL_ENV"] = {virtualenv!r}
os.environ["PATH"] = os.pathsep.join((
{session.bin!r},
os.environ.get("PATH", ""),
))
"""
)
lines.insert(1, header)
hook.write_text("\n".join(lines))
"""
*lint*
Lint using pre-commit.
"""
@session(
name="pre-commit",
python="3.9",
)
def lint(
session: Session,
):
args = session.posargs or [
"run",
"--all-files",
"--show-diff-on-failure",
]
session.install(
"black",
"darglint",
"flake8",
"flake8-bandit",
"flake8-bugbear",
"flake8-docstrings",
"flake8-rst-docstrings",
"pep8-naming",
"pre-commit",
"pre-commit-hooks",
"reorder-python-imports",
)
session.run("pre-commit", *args)
if args and args[0] == "install":
activate(session)
# """
# *static-types*
# Check for well-typedness through _mypy_.
# """
# @session(
# python=python_versions,
# )
# def mypy(
# session: Session,
# ) -> None:
# args = session.posargs or [
# "src",
# "tests",
# "docs/conf.py",
# ]
# session.install(".")
# session.install(
# "mypy",
# "pytest",
# )
# session.run(
# "mypy",
# *args,
# )
# if not session.posargs:
# session.run(
# "mypy",
# f"--python-executable={sys.executable}",
# "noxfile.py",
# )
# """
# *tests*
# Runs the test suite with _pytest_.
# """
# @session(
# python=[
# "3.9",
# "3.8",
# "3.7",
# ]
# )
# def tests(
# session: Session,
# ) -> None:
# session.install(".")
# session.install(
# "coverage[toml]",
# "pytest",
# "pygments",
# )
# try:
# session.run(
# "coverage",
# "run",
# "--parallel",
# "-m",
# "pytest",
# *session.posargs,
# )
# finally:
# if session.interactive:
# session.notify(
# "coverage",
# posargs=[],
# )
# """
# *runtime-types*
# Checks for type safety at runtime with _typeguard_.
# """
# @session(
# python=python_versions,
# )
# def typeguard(
# session: Session,
# ):
# session.install(".")
# session.install(
# "pytest",
# "typeguard",
# "pygments",
# )
# session.run(
# "pytest",
# f"--typeguard-packages={package}",
# *session.posargs,
# )
# """
# *security*
# Scans dependencies for insecure packages through _safety_.
# """
# @session(python="3.9")
# def security(
# session: Session,
# ) -> None:
# requirements = session.poetry.export_requirements()
# session.install("safety")
# session.run(
# "safety",
# "check",
# "--full-report",
# f"--file={requirements}",
# )
# """
# *coverage*
# Analyzes code coverage with _coverage_.
# """
# @session
# def coverage(
# session: Session,
# ) -> None:
# args = session.posargs or ["report"]
# session.install("coverage[toml]")
# if not session.posargs and any(Path().glob(".coverage.*")):
# session.run("coverage", "combine")
# session.run("coverage", *args)
# """
# *docs-build*
# Build the docs.
# """
# @session(
# name="docs-build",
# python="3.9",
# )
# def docs_build(
# session: Session,
# ) -> None:
# """Build the documentation."""
# args = session.posargs or [
# "docs",
# "docs/_build",
# ]
# session.install(".")
# session.install(
# "sphinx",
# "sphinx-click",
# "sphinx-rtd-theme",
# )
# build_dir = Path(
# "docs",
# "_build",
# )
# if build_dir.exists():
# shutil.rmtree(build_dir)
# session.run(
# "sphinx-build",
# *args,
# )
# """
# *build-with-view*
# Build and serve the documentation with live reloading on changes.
# """
# @session(
# python="3.9",
# )
# def docs(
# session: Session,
# ) -> None:
# args = session.posargs or [
# "--open-browser",
# "docs",
# "docs/_build",
# ]
# session.install(".")
# session.install(
# "sphinx",
# "sphinx-autobuild",
# "sphinx-click",
# "sphinx-rtd-theme",
# )
# build_dir = Path("docs", "_build")
# if build_dir.exists():
# shutil.rmtree(build_dir)
# session.run(
# "sphinx-autobuild",
# *args,
# )
# """
# *examples*
# Run examples with xdoctest.
# """
# @session(
# python=python_versions,
# )
# def examples(
# session: Session,
# ) -> None:
# args = session.posargs or ["all"]
# session.install(".")
# session.install("xdoctest[colors]")
# session.run(
# "python",
# "-m",
# "xdoctest",
# package,
# *args,
# )
|
[] |
[] |
[
"VIRTUAL_ENV",
"PATH"
] |
[]
|
["VIRTUAL_ENV", "PATH"]
|
python
| 2 | 0 | |
.history/pkg/magnolia/client_identity_identifier_20211118100852.go
|
package magnolia
import (
"context"
"fmt"
"os"
"strings"
"time"
"github.com/grpc-ecosystem/go-grpc-middleware/util/metautils"
"github.com/ory/hydra/internal/logger"
api "github.com/ory/hydra/pkg/magnolia/v1"
"github.com/pkg/errors"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
type Config struct {
apiServerAddress string
apiKey string
apiSecret string
}
type Client struct {
config *Config
conn *grpc.ClientConn
}
func NewMagnoliaClient() *Client {
config, err := loadConfigFromEnv()
if err != nil {
logger.Get().Warnw("can't connect to the magnolia v1 server", zap.Error(err))
os.Exit(1)
}
return &Client{
config: config,
conn: connectSecureServer(config),
}
}
func loadConfigFromEnv() (*Config, error) {
apiServerAddress := os.Getenv("API_SERVER_ADDRESS")
apiKey := os.Getenv("API_KEY")
apiSecret := os.Getenv("API_SECRET")
if apiServerAddress == "" || apiKey == "" || apiSecret == "" {
return nil, errors.New("no required environment variables")
}
return &Config{
apiServerAddress: apiServerAddress,
apiKey: apiKey,
apiSecret: apiSecret,
}, nil
}
func connectSecureServer(config *Config) *grpc.ClientConn {
conn, err := grpc.Dial(config.apiServerAddress, grpc.WithInsecure(), grpc.WithBlock())
if err != nil {
logger.Get().Warnw("can't connect to the magnolia v1 server", zap.Error(err))
os.Exit(1)
}
return conn
}
func (c *Client) Close() {
c.conn.Close()
}
func (c *Client) GetIdentityIdentifier(name string) (*api.IdentityIdentifier, error) {
client := api.NewEntropyServiceClient(c.conn)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
md := metadata.Pairs("authorization", fmt.Sprintf("%s %v:%v", "bearer", c.config.apiKey, c.config.apiSecret))
ctx = metautils.NiceMD(md).ToOutgoing(ctx)
defer cancel()
resp, err := client.GetIdentityIdentifier(ctx, &api.IdentityIdentifierRequest{Name: name})
if err != nil {
return nil, err
}
logger.Get().Infow("get identity identifier", zap.Any("data", resp.Data))
return resp.Data, nil
}
func (c *Client) Support(id string) bool {
availableNamespaces := c.AvailableNamespaces()
if len(availableNamespaces) <= 0 {
return false
}
for _, namespace := range availableNamespaces {
if strings.HasSuffix(id, namespace) {
return true
}
}
return false
}
func (c *Client) CreateIdentityIdentifier(entity *api.IdentityIdentifier) (*api.IdentityIdentifier, error) {
client := api.NewEntropyServiceClient(c.conn)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
md := metadata.Pairs("authorization", fmt.Sprintf("%s %v:%v", "bearer", c.config.apiKey, c.config.apiSecret))
ctx = metautils.NiceMD(md).ToOutgoing(ctx)
defer cancel()
if !c.Support(entity.GetId()) {
return nil, errors.New("no available namespaces")
}
resp, err := client.CreateIdentityIdentifier(ctx, &api.CreateIdentityIdentifierRequest{
Id: entity.GetId(),
Name: entity.GetName(),
Email: entity.GetEmail(),
PublicKey: entity.GetPublicKey(),
Signature: entity.GetSignature(),
})
if err != nil {
return nil, err
}
if resp.Result.StatusCode != 200 {
return nil, errors.New(resp.Result.Message)
}
logger.Get().Infow("create identity identifier", zap.Any("data", resp.Data))
return resp.Data, nil
}
func (c *Client) DeleteIdentityIdentifier(id string) error {
client := api.NewEntropyServiceClient(c.conn)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
md := metadata.Pairs("authorization", fmt.Sprintf("%s %v:%v", "bearer", c.config.apiKey, c.config.apiSecret))
ctx = metautils.NiceMD(md).ToOutgoing(ctx)
defer cancel()
resp, err := client.DeleteIdentityIdentifier(ctx, &api.IdentityIdentifierRequest{Name: id})
if err != nil {
return err
}
if resp.Result.StatusCode != 200 {
return errors.New(resp.Result.Message)
}
logger.Get().Infow("delete identity identifier", zap.Any("data", id))
return nil
}
func (c *Client) AvailableNamespaces() []string {
client := api.NewEntropyServiceClient(c.conn)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
md := metadata.Pairs("authorization", fmt.Sprintf("%s %v:%v", "bearer", c.config.apiKey, c.config.apiSecret))
ctx = metautils.NiceMD(md).ToOutgoing(ctx)
defer cancel()
var result []string
resp, err := client.AvailableNamespace(ctx, &api.GeneralPaginationRequest{Pagination: &api.Pagination{
Limit: 10,
Offset: 0,
}})
if err != nil {
println(err.Error())
return result
}
if resp.Result.StatusCode != 200 {
println(resp.Result.Message)
return result
}
namespaces := resp.Data
println(len(namespaces))
for _, ns := range namespaces {
result = append(result, ns.Id)
}
return result
}
|
[
"\"API_SERVER_ADDRESS\"",
"\"API_KEY\"",
"\"API_SECRET\""
] |
[] |
[
"API_KEY",
"API_SECRET",
"API_SERVER_ADDRESS"
] |
[]
|
["API_KEY", "API_SECRET", "API_SERVER_ADDRESS"]
|
go
| 3 | 0 | |
libmachine/persist/filestore_test.go
|
package persist
import (
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"testing"
_ "github.com/docker/machine/drivers/none"
"github.com/docker/machine/libmachine/hosttest"
)
func cleanup() {
os.RemoveAll(os.Getenv("MACHINE_STORAGE_PATH"))
}
func getTestStore() Filestore {
tmpDir, err := ioutil.TempDir("", "machine-test-")
if err != nil {
fmt.Println(err)
os.Exit(1)
}
os.Setenv("MACHINE_STORAGE_PATH", tmpDir)
return Filestore{
Path: tmpDir,
CaCertPath: filepath.Join(tmpDir, "certs", "ca-cert.pem"),
CaPrivateKeyPath: filepath.Join(tmpDir, "certs", "ca-key.pem"),
}
}
func TestStoreSave(t *testing.T) {
defer cleanup()
store := getTestStore()
h, err := hosttest.GetDefaultTestHost()
if err != nil {
t.Fatal(err)
}
if err := store.Save(h); err != nil {
t.Fatal(err)
}
path := filepath.Join(store.getMachinesDir(), h.Name)
if _, err := os.Stat(path); os.IsNotExist(err) {
t.Fatalf("Host path doesn't exist: %s", path)
}
}
func TestStoreRemove(t *testing.T) {
defer cleanup()
store := getTestStore()
h, err := hosttest.GetDefaultTestHost()
if err != nil {
t.Fatal(err)
}
if err := store.Save(h); err != nil {
t.Fatal(err)
}
path := filepath.Join(store.getMachinesDir(), h.Name)
if _, err := os.Stat(path); os.IsNotExist(err) {
t.Fatalf("Host path doesn't exist: %s", path)
}
err = store.Remove(h.Name, false)
if err != nil {
t.Fatal(err)
}
if _, err := os.Stat(path); err == nil {
t.Fatalf("Host path still exists after remove: %s", path)
}
}
func TestStoreList(t *testing.T) {
defer cleanup()
store := getTestStore()
h, err := hosttest.GetDefaultTestHost()
if err != nil {
t.Fatal(err)
}
if err := store.Save(h); err != nil {
t.Fatal(err)
}
hosts, err := store.List()
if err != nil {
t.Fatal(err)
}
if len(hosts) != 1 {
t.Fatalf("List returned %d items, expected 1", len(hosts))
}
if hosts[0].Name != h.Name {
t.Fatalf("hosts[0] name is incorrect, got: %s", hosts[0].Name)
}
}
func TestStoreExists(t *testing.T) {
defer cleanup()
store := getTestStore()
h, err := hosttest.GetDefaultTestHost()
if err != nil {
t.Fatal(err)
}
exists, err := store.Exists(h.Name)
if err != nil {
t.Fatal(err)
}
if exists {
t.Fatal("Host should not exist before saving")
}
if err := store.Save(h); err != nil {
t.Fatal(err)
}
exists, err = store.Exists(h.Name)
if err != nil {
t.Fatal(err)
}
if !exists {
t.Fatal("Host should exist after saving")
}
if err := store.Remove(h.Name, true); err != nil {
t.Fatal(err)
}
exists, err = store.Exists(h.Name)
if err != nil {
t.Fatal(err)
}
if exists {
t.Fatal("Host should not exist after removing")
}
}
func TestStoreLoad(t *testing.T) {
defer cleanup()
expectedURL := "unix:///foo/baz"
flags := hosttest.GetTestDriverFlags()
flags.Data["url"] = expectedURL
store := getTestStore()
h, err := hosttest.GetDefaultTestHost()
if err != nil {
t.Fatal(err)
}
if err := h.Driver.SetConfigFromFlags(flags); err != nil {
t.Fatal(err)
}
if err := store.Save(h); err != nil {
t.Fatal(err)
}
h, err = store.Load(h.Name)
if err != nil {
log.Fatal(err)
}
actualURL, err := h.GetURL()
if err != nil {
t.Fatal(err)
}
if actualURL != expectedURL {
t.Fatalf("GetURL is not %q, got %q", expectedURL, actualURL)
}
}
|
[
"\"MACHINE_STORAGE_PATH\""
] |
[] |
[
"MACHINE_STORAGE_PATH"
] |
[]
|
["MACHINE_STORAGE_PATH"]
|
go
| 1 | 0 | |
test/unit/test_Xchanged_data_shims.py
|
import pytest
import sys
import os
os.environ['SENTINEL_CONFIG'] = os.path.normpath(os.path.join(os.path.dirname(__file__), '../test_sentinel.conf'))
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../../lib')))
import Xchangelib
@pytest.fixture
def sentinel_proposal_hex():
return '5b2270726f706f73616c222c207b22656e645f65706f6368223a20313439313032323830302c20226e616d65223a2022626565722d7265696d62757273656d656e742d37222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a20372e30303030303030302c202273746172745f65706f6368223a20313438333235303430302c202275726c223a202268747470733a2f2f6461736863656e7472616c2e636f6d2f626565722d7265696d62757273656d656e742d37227d5d'
@pytest.fixture
def sentinel_superblock_hex():
return '5b227375706572626c6f636b222c207b226576656e745f626c6f636b5f686569676874223a2036323530302c20227061796d656e745f616464726573736573223a2022795965384b77796155753559737753596d42337133727978385854557539793755697c795443363268755234595145506e39414a486a6e517878726548536267416f617456222c20227061796d656e745f616d6f756e7473223a2022357c33227d5d'
@pytest.fixture
def Xchanged_proposal_hex():
return '5b5b2270726f706f73616c222c207b22656e645f65706f6368223a20313439313336383430302c20226e616d65223a2022626565722d7265696d62757273656d656e742d39222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a2034392e30303030303030302c202273746172745f65706f6368223a20313438333235303430302c202274797065223a20312c202275726c223a202268747470733a2f2f7777772e6461736863656e7472616c2e6f72672f702f626565722d7265696d62757273656d656e742d39227d5d5d'
@pytest.fixture
def Xchanged_superblock_hex():
return '5b5b2274726967676572222c207b226576656e745f626c6f636b5f686569676874223a2036323530302c20227061796d656e745f616464726573736573223a2022795965384b77796155753559737753596d42337133727978385854557539793755697c795443363268755234595145506e39414a486a6e517878726548536267416f617456222c20227061796d656e745f616d6f756e7473223a2022357c33222c202274797065223a20327d5d5d'
# ========================================================================
def test_SHIM_deserialise_from_Xchanged(Xchanged_proposal_hex, Xchanged_superblock_hex):
assert Xchangelib.SHIM_deserialise_from_Xchanged(Xchanged_proposal_hex) == '5b2270726f706f73616c222c207b22656e645f65706f6368223a20313439313336383430302c20226e616d65223a2022626565722d7265696d62757273656d656e742d39222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a2034392e30303030303030302c202273746172745f65706f6368223a20313438333235303430302c202275726c223a202268747470733a2f2f7777772e6461736863656e7472616c2e6f72672f702f626565722d7265696d62757273656d656e742d39227d5d'
assert Xchangelib.SHIM_deserialise_from_Xchanged(Xchanged_superblock_hex) == '5b227375706572626c6f636b222c207b226576656e745f626c6f636b5f686569676874223a2036323530302c20227061796d656e745f616464726573736573223a2022795965384b77796155753559737753596d42337133727978385854557539793755697c795443363268755234595145506e39414a486a6e517878726548536267416f617456222c20227061796d656e745f616d6f756e7473223a2022357c33227d5d'
def test_SHIM_serialise_for_Xchanged(sentinel_proposal_hex, sentinel_superblock_hex):
assert Xchangelib.SHIM_serialise_for_Xchanged(sentinel_proposal_hex) == '5b5b2270726f706f73616c222c207b22656e645f65706f6368223a20313439313032323830302c20226e616d65223a2022626565722d7265696d62757273656d656e742d37222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a20372e30303030303030302c202273746172745f65706f6368223a20313438333235303430302c202274797065223a20312c202275726c223a202268747470733a2f2f6461736863656e7472616c2e636f6d2f626565722d7265696d62757273656d656e742d37227d5d5d'
assert Xchangelib.SHIM_serialise_for_Xchanged(sentinel_superblock_hex) == '5b5b2274726967676572222c207b226576656e745f626c6f636b5f686569676874223a2036323530302c20227061796d656e745f616464726573736573223a2022795965384b77796155753559737753596d42337133727978385854557539793755697c795443363268755234595145506e39414a486a6e517878726548536267416f617456222c20227061796d656e745f616d6f756e7473223a2022357c33222c202274797065223a20327d5d5d'
|
[] |
[] |
["SENTINEL_CONFIG"] |
[]
|
["SENTINEL_CONFIG"]
|
python
| 1 | 0 | |
debug/collector/main.go
|
package main
import (
"os"
"path"
"github.com/dreamlu/go-micro/v2"
log "github.com/dreamlu/go-micro/v2/logger"
plugin "github.com/dreamlu/micro/v2/debug/collector/micro"
"github.com/netdata/go-orchestrator"
"github.com/netdata/go-orchestrator/cli"
"github.com/netdata/go-orchestrator/pkg/multipath"
)
var (
cd, _ = os.Getwd()
netdataConfig = multipath.New(
os.Getenv("NETDATA_USER_CONFIG_DIR"),
os.Getenv("NETDATA_STOCK_CONFIG_DIR"),
path.Join(cd, "/../../../../etc/netdata"),
path.Join(cd, "/../../../../usr/lib/netdata/conf.d"),
)
)
func main() {
// New Service
service := micro.NewService(
micro.Name("go.micro.debug.collector"),
micro.Version("latest"),
)
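// drop the first CLI argument (os.Args[1]), if present, before the service and the netdata orchestrator parse the remaining flags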
if len(os.Args) > 1 {
os.Args = append(os.Args[:1], os.Args[2:]...)
}
// Initialise service
service.Init()
go func() {
log.Fatal(service.Run())
}()
// register the new plugin
plugin.New(service.Client()).Register()
netdata := orchestrator.New()
netdata.Name = "micro.d"
netdata.Option = &cli.Option{
UpdateEvery: 1,
Debug: true,
Module: "all",
ConfigDir: netdataConfig,
Version: false,
}
netdata.ConfigPath = netdataConfig
if !netdata.Setup() {
log.Fatal("Netdata failed to Setup()")
}
netdata.Serve()
}
|
["\"NETDATA_USER_CONFIG_DIR\"", "\"NETDATA_STOCK_CONFIG_DIR\""] |
[] |
["NETDATA_STOCK_CONFIG_DIR", "NETDATA_USER_CONFIG_DIR"] |
[]
|
["NETDATA_STOCK_CONFIG_DIR", "NETDATA_USER_CONFIG_DIR"]
|
go
| 2 | 0 | |
SentyectorAPI/SentyectorAPI/wsgi.py
|
"""
WSGI config for SentyectorAPI project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SentyectorAPI.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
security-admin/scripts/db_setup.py
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. See accompanying LICENSE file.
#
import os
import re
import sys
import errno
import shlex
import platform
import logging
import subprocess
import fileinput
from os.path import basename
from subprocess import Popen,PIPE
from datetime import date
import time
import datetime
from time import gmtime, strftime
import socket
globalDict = {}
os_name = platform.system()
os_name = os_name.upper()
ranger_version=''
jisql_debug=True
retryPatchAfterSeconds=120
is_unix = os_name == "LINUX" or os_name == "DARWIN"
RANGER_ADMIN_HOME = os.getenv("RANGER_ADMIN_HOME")
if RANGER_ADMIN_HOME is None:
RANGER_ADMIN_HOME = os.getcwd()
if socket.getfqdn().find('.')>=0:
client_host=socket.getfqdn()
else:
client_host=socket.gethostbyaddr(socket.gethostname())[0]
RANGER_ADMIN_CONF = os.getenv("RANGER_ADMIN_CONF")
if RANGER_ADMIN_CONF is None:
if is_unix:
RANGER_ADMIN_CONF = RANGER_ADMIN_HOME
elif os_name == "WINDOWS":
RANGER_ADMIN_CONF = os.path.join(RANGER_ADMIN_HOME,'bin')
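# helper: run a jisql command and return its stdout (shlex-split argument list on unix, shell string on Windows)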
def check_output(query):
if is_unix:
p = subprocess.Popen(shlex.split(query), stdout=subprocess.PIPE)
elif os_name == "WINDOWS":
p = subprocess.Popen(query, stdout=subprocess.PIPE, shell=True)
output = p.communicate ()[0]
return output
def log(msg,type):
if type == 'info':
logging.info(" %s",msg)
if type == 'debug':
logging.debug(" %s",msg)
if type == 'warning':
logging.warning(" %s",msg)
if type == 'exception':
logging.exception(" %s",msg)
if type == 'error':
logging.error(" %s",msg)
def populate_global_dict():
global globalDict
if is_unix:
read_config_file = open(os.path.join(RANGER_ADMIN_CONF,'install.properties'))
elif os_name == "WINDOWS":
read_config_file = open(os.path.join(RANGER_ADMIN_CONF,'install_config.properties'))
library_path = os.path.join(RANGER_ADMIN_HOME,"cred","lib","*")
for each_line in read_config_file.read().split('\n') :
each_line = each_line.strip();
if len(each_line) == 0:
continue
elif each_line[0] == "#":
continue
if re.search('=', each_line):
key , value = each_line.split("=",1)
key = key.strip()
if 'PASSWORD' in key:
jceks_file_path = os.path.join(RANGER_ADMIN_HOME, 'jceks','ranger_db.jceks')
#statuscode,value = call_keystore(library_path,key,'',jceks_file_path,'get')
#if statuscode == 1:
value = ''
value = value.strip()
globalDict[key] = value
def jisql_log(query, db_password):
if jisql_debug == True:
if os_name == "WINDOWS":
query = query.replace(' -p "'+db_password+'"' , ' -p "********"')
log("[JISQL] "+query, "info")
else:
query = query.replace(" -p '"+db_password+"'" , " -p '********'")
log("[JISQL] "+query, "info")
def password_validation(password):
if password:
if re.search("[\\\`'\"]",password):
log("[E] password contains one of the unsupported special characters like \" ' \ `","error")
sys.exit(1)
def subprocessCallWithRetry(query):
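# retry a failed jisql subprocess call, sleeping one second between attempts, for up to three attempts in total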
retryCount=1
returnCode = subprocess.call(query)
while returnCode!=0:
retryCount=retryCount+1
time.sleep(1)
log("[I] SQL statement execution Failed!! retrying attempt "+str(retryCount)+" of total 3" ,"info")
returnCode = subprocess.call(query)
if(returnCode!=0 and retryCount>=3):
break
return returnCode
class BaseDB(object):
def check_connection(self, db_name, db_user, db_password):
log("[I] ---------- Verifying DB connection ----------", "info")
def check_table(self, db_name, db_user, db_password, TABLE_NAME):
log("[I] ---------- Verifying table ----------", "info")
def import_db_file(self, db_name, db_user, db_password, file_name):
log("[I] Importing DB file :"+file_name, "info")
def create_version_history_table(self, db_name, db_user, db_password, DBVERSION_CATALOG_CREATION,TABLE_NAME):
log("[I] Creating version and patch history info table", "info")
def apply_patches(self, db_name, db_user, db_password, PATCHES_PATH):
#first get all patches and then apply each patch
if not os.path.exists(PATCHES_PATH):
log("[I] No patches to apply!","info")
else:
# files: coming from os.listdir() sorted alphabetically, thus not numerically
files = os.listdir(PATCHES_PATH)
if files:
sorted_files = sorted(files, key=lambda x: str(x.split('.')[0]))
for filename in sorted_files:
currentPatch = os.path.join(PATCHES_PATH, filename)
self.import_db_patches(db_name, db_user, db_password, currentPatch)
self.update_applied_patches_status(db_name, db_user, db_password, "DB_PATCHES")
else:
log("[I] No patches to apply!","info")
def auditdb_operation(self, xa_db_host, audit_db_host, db_name, audit_db_name, db_user, audit_db_user, db_password, audit_db_password, file_name, TABLE_NAME):
log("[I] ----------------- Audit DB operations ------------", "info")
def apply_auditdb_patches(self, xa_sqlObj,xa_db_host, audit_db_host, db_name, audit_db_name, db_user, audit_db_user, db_password, audit_db_password, PATCHES_PATH, TABLE_NAME):
#first get all patches and then apply each patch
if not os.path.exists(PATCHES_PATH):
log("[I] No patches to apply!","info")
else:
# files: coming from os.listdir() sorted alphabetically, thus not numerically
files = os.listdir(PATCHES_PATH)
if files:
sorted_files = sorted(files, key=lambda x: str(x.split('.')[0]))
for filename in sorted_files:
currentPatch = os.path.join(PATCHES_PATH, filename)
self.import_auditdb_patches(xa_sqlObj, xa_db_host, audit_db_host, db_name, audit_db_name, db_user, audit_db_user, db_password, audit_db_password, currentPatch, TABLE_NAME)
else:
log("[I] No patches to apply!","info")
def execute_java_patches(self, xa_db_host, db_user, db_password, db_name):
log("[I] ----------------- Executing java patches ------------", "info")
def create_synonym(self, db_name, db_user, db_password,audit_db_user):
log("[I] ----------------- Creating Synonym ------------", "info")
def change_admin_default_password(self, xa_db_host, db_user, db_password, db_name,userName,oldPassword,newPassword):
log("[I] ----------------- Changing Ranger admin default password ------------", "info")
def import_core_db_schema(self, db_name, db_user, db_password, file_name,first_table,last_table):
log("[I] ---------- Importing Core DB Schema ----------", "info")
class MysqlConf(BaseDB):
# Constructor
def __init__(self, host,SQL_CONNECTOR_JAR,JAVA_BIN,db_ssl_enabled,db_ssl_required,db_ssl_verifyServerCertificate,javax_net_ssl_keyStore,javax_net_ssl_keyStorePassword,javax_net_ssl_trustStore,javax_net_ssl_trustStorePassword):
self.host = host
self.SQL_CONNECTOR_JAR = SQL_CONNECTOR_JAR
self.JAVA_BIN = JAVA_BIN
self.db_ssl_enabled=db_ssl_enabled.lower()
self.db_ssl_required=db_ssl_required.lower()
self.db_ssl_verifyServerCertificate=db_ssl_verifyServerCertificate.lower()
self.javax_net_ssl_keyStore=javax_net_ssl_keyStore
self.javax_net_ssl_keyStorePassword=javax_net_ssl_keyStorePassword
self.javax_net_ssl_trustStore=javax_net_ssl_trustStore
self.javax_net_ssl_trustStorePassword=javax_net_ssl_trustStorePassword
def get_jisql_cmd(self, user, password ,db_name):
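# build the jisql command line for MySQL; when SSL is enabled, append the JDBC useSSL/requireSSL/verifyServerCertificate parameters and, if certificate verification is on, the javax.net.ssl keyStore/trustStore system properties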
path = RANGER_ADMIN_HOME
db_ssl_param=''
db_ssl_cert_param=''
if self.db_ssl_enabled == 'true':
db_ssl_param="?useSSL=%s&requireSSL=%s&verifyServerCertificate=%s" %(self.db_ssl_enabled,self.db_ssl_required,self.db_ssl_verifyServerCertificate)
if self.db_ssl_verifyServerCertificate == 'true':
db_ssl_cert_param=" -Djavax.net.ssl.keyStore=%s -Djavax.net.ssl.keyStorePassword=%s -Djavax.net.ssl.trustStore=%s -Djavax.net.ssl.trustStorePassword=%s " %(self.javax_net_ssl_keyStore,self.javax_net_ssl_keyStorePassword,self.javax_net_ssl_trustStore,self.javax_net_ssl_trustStorePassword)
self.JAVA_BIN = self.JAVA_BIN.strip("'")
if is_unix:
jisql_cmd = "%s %s -cp %s:%s/jisql/lib/* org.apache.util.sql.Jisql -driver mysqlconj -cstring jdbc:mysql://%s/%s%s -u '%s' -p '%s' -noheader -trim -c \;" %(self.JAVA_BIN,db_ssl_cert_param,self.SQL_CONNECTOR_JAR,path,self.host,db_name,db_ssl_param,user,password)
elif os_name == "WINDOWS":
jisql_cmd = "%s %s -cp %s;%s\jisql\\lib\\* org.apache.util.sql.Jisql -driver mysqlconj -cstring jdbc:mysql://%s/%s%s -u \"%s\" -p \"%s\" -noheader -trim" %(self.JAVA_BIN,db_ssl_cert_param,self.SQL_CONNECTOR_JAR, path, self.host, db_name, db_ssl_param,user, password)
return jisql_cmd
def check_connection(self, db_name, db_user, db_password):
log("[I] Checking connection..", "info")
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -query \"SELECT version();\""
elif os_name == "WINDOWS":
query = get_cmd + " -query \"SELECT version();\" -c ;"
jisql_log(query, db_password)
output = check_output(query)
if output.strip('Production |'):
log("[I] Checking connection passed.", "info")
return True
else:
log("[E] Can't establish connection!! Exiting.." ,"error")
log("[I] Please run DB setup first or contact Administrator.." ,"info")
sys.exit(1)
def grant_audit_db_user(self, db_user, audit_db_name, audit_db_user, audit_db_password, db_password,TABLE_NAME):
hosts_arr =["%", "localhost"]
hosts_arr.append(self.host)
for host in hosts_arr:
log("[I] ---------------Granting privileges TO '"+ audit_db_user + "' on '" + audit_db_name+"'-------------" , "info")
get_cmd = self.get_jisql_cmd(db_user, db_password, audit_db_name)
if is_unix:
query = get_cmd + " -query \"GRANT INSERT ON %s.%s TO '%s'@'%s';\"" %(audit_db_name,TABLE_NAME,audit_db_user,host)
jisql_log(query, db_password)
ret = subprocessCallWithRetry(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"GRANT INSERT ON %s.%s TO '%s'@'%s';\" -c ;" %(audit_db_name,TABLE_NAME,audit_db_user,host)
jisql_log(query, db_password)
ret = subprocessCallWithRetry(query)
if ret == 0:
log("[I] Granting privileges to '" + audit_db_user+"' done on '"+ audit_db_name+"'", "info")
else:
log("[E] Granting privileges to '" +audit_db_user+"' failed on '" + audit_db_name+"'", "error")
sys.exit(1)
def import_db_file(self, db_name, db_user, db_password, file_name):
isImported=False
name = basename(file_name)
if os.path.isfile(file_name):
log("[I] Importing db schema to database " + db_name + " from file: " + name,"info")
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -input %s -c ;" %file_name
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] "+name + " file imported successfully","info")
isImported=True
else:
log("[E] "+name + " file import failed!","error")
else:
log("[E] DB schema file " + name+ " not found","error")
sys.exit(1)
return isImported
def import_db_patches(self, db_name, db_user, db_password, file_name):
name = basename(file_name)
if os.path.isfile(file_name):
version = name.split('-')[0]
log("[I] Executing patch on " + db_name + " from file: " + name,"info")
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
log("[I] Patch "+ name +" is already applied" ,"info")
else:
if is_unix:
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
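# a row with active='N' means another process is currently applying this patch; wait and re-check until that row is activated or cleaned up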
if output.strip(version + " |"):
while(output.strip(version + " |")):
log("[I] Patch "+ name +" is being applied by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
jisql_log(query, db_password)
output = check_output(query)
else:
if is_unix:
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', now(), '%s', now(), '%s','N') ;\"" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', now(), '%s', now(), '%s','N') ;\" -c ;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log ("[I] Patch "+ name +" is being applied..","info")
else:
log("[E] Patch "+ name +" failed", "error")
sys.exit(1)
if is_unix:
query = get_cmd + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -input %s -c ;" %file_name
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret!=0:
if is_unix:
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
ret=0
log("[I] Patch "+ name +" has been applied by some other process!" ,"info")
if ret == 0:
log("[I] "+name + " patch applied","info")
if is_unix:
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] Patch version updated", "info")
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] Updating patch version failed", "error")
sys.exit(1)
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] "+name + " import failed!","error")
sys.exit(1)
def import_auditdb_patches(self, xa_sqlObj,xa_db_host, audit_db_host, db_name, audit_db_name, db_user, audit_db_user, db_password, audit_db_password, file_name, TABLE_NAME):
log("[I] --------- Checking XA_ACCESS_AUDIT table to apply audit db patches --------- ","info")
output = self.check_table(audit_db_name, db_user, db_password, TABLE_NAME)
if output == True:
name = basename(file_name)
if os.path.isfile(file_name):
version = name.split('-')[0]
log("[I] Executing patch on " + audit_db_name + " from file: " + name,"info")
get_cmd1 = xa_sqlObj.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd1 + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd1 + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
log("[I] Patch "+ name +" is already applied" ,"info")
else:
if is_unix:
query = get_cmd1 + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd1 + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
while(output.strip(version + " |")):
log("[I] Patch "+ name +" is being applied by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
jisql_log(query, db_password)
output = check_output(query)
else:
if is_unix:
query = get_cmd1 + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', now(), '%s', now(), '%s','N') ;\"" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd1 + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', now(), '%s', now(), '%s','N') ;\" -c ;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log ("[I] Patch "+ name +" is being applied..","info")
else:
log("[E] Patch "+ name +" failed", "error")
sys.exit(1)
get_cmd2 = self.get_jisql_cmd(db_user, db_password, audit_db_name)
if is_unix:
query = get_cmd2 + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd2 + " -input %s -c ;" %file_name
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] "+name + " patch applied","info")
if is_unix:
query = get_cmd1 + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd1 + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] Patch version updated", "info")
else:
if is_unix:
query = get_cmd1 + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd1 + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] Updating patch version failed", "error")
sys.exit(1)
else:
if is_unix:
query = get_cmd1 + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd1 + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] "+name + " import failed!","error")
sys.exit(1)
else:
log("[I] Table XA_ACCESS_AUDIT does not exists in " +audit_db_name,"error")
sys.exit(1)
def check_table(self, db_name, db_user, db_password, TABLE_NAME):
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -query \"show tables like '%s';\"" %(TABLE_NAME)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"show tables like '%s';\" -c ;" %(TABLE_NAME)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(TABLE_NAME + " |"):
log("[I] Table " + TABLE_NAME +" already exists in database '" + db_name + "'","info")
return True
else:
log("[I] Table " + TABLE_NAME +" does not exist in database " + db_name + "","info")
return False
def auditdb_operation(self, xa_db_host, audit_db_host, db_name, audit_db_name, db_user, audit_db_user, db_password, audit_db_password, file_name, TABLE_NAME):
log("[I] --------- Check ranger user connection ---------","info")
self.check_connection(audit_db_name, db_user, db_password)
log("[I] --------- Check audit table exists --------- ","info")
output = self.check_table(audit_db_name, db_user, db_password, TABLE_NAME)
if output == False:
self.import_db_file(audit_db_name ,db_user, db_password, file_name)
self.grant_audit_db_user(db_user, audit_db_name, audit_db_user, audit_db_password, db_password,TABLE_NAME)
def execute_java_patches(self, xa_db_host, db_user, db_password, db_name):
my_dict = {}
version = ""
className = ""
app_home = os.path.join(RANGER_ADMIN_HOME,"ews","webapp")
ranger_log = os.path.join(RANGER_ADMIN_HOME,"ews","logs")
javaFiles = os.path.join(app_home,"WEB-INF","classes","org","apache","ranger","patch")
if not os.path.exists(javaFiles):
log("[I] No java patches to apply!","info")
else:
files = os.listdir(javaFiles)
if files:
for filename in files:
f = re.match("^Patch.*?.class$",filename)
if f:
version = re.match("Patch.*?_(.*).class",filename)
version = version.group(1)
key3 = int(version.strip("J"))
my_dict[key3] = filename
keylist = my_dict.keys()
keylist.sort()
for key in keylist:
#print "%s: %s" % (key, my_dict[key])
version = str(key)
className = my_dict[key]
className = className.strip(".class")
if version != "":
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -query \"select version from x_db_version_h where version = 'J%s' and active = 'Y';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = 'J%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
log("[I] Java patch "+ className +" is already applied" ,"info")
else:
if is_unix:
query = get_cmd + " -query \"select version from x_db_version_h where version = 'J%s' and active = 'N';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = 'J%s' and active = 'N';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
while(output.strip(version + " |")):
log("[I] Java patch "+ className +" is being applied by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
jisql_log(query, db_password)
output = check_output(query)
else:
if is_unix:
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('J%s', now(), '%s', now(), '%s','N') ;\"" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('J%s', now(), '%s', now(), '%s','N') ;\" -c ;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log ("[I] java patch "+ className +" is being applied..","info")
else:
log("[E] java patch "+ className +" failed", "error")
sys.exit(1)
if is_unix:
path = os.path.join("%s","WEB-INF","classes","conf:%s","WEB-INF","classes","lib","*:%s","WEB-INF",":%s","META-INF",":%s","WEB-INF","lib","*:%s","WEB-INF","classes",":%s","WEB-INF","classes","META-INF:%s" )%(app_home ,app_home ,app_home, app_home, app_home, app_home ,app_home ,self.SQL_CONNECTOR_JAR)
elif os_name == "WINDOWS":
path = os.path.join("%s","WEB-INF","classes","conf;%s","WEB-INF","classes","lib","*;%s","WEB-INF",";%s","META-INF",";%s","WEB-INF","lib","*;%s","WEB-INF","classes",";%s","WEB-INF","classes","META-INF;%s" )%(app_home ,app_home ,app_home, app_home, app_home, app_home ,app_home ,self.SQL_CONNECTOR_JAR)
get_java_cmd = "%s -Dlogdir=%s -Dlog4j.configuration=db_patch.log4j.xml -cp %s org.apache.ranger.patch.%s"%(self.JAVA_BIN,ranger_log,path,className)
if is_unix:
ret = subprocess.call(shlex.split(get_java_cmd))
elif os_name == "WINDOWS":
ret = subprocess.call(get_java_cmd)
if ret == 0:
if is_unix:
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='J%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='J%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log ("[I] java patch "+ className +" is applied..","info")
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='J%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='J%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] java patch "+ className +" failed", "error")
sys.exit(1)
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='J%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='J%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] applying java patch "+ className +" failed", "error")
sys.exit(1)
def change_admin_default_password(self, xa_db_host, db_user, db_password, db_name,userName,oldPassword,newPassword):
my_dict = {}
version = ""
className = "ChangePasswordUtil"
version = 'DEFAULT_ADMIN_UPDATE'
app_home = os.path.join(RANGER_ADMIN_HOME,"ews","webapp")
ranger_log = os.path.join(RANGER_ADMIN_HOME,"ews","logs")
filePath = os.path.join(app_home,"WEB-INF","classes","org","apache","ranger","patch","cliutil","ChangePasswordUtil.class")
if os.path.exists(filePath):
if version != "":
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
log("[I] Ranger admin default password has already been changed!!","info")
else:
if is_unix:
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
while(output.strip(version + " |")):
log("[I] Ranger Password change utility is being executed by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
jisql_log(query, db_password)
output = check_output(query)
else:
if is_unix:
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', now(), '%s', now(), '%s','N') ;\"" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', now(), '%s', now(), '%s','N') ;\" -c ;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log ("[I] Ranger admin default password change request is in process..","info")
else:
log("[E] Ranger admin default password change request failed", "error")
sys.exit(1)
if is_unix:
path = os.path.join("%s","WEB-INF","classes","conf:%s","WEB-INF","classes","lib","*:%s","WEB-INF",":%s","META-INF",":%s","WEB-INF","lib","*:%s","WEB-INF","classes",":%s","WEB-INF","classes","META-INF:%s" )%(app_home ,app_home ,app_home, app_home, app_home, app_home ,app_home ,self.SQL_CONNECTOR_JAR)
elif os_name == "WINDOWS":
path = os.path.join("%s","WEB-INF","classes","conf;%s","WEB-INF","classes","lib","*;%s","WEB-INF",";%s","META-INF",";%s","WEB-INF","lib","*;%s","WEB-INF","classes",";%s","WEB-INF","classes","META-INF;%s" )%(app_home ,app_home ,app_home, app_home, app_home, app_home ,app_home ,self.SQL_CONNECTOR_JAR)
get_java_cmd = "%s -Dlogdir=%s -Dlog4j.configuration=db_patch.log4j.xml -cp %s org.apache.ranger.patch.cliutil.%s %s %s %s -default"%(self.JAVA_BIN,ranger_log,path,className,userName,oldPassword,newPassword)
if is_unix:
status = subprocess.call(shlex.split(get_java_cmd))
elif os_name == "WINDOWS":
status = subprocess.call(get_java_cmd)
if status == 0 or status==2:
if is_unix:
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0 and status == 0:
log ("[I] Ranger admin default password change request processed successfully..","info")
elif ret == 0 and status == 2:
log ("[I] Ranger admin default password change request process skipped!","info")
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] Ranger admin default password change request failed", "error")
sys.exit(1)
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] Ranger admin default password change request failed", "error")
sys.exit(1)
def create_version_history_table(self, db_name, db_user, db_password, file_name,table_name):
name = basename(file_name)
if os.path.isfile(file_name):
isTableExist=self.check_table(db_name, db_user, db_password, table_name)
if isTableExist==False:
log("[I] Importing "+table_name+" table schema to database " + db_name + " from file: " + name,"info")
while(isTableExist==False):
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -input %s -c ;" %file_name
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] "+name + " file imported successfully","info")
else:
log("[E] "+name + " file import failed!","error")
time.sleep(30)
isTableExist=self.check_table(db_name, db_user, db_password, table_name)
else:
log("[E] Table schema file " + name+ " not found","error")
sys.exit(1)
def import_core_db_schema(self, db_name, db_user, db_password, file_name,first_table,last_table):
version = 'CORE_DB_SCHEMA'
if os.path.isfile(file_name):
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
log("[I] "+version+" is already imported" ,"info")
else:
if is_unix:
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
while(output.strip(version + " |")):
log("[I] "+ version +" is being imported by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
jisql_log(query, db_password)
output = check_output(query)
else:
if is_unix:
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', now(), '%s', now(), '%s','N') ;\"" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', now(), '%s', now(), '%s','N') ;\" -c ;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret != 0:
log("[E] "+ version +" import failed", "error")
sys.exit(1)
isFirstTableExist = self.check_table(db_name, db_user, db_password, first_table)
isLastTableExist = self.check_table(db_name, db_user, db_password, last_table)
isSchemaCreated=False
if isFirstTableExist == True and isLastTableExist == True :
isSchemaCreated=True
elif isFirstTableExist == False and isLastTableExist == False :
isImported=self.import_db_file(db_name, db_user, db_password, file_name)
if(isImported==False):
log("[I] "+ version +" might being imported by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
isLastTableExist=self.check_table(db_name, db_user, db_password, last_table)
if(isLastTableExist==True):
isSchemaCreated=True
elif isFirstTableExist == False or isLastTableExist == False :
while(isFirstTableExist == False or isLastTableExist==False):
log("[I] "+ version +" is being imported by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
isFirstTableExist=self.check_table(db_name, db_user, db_password, first_table)
isLastTableExist=self.check_table(db_name, db_user, db_password, last_table)
if(isFirstTableExist==True and isLastTableExist==True):
isSchemaCreated=True
if isSchemaCreated == True:
if is_unix:
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] "+version +" import status has been updated", "info")
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] Updating "+version +" import status failed", "error")
sys.exit(1)
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] "+version + " import failed!","error")
sys.exit(1)
def hasPendingPatches(self, db_name, db_user, db_password, version):
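# return True when no active ('Y') entry recorded by this ranger_version exists yet for the given version, i.e. the status row still has to be written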
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and inst_by = '%s' and active='Y';\"" %(version,ranger_version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and inst_by = '%s' and active='Y';\" -c ;" %(version,ranger_version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
return False
else:
return True
def update_applied_patches_status(self,db_name, db_user, db_password,version):
if self.hasPendingPatches(db_name, db_user, db_password,version) == True:
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', now(), '%s', now(), '%s','Y') ;\"" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', now(), '%s', now(), '%s','Y') ;\" -c ;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret != 0:
log("[E] "+ version +" status entry to x_db_version_h table failed", "error")
sys.exit(1)
else:
log("[I] "+ version +" status entry to x_db_version_h table completed", "info")
class OracleConf(BaseDB):
# Constructor
def __init__(self, host, SQL_CONNECTOR_JAR, JAVA_BIN):
self.host = host
self.SQL_CONNECTOR_JAR = SQL_CONNECTOR_JAR
self.JAVA_BIN = JAVA_BIN
def get_jisql_cmd(self, user, password):
path = RANGER_ADMIN_HOME
self.JAVA_BIN = self.JAVA_BIN.strip("'")
if not re.search('-Djava.security.egd=file:///dev/urandom', self.JAVA_BIN):
self.JAVA_BIN = self.JAVA_BIN + " -Djava.security.egd=file:///dev/urandom "
#if self.host.count(":") == 2:
if self.host.count(":") == 2 or self.host.count(":") == 0:
#jdbc:oracle:thin:@[HOST][:PORT]:SID or #jdbc:oracle:thin:@GL
cstring="jdbc:oracle:thin:@%s" %(self.host)
else:
#jdbc:oracle:thin:@//[HOST][:PORT]/SERVICE
cstring="jdbc:oracle:thin:@//%s" %(self.host)
if is_unix:
jisql_cmd = "%s -cp %s:%s/jisql/lib/* org.apache.util.sql.Jisql -driver oraclethin -cstring %s -u '%s' -p '%s' -noheader -trim" %(self.JAVA_BIN, self.SQL_CONNECTOR_JAR, path, cstring, user, password)
elif os_name == "WINDOWS":
jisql_cmd = "%s -cp %s;%s\jisql\\lib\\* org.apache.util.sql.Jisql -driver oraclethin -cstring %s -u \"%s\" -p \"%s\" -noheader -trim" %(self.JAVA_BIN, self.SQL_CONNECTOR_JAR, path, cstring, user, password)
return jisql_cmd
def check_connection(self, db_name, db_user, db_password):
log("[I] Checking connection", "info")
get_cmd = self.get_jisql_cmd(db_user, db_password)
if is_unix:
query = get_cmd + " -c \; -query \"select * from v$version;\""
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select * from v$version;\" -c ;"
jisql_log(query, db_password)
output = check_output(query)
if output.strip('Production |'):
log("[I] Connection success", "info")
return True
else:
log("[E] Can't establish connection!", "error")
sys.exit(1)
def grant_audit_db_user(self, audit_db_name ,db_user,audit_db_user,db_password,audit_db_password):
get_cmd = self.get_jisql_cmd(db_user, db_password)
if is_unix:
query = get_cmd + " -c \; -query 'GRANT SELECT ON %s.XA_ACCESS_AUDIT_SEQ TO %s;'" % (db_user,audit_db_user)
jisql_log(query, db_password)
ret = subprocessCallWithRetry(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"GRANT SELECT ON %s.XA_ACCESS_AUDIT_SEQ TO %s;\" -c ;" % (db_user,audit_db_user)
jisql_log(query, db_password)
ret = subprocessCallWithRetry(query)
if ret != 0:
sys.exit(1)
if is_unix:
query = get_cmd + " -c \; -query 'GRANT INSERT ON %s.XA_ACCESS_AUDIT TO %s;'" % (db_user,audit_db_user)
jisql_log(query, db_password)
ret = subprocessCallWithRetry(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"GRANT INSERT ON %s.XA_ACCESS_AUDIT TO %s;\" -c ;" % (db_user,audit_db_user)
jisql_log(query, db_password)
ret = subprocessCallWithRetry(query)
if ret != 0:
sys.exit(1)
def import_db_file(self, db_name, db_user, db_password, file_name):
isImported=False
name = basename(file_name)
if os.path.isfile(file_name):
log("[I] Importing script " + db_name + " from file: " + name,"info")
get_cmd = self.get_jisql_cmd(db_user, db_password)
if is_unix:
query = get_cmd + " -input %s -c \;" %file_name
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -input %s -c ;" %file_name
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] "+name + " imported successfully","info")
isImported=True
else:
log("[E] "+name + " import failed!","error")
else:
log("[E] DB schema file " + name+ " not found","error")
sys.exit(1)
return isImported
def create_synonym(self,db_name, db_user, db_password,audit_db_user):
log("[I] ----------------- Creating Synonym ------------", "info")
get_cmd = self.get_jisql_cmd(db_user, db_password)
if is_unix:
query = get_cmd + " -c \; -query 'CREATE OR REPLACE SYNONYM %s.XA_ACCESS_AUDIT FOR %s.XA_ACCESS_AUDIT;'" % (audit_db_user,db_user)
jisql_log(query, db_password)
ret = subprocessCallWithRetry(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"CREATE OR REPLACE SYNONYM %s.XA_ACCESS_AUDIT FOR %s.XA_ACCESS_AUDIT;\" -c ;" % (audit_db_user,db_user)
jisql_log(query, db_password)
ret = subprocessCallWithRetry(query)
if ret != 0:
sys.exit(1)
if is_unix:
query = get_cmd + " -c \; -query 'CREATE OR REPLACE SYNONYM %s.XA_ACCESS_AUDIT_SEQ FOR %s.XA_ACCESS_AUDIT_SEQ;'" % (audit_db_user,db_user)
jisql_log(query, db_password)
ret = subprocessCallWithRetry(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"CREATE OR REPLACE SYNONYM %s.XA_ACCESS_AUDIT_SEQ FOR %s.XA_ACCESS_AUDIT_SEQ;\" -c ;" % (audit_db_user,db_user)
jisql_log(query, db_password)
ret = subprocessCallWithRetry(query)
if ret != 0:
sys.exit(1)
def import_db_patches(self, db_name, db_user, db_password, file_name):
if os.path.isfile(file_name):
name = basename(file_name)
version = name.split('-')[0]
log("[I] Executing patch on " + db_name + " from file: " + name,"info")
get_cmd = self.get_jisql_cmd(db_user, db_password)
if is_unix:
query = get_cmd + " -c \; -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version +" |"):
log("[I] Patch "+ name +" is already applied" ,"info")
else:
if is_unix:
query = get_cmd + " -c \; -query \"select version from x_db_version_h where version = '%s' and active = 'N';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version +" |"):
while(output.strip(version + " |")):
log("[I] Patch "+ name +" is being applied by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
jisql_log(query, db_password)
output = check_output(query)
else:
if is_unix:
query = get_cmd + " -c \; -query \"insert into x_db_version_h (id,version, inst_at, inst_by, updated_at, updated_by,active) values ( X_DB_VERSION_H_SEQ.nextval,'%s', sysdate, '%s', sysdate, '%s','N');\"" %(version, ranger_version, client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"insert into x_db_version_h (id,version, inst_at, inst_by, updated_at, updated_by,active) values ( X_DB_VERSION_H_SEQ.nextval,'%s', sysdate, '%s', sysdate, '%s','N');\" -c ;" %(version, ranger_version, client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log ("[I] Patch "+ name +" is being applied..","info")
else:
log("[E] Patch "+ name +" failed", "error")
if is_unix:
query = get_cmd + " -input %s -c /" %file_name
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -input %s -c /" %file_name
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret != 0:
if is_unix:
query = get_cmd + " -c \; -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version +" |"):
ret=0
log("[I] Patch "+ name +" has been applied by some other process!" ,"info")
if ret == 0:
log("[I] "+name + " patch applied","info")
if is_unix:
query = get_cmd + " -c \; -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] Patch version updated", "info")
else:
if is_unix:
query = get_cmd + " -c \; -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] Updating patch version failed", "error")
sys.exit(1)
else:
if is_unix:
query = get_cmd + " -c \; -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] "+name + " Import failed!","error")
sys.exit(1)
def import_auditdb_patches(self, xa_sqlObj,xa_db_host, audit_db_host, db_name, audit_db_name, db_user, audit_db_user, db_password, audit_db_password, file_name, TABLE_NAME):
log("[I] --------- Checking XA_ACCESS_AUDIT table to apply audit db patches --------- ","info")
output = self.check_table(db_name, db_user, db_password, TABLE_NAME)
if output == True:
if os.path.isfile(file_name):
name = basename(file_name)
version = name.split('-')[0]
log("[I] Executing patch on " + audit_db_name + " from file: " + name,"info")
get_cmd1 = xa_sqlObj.get_jisql_cmd(db_user, db_password)
if is_unix:
query = get_cmd1 + " -c \; -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd1 + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version +" |"):
log("[I] Patch "+ name +" is already applied" ,"info")
else:
if is_unix:
query = get_cmd1 + " -c \; -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd1 + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version +" |"):
while(output.strip(version + " |")):
log("[I] Patch "+ name +" is being applied by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
jisql_log(query, db_password)
output = check_output(query)
else:
if is_unix:
query = get_cmd1 + " -c \; -query \"insert into x_db_version_h (id,version, inst_at, inst_by, updated_at, updated_by,active) values ( X_DB_VERSION_H_SEQ.nextval,'%s', sysdate, '%s', sysdate, '%s','N');\"" %(version, ranger_version, client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd1 + " -query \"insert into x_db_version_h (id,version, inst_at, inst_by, updated_at, updated_by,active) values ( X_DB_VERSION_H_SEQ.nextval,'%s', sysdate, '%s', sysdate, '%s','N');\" -c ;" %(version, ranger_version, client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log ("[I] Patch "+ name +" is being applied..","info")
else:
log("[E] Patch "+ name +" failed", "error")
get_cmd2 = self.get_jisql_cmd(db_user, db_password)
if is_unix:
query = get_cmd2 + " -input %s -c /" %file_name
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd2 + " -input %s -c /" %file_name
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] "+name + " patch applied","info")
if is_unix:
query = get_cmd1 + " -c \; -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\"" %(version, client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd1 + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version, client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] Patch version updated", "info")
else:
if is_unix:
query = get_cmd1 + " -c \; -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\"" %(version, client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd1 + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version, client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] Updating patch version failed", "error")
sys.exit(1)
else:
if is_unix:
query = get_cmd1 + " -c \; -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\"" %(version, client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd1 + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version, client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] "+name + " Import failed!","error")
sys.exit(1)
else:
log("[I] Patch file not found","error")
sys.exit(1)
def check_table(self, db_name, db_user, db_password, TABLE_NAME):
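# for Oracle, db_name is treated as the user's tablespace: first verify the user's default tablespace matches db_name, then check whether TABLE_NAME exists in that tablespace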
get_cmd = self.get_jisql_cmd(db_user ,db_password)
if is_unix:
query = get_cmd + " -c \; -query 'select default_tablespace from user_users;'"
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select default_tablespace from user_users;\" -c ;"
jisql_log(query, db_password)
output = check_output(query).strip()
output = output.strip(' |')
db_name = db_name.upper()
if output == db_name:
log("[I] User name " + db_user + " and tablespace " + db_name + " already exists.","info")
log("[I] Verifying table " + TABLE_NAME +" in tablespace " + db_name, "info")
get_cmd = self.get_jisql_cmd(db_user, db_password)
if is_unix:
query = get_cmd + " -c \; -query \"select UPPER(table_name) from all_tables where UPPER(tablespace_name)=UPPER('%s') and UPPER(table_name)=UPPER('%s');\"" %(db_name ,TABLE_NAME)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select UPPER(table_name) from all_tables where UPPER(tablespace_name)=UPPER('%s') and UPPER(table_name)=UPPER('%s');\" -c ;" %(db_name ,TABLE_NAME)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(TABLE_NAME.upper() + ' |'):
log("[I] Table " + TABLE_NAME +" already exists in tablespace " + db_name + "","info")
return True
else:
log("[I] Table " + TABLE_NAME +" does not exist in tablespace " + db_name + "","info")
return False
else:
log("[E] "+db_user + " user already assigned to some other tablespace , provide different DB name.","error")
sys.exit(1)
def auditdb_operation(self, xa_db_host , audit_db_host , db_name ,audit_db_name, db_user, audit_db_user, db_password, audit_db_password, file_name, TABLE_NAME):
log("[I] --------- Check admin user connection ---------","info")
self.check_connection(db_name, db_user, db_password)
log("[I] --------- Check audit user connection ---------","info")
self.check_connection(audit_db_name, audit_db_user, audit_db_password)
log("[I] --------- Check table ---------","info")
if self.check_table(db_name, db_user, db_password, TABLE_NAME):
pass
else:
self.import_db_file(audit_db_name, db_user, db_password ,file_name)
log("[I] ---------------Granting privileges TO '"+ audit_db_user + "' on audit table-------------" , "info")
self.grant_audit_db_user( audit_db_name ,db_user, audit_db_user, db_password,audit_db_password)
def execute_java_patches(self, xa_db_host, db_user, db_password, db_name):
my_dict = {}
version = ""
className = ""
app_home = os.path.join(RANGER_ADMIN_HOME,"ews","webapp")
ranger_log = os.path.join(RANGER_ADMIN_HOME,"ews","logs")
javaFiles = os.path.join(app_home,"WEB-INF","classes","org","apache","ranger","patch")
if not os.path.exists(javaFiles):
log("[I] No java patches to apply!","info")
else:
files = os.listdir(javaFiles)
if files:
for filename in files:
f = re.match("^Patch.*?.class$",filename)
if f:
className = re.match("(Patch.*?)_.*.class",filename)
className = className.group(1)
version = re.match("Patch.*?_(.*).class",filename)
version = version.group(1)
key3 = int(version.strip("J"))
my_dict[key3] = filename
keylist = my_dict.keys()
keylist.sort()
for key in keylist:
#print "%s: %s" % (key, my_dict[key])
version = str(key)
className = my_dict[key]
className = className.strip(".class")
if version != "":
get_cmd = self.get_jisql_cmd(db_user, db_password)
if is_unix:
query = get_cmd + " -c \; -query \"select version from x_db_version_h where version = 'J%s' and active = 'Y';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = 'J%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
log("[I] java patch "+ className +" is already applied" ,"info")
else:
if is_unix:
query = get_cmd + " -c \; -query \"select version from x_db_version_h where version = 'J%s' and active = 'N';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = 'J%s' and active = 'N';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
#Fix to handle Ranger Upgrade failure for Oracle DB flavor
if is_unix:
queryUpgradeCaseCheck = get_cmd + " -c \; -query \"select version from x_db_version_h where version = 'J%s' and active = 'N' and inst_by!='%s';\"" %(version,ranger_version)
elif os_name == "WINDOWS":
queryUpgradeCaseCheck = get_cmd + " -query \"select version from x_db_version_h where version = 'J%s' and active = 'N' and inst_by!='%s';\" -c ;" %(version,ranger_version)
jisql_log(queryUpgradeCaseCheck, db_password)
outputUpgradeCaseCheck = check_output(queryUpgradeCaseCheck)
if outputUpgradeCaseCheck.strip(version + " |"):
if is_unix:
queryUpdate = get_cmd + " -c \; -query \"update x_db_version_h set active='Y' where version='J%s' and active='N' and inst_by!='%s';\"" %(version, ranger_version)
jisql_log(queryUpdate, db_password)
retUpdate = subprocess.call(shlex.split(queryUpdate))
elif os_name == "WINDOWS":
queryUpdate = get_cmd + " -query \"update x_db_version_h set active='Y' where version='J%s' and active='N' and inst_by!='%s';\" -c ;" %(version, ranger_version)
jisql_log(queryUpdate, db_password)
retUpdate = subprocess.call(queryUpdate)
if retUpdate == 0:
log ("[I] java patch "+ className +" status has been updated..","info")
if is_unix:
query = get_cmd + " -c \; -query \"select version from x_db_version_h where version = 'J%s' and active = 'N';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = 'J%s' and active = 'N';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
#End of Upgrade failure fix
while(output.strip(version + " |")):
log("[I] Java patch "+ className +" is being applied by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
jisql_log(query, db_password)
output = check_output(query)
else:
if is_unix:
query = get_cmd + " -c \; -query \"insert into x_db_version_h (id,version, inst_at, inst_by, updated_at, updated_by,active) values ( X_DB_VERSION_H_SEQ.nextval,'J%s', sysdate, '%s', sysdate, '%s','N');\"" %(version, ranger_version, client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"insert into x_db_version_h (id,version, inst_at, inst_by, updated_at, updated_by,active) values ( X_DB_VERSION_H_SEQ.nextval,'J%s', sysdate, '%s', sysdate, '%s','N');\" -c ;" %(version, ranger_version, client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log ("[I] java patch "+ className +" is being applied..","info")
else:
log("[E] java patch "+ className +" failed", "error")
sys.exit(1)
if is_unix:
path = os.path.join("%s","WEB-INF","classes","conf:%s","WEB-INF","classes","lib","*:%s","WEB-INF",":%s","META-INF",":%s","WEB-INF","lib","*:%s","WEB-INF","classes",":%s","WEB-INF","classes","META-INF:%s" )%(app_home ,app_home ,app_home, app_home, app_home, app_home ,app_home ,self.SQL_CONNECTOR_JAR)
elif os_name == "WINDOWS":
path = os.path.join("%s","WEB-INF","classes","conf;%s","WEB-INF","classes","lib","*;%s","WEB-INF",";%s","META-INF",";%s","WEB-INF","lib","*;%s","WEB-INF","classes",";%s","WEB-INF","classes","META-INF;%s" )%(app_home ,app_home ,app_home, app_home, app_home, app_home ,app_home ,self.SQL_CONNECTOR_JAR)
get_java_cmd = "%s -Djava.security.egd=file:///dev/urandom -Dlogdir=%s -Dlog4j.configuration=db_patch.log4j.xml -cp %s org.apache.ranger.patch.%s"%(self.JAVA_BIN,ranger_log,path,className)
if is_unix:
ret = subprocess.call(shlex.split(get_java_cmd))
elif os_name == "WINDOWS":
ret = subprocess.call(get_java_cmd)
if ret == 0:
if is_unix:
query = get_cmd + " -c \; -query \"update x_db_version_h set active='Y' where version='J%s' and active='N' and updated_by='%s';\"" %(version, client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='J%s' and active='N' and updated_by='%s';\" -c ;" %(version, client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log ("[I] java patch "+ className +" is applied..","info")
else:
if is_unix:
query = get_cmd + " -c \; -query \"delete from x_db_version_h where version='J%s' and active='N' and updated_by='%s';\"" %(version, client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='J%s' and active='N' and updated_by='%s';\" -c ;" %(version, client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] java patch "+ className +" failed", "error")
sys.exit(1)
else:
if is_unix:
query = get_cmd + " -c \; -query \"delete from x_db_version_h where version='J%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='J%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] applying java patch "+ className +" failed", "error")
sys.exit(1)
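	# Runs the ChangePasswordUtil CLI to change the default admin password, using the
	# DEFAULT_ADMIN_UPDATE row in x_db_version_h as a cross-process status flag.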
def change_admin_default_password(self, xa_db_host, db_user, db_password, db_name,userName,oldPassword,newPassword):
my_dict = {}
version = ""
className = "ChangePasswordUtil"
version = 'DEFAULT_ADMIN_UPDATE'
app_home = os.path.join(RANGER_ADMIN_HOME,"ews","webapp")
ranger_log = os.path.join(RANGER_ADMIN_HOME,"ews","logs")
filePath = os.path.join(app_home,"WEB-INF","classes","org","apache","ranger","patch","cliutil","ChangePasswordUtil.class")
if os.path.exists(filePath):
if version != "":
get_cmd = self.get_jisql_cmd(db_user, db_password)
if is_unix:
query = get_cmd + " -c \; -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
log("[I] Ranger admin default password has already been changed!!","info")
else:
if is_unix:
query = get_cmd + " -c \; -query \"select version from x_db_version_h where version = '%s' and active = 'N';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
while(output.strip(version + " |")):
log("[I] Ranger Password change utility is being executed by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
jisql_log(query, db_password)
output = check_output(query)
else:
if is_unix:
query = get_cmd + " -c \; -query \"insert into x_db_version_h (id,version, inst_at, inst_by, updated_at, updated_by,active) values ( X_DB_VERSION_H_SEQ.nextval,'%s', sysdate, '%s', sysdate, '%s','N');\"" %(version, ranger_version, client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"insert into x_db_version_h (id,version, inst_at, inst_by, updated_at, updated_by,active) values ( X_DB_VERSION_H_SEQ.nextval,'%s', sysdate, '%s', sysdate, '%s','N');\" -c ;" %(version, ranger_version, client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log ("[I] Ranger admin default password change request is in process..","info")
else:
log("[E] Ranger admin default password change request failed", "error")
sys.exit(1)
if is_unix:
path = os.path.join("%s","WEB-INF","classes","conf:%s","WEB-INF","classes","lib","*:%s","WEB-INF",":%s","META-INF",":%s","WEB-INF","lib","*:%s","WEB-INF","classes",":%s","WEB-INF","classes","META-INF:%s" )%(app_home ,app_home ,app_home, app_home, app_home, app_home ,app_home ,self.SQL_CONNECTOR_JAR)
elif os_name == "WINDOWS":
path = os.path.join("%s","WEB-INF","classes","conf;%s","WEB-INF","classes","lib","*;%s","WEB-INF",";%s","META-INF",";%s","WEB-INF","lib","*;%s","WEB-INF","classes",";%s","WEB-INF","classes","META-INF;%s" )%(app_home ,app_home ,app_home, app_home, app_home, app_home ,app_home ,self.SQL_CONNECTOR_JAR)
get_java_cmd = "%s -Dlogdir=%s -Dlog4j.configuration=db_patch.log4j.xml -cp %s org.apache.ranger.patch.cliutil.%s %s %s %s -default"%(self.JAVA_BIN,ranger_log,path,className,userName,oldPassword,newPassword)
if is_unix:
status = subprocess.call(shlex.split(get_java_cmd))
elif os_name == "WINDOWS":
status = subprocess.call(get_java_cmd)
if status == 0 or status==2:
if is_unix:
query = get_cmd + " -c \; -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0 and status == 0:
log ("[I] Ranger admin default password change request processed successfully..","info")
elif ret == 0 and status == 2:
log ("[I] Ranger admin default password change request process skipped!","info")
else:
if is_unix:
query = get_cmd + " -c \; -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] Ranger admin default password change request failed", "error")
sys.exit(1)
else:
if is_unix:
query = get_cmd + " -c \; -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] Ranger admin default password change request failed", "error")
sys.exit(1)
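	# Imports the version-history table schema from file_name, retrying (with a 30s
	# pause after a failed import) until check_table() reports the table exists.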
def create_version_history_table(self, db_name, db_user, db_password, file_name,table_name):
name = basename(file_name)
if os.path.isfile(file_name):
isTableExist=self.check_table(db_name, db_user, db_password, table_name)
if isTableExist==False:
log("[I] Importing "+table_name+" table schema from file: " + name,"info")
while(isTableExist==False):
get_cmd = self.get_jisql_cmd(db_user, db_password)
if is_unix:
query = get_cmd + " -input %s -c \;" %file_name
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -input %s -c ;" %file_name
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] "+name + " file imported successfully","info")
else:
log("[E] "+name + " file import failed!","error")
time.sleep(30)
isTableExist=self.check_table(db_name, db_user, db_password, table_name)
else:
log("[E] Table schema file " + name+ " not found","error")
sys.exit(1)
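	# Imports the core DB schema once: a CORE_DB_SCHEMA row in x_db_version_h acts as
	# the lock, and the presence of first_table/last_table decides whether the schema
	# was created here or by another process.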
def import_core_db_schema(self, db_name, db_user, db_password, file_name,first_table,last_table):
version = 'CORE_DB_SCHEMA'
if os.path.isfile(file_name):
get_cmd = self.get_jisql_cmd(db_user, db_password)
if is_unix:
query = get_cmd + " -c \; -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
log("[I] "+version+" is already imported" ,"info")
else:
if is_unix:
query = get_cmd + " -c \; -query \"select version from x_db_version_h where version = '%s' and active = 'N';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
while(output.strip(version + " |")):
log("[I] "+ version +" is being imported by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
jisql_log(query, db_password)
output = check_output(query)
else:
if is_unix:
query = get_cmd + " -c \; -query \"insert into x_db_version_h (id,version, inst_at, inst_by, updated_at, updated_by,active) values ( X_DB_VERSION_H_SEQ.nextval,'%s', sysdate, '%s', sysdate, '%s','N');\"" %(version, ranger_version, client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"insert into x_db_version_h (id,version, inst_at, inst_by, updated_at, updated_by,active) values ( X_DB_VERSION_H_SEQ.nextval,'%s', sysdate, '%s', sysdate, '%s','N');\" -c ;" %(version, ranger_version, client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret != 0:
log("[E] "+ version +" import failed", "error")
sys.exit(1)
isFirstTableExist = self.check_table(db_name, db_user, db_password, first_table)
isLastTableExist = self.check_table(db_name, db_user, db_password, last_table)
isSchemaCreated=False
if isFirstTableExist == True and isLastTableExist == True :
isSchemaCreated=True
elif isFirstTableExist == False and isLastTableExist == False :
isImported=self.import_db_file(db_name, db_user, db_password, file_name)
if(isImported==False):
log("[I] "+ version +" might being imported by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
isLastTableExist=self.check_table(db_name, db_user, db_password, last_table)
if(isLastTableExist==True):
isSchemaCreated=True
elif isFirstTableExist == False or isLastTableExist == False :
while(isFirstTableExist==False or isLastTableExist == False):
log("[I] "+ version +" is being imported by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
isFirstTableExist=self.check_table(db_name, db_user, db_password, first_table)
isLastTableExist=self.check_table(db_name, db_user, db_password, last_table)
if(isFirstTableExist==True and isLastTableExist==True):
isSchemaCreated=True
if isSchemaCreated == True:
if is_unix:
query = get_cmd + " -c \; -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] "+version +" import status has been updated", "info")
else:
if is_unix:
query = get_cmd + " -c \; -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] Updating "+version +" import status failed", "error")
sys.exit(1)
else:
if is_unix:
query = get_cmd + " -c \; -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] "+version + " import failed!","error")
sys.exit(1)
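	# Returns True when x_db_version_h has no active entry for this version installed
	# by the current ranger_version, i.e. the status still needs to be recorded.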
def hasPendingPatches(self, db_name, db_user, db_password, version):
get_cmd = self.get_jisql_cmd(db_user, db_password)
if is_unix:
query = get_cmd + " -c \; -query \"select version from x_db_version_h where version = '%s' and inst_by = '%s' and active = 'Y';\"" %(version,ranger_version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and inst_by = '%s' and active = 'Y';\" -c ;" %(version,ranger_version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
return False
else:
return True
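	# Records an active ('Y') x_db_version_h entry for version when hasPendingPatches()
	# says it is not present yet.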
def update_applied_patches_status(self,db_name, db_user, db_password,version):
if self.hasPendingPatches(db_name, db_user, db_password,version) == True:
get_cmd = self.get_jisql_cmd(db_user, db_password)
if is_unix:
query = get_cmd + " -c \; -query \"insert into x_db_version_h (id,version, inst_at, inst_by, updated_at, updated_by,active) values ( X_DB_VERSION_H_SEQ.nextval,'%s', sysdate, '%s', sysdate, '%s','Y');\"" %(version, ranger_version, client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"insert into x_db_version_h (id,version, inst_at, inst_by, updated_at, updated_by,active) values ( X_DB_VERSION_H_SEQ.nextval,'%s', sysdate, '%s', sysdate, '%s','Y');\" -c ;" %(version, ranger_version, client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret != 0:
log("[E] "+ version +" status entry to x_db_version_h table failed", "error")
sys.exit(1)
else:
log("[I] "+ version +" status entry to x_db_version_h table completed", "info")
class PostgresConf(BaseDB):
# Constructor
def __init__(self, host, SQL_CONNECTOR_JAR, JAVA_BIN):
self.host = host
self.SQL_CONNECTOR_JAR = SQL_CONNECTOR_JAR
self.JAVA_BIN = JAVA_BIN
def get_jisql_cmd(self, user, password, db_name):
#TODO: User array for forming command
path = RANGER_ADMIN_HOME
self.JAVA_BIN = self.JAVA_BIN.strip("'")
if is_unix:
jisql_cmd = "%s -cp %s:%s/jisql/lib/* org.apache.util.sql.Jisql -driver postgresql -cstring jdbc:postgresql://%s/%s -u %s -p '%s' -noheader -trim -c \;" %(self.JAVA_BIN, self.SQL_CONNECTOR_JAR, path, self.host, db_name, user, password)
elif os_name == "WINDOWS":
jisql_cmd = "%s -cp %s;%s\jisql\\lib\\* org.apache.util.sql.Jisql -driver postgresql -cstring jdbc:postgresql://%s/%s -u %s -p \"%s\" -noheader -trim" %(self.JAVA_BIN, self.SQL_CONNECTOR_JAR, path, self.host, db_name, user, password)
return jisql_cmd
def check_connection(self, db_name, db_user, db_password):
log("[I] Checking connection", "info")
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -query \"SELECT 1;\""
elif os_name == "WINDOWS":
query = get_cmd + " -query \"SELECT 1;\" -c ;"
jisql_log(query, db_password)
output = check_output(query)
if output.strip('1 |'):
log("[I] connection success", "info")
return True
else:
log("[E] Can't establish connection", "error")
sys.exit(1)
def import_db_file(self, db_name, db_user, db_password, file_name):
isImported=False
name = basename(file_name)
if os.path.isfile(file_name):
log("[I] Importing db schema to database " + db_name + " from file: " + name,"info")
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -input %s -c ;" %file_name
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] "+name + " DB schema imported successfully","info")
isImported=True
else:
log("[E] "+name + " DB schema import failed!","error")
else:
log("[E] DB schema file " + name+ " not found","error")
sys.exit(1)
return isImported
def grant_audit_db_user(self, audit_db_name , db_user, audit_db_user, db_password, audit_db_password):
log("[I] Granting permission to " + audit_db_user, "info")
get_cmd = self.get_jisql_cmd(db_user, db_password, audit_db_name)
log("[I] Granting select and usage privileges to Postgres audit user '" + audit_db_user + "' on XA_ACCESS_AUDIT_SEQ", "info")
if is_unix:
query = get_cmd + " -query 'GRANT SELECT,USAGE ON XA_ACCESS_AUDIT_SEQ TO %s;'" % (audit_db_user)
jisql_log(query, db_password)
ret = subprocessCallWithRetry(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"GRANT SELECT,USAGE ON XA_ACCESS_AUDIT_SEQ TO %s;\" -c ;" % (audit_db_user)
jisql_log(query, db_password)
ret = subprocessCallWithRetry(query)
if ret != 0:
log("[E] Granting select privileges to Postgres user '" + audit_db_user + "' failed", "error")
sys.exit(1)
log("[I] Granting insert privileges to Postgres audit user '" + audit_db_user + "' on XA_ACCESS_AUDIT table", "info")
if is_unix:
query = get_cmd + " -query 'GRANT INSERT ON XA_ACCESS_AUDIT TO %s;'" % (audit_db_user)
jisql_log(query, db_password)
ret = subprocessCallWithRetry(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"GRANT INSERT ON XA_ACCESS_AUDIT TO %s;\" -c ;" % (audit_db_user)
jisql_log(query, db_password)
ret = subprocessCallWithRetry(query)
if ret != 0:
log("[E] Granting insert privileges to Postgres user '" + audit_db_user + "' failed", "error")
sys.exit(1)
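	# Ensures the plpgsql procedural language exists in db_name, creating it when
	# pg_catalog.pg_language has no entry for it.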
def create_language_plpgsql(self,db_user, db_password, db_name):
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -query \"SELECT 1 FROM pg_catalog.pg_language WHERE lanname='plpgsql';\""
elif os_name == "WINDOWS":
query = get_cmd + " -query \"SELECT 1 FROM pg_catalog.pg_language WHERE lanname='plpgsql';\" -c ;"
jisql_log(query, db_password)
output = check_output(query)
if not output.strip('1 |'):
if is_unix:
query = get_cmd + " -query \"CREATE LANGUAGE plpgsql;\""
jisql_log(query, db_password)
ret = subprocessCallWithRetry(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"CREATE LANGUAGE plpgsql;\" -c ;"
jisql_log(query, db_password)
ret = subprocessCallWithRetry(query)
if ret == 0:
log("[I] LANGUAGE plpgsql created successfully", "info")
else:
log("[E] LANGUAGE plpgsql creation failed", "error")
sys.exit(1)
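	# Applies a single SQL patch file to db_name; x_db_version_h rows ('N' pending,
	# 'Y' applied) coordinate concurrent installers, and a failed import is re-checked
	# in case another process applied the patch first.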
def import_db_patches(self, db_name, db_user, db_password, file_name):
self.create_language_plpgsql(db_user, db_password, db_name)
name = basename(file_name)
if os.path.isfile(file_name):
version = name.split('-')[0]
log("[I] Executing patch on " + db_name + " from file: " + name,"info")
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
log("[I] Patch "+ name +" is already applied" ,"info")
else:
if is_unix:
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
while(output.strip(version + " |")):
log("[I] Patch "+ name +" is being applied by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
jisql_log(query, db_password)
output = check_output(query)
else:
if is_unix:
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', current_timestamp, '%s', current_timestamp, '%s','N') ;\"" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', current_timestamp, '%s', current_timestamp, '%s','N') ;\" -c ;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log ("[I] Patch "+ name +" is being applied..","info")
else:
log("[E] Patch "+ name +" failed", "error")
if is_unix:
query = get_cmd + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -input %s -c ;" %file_name
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret != 0:
if is_unix:
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
ret=0
log("[I] Patch "+ name +" has been applied by some other process!" ,"info")
if ret == 0:
log("[I] "+name + " patch applied","info")
if is_unix:
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] Patch version updated", "info")
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] Updating patch version failed", "error")
sys.exit(1)
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] "+name + " import failed!","error")
sys.exit(1)
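	# Applies an audit DB patch against audit_db_name while tracking its status in the
	# admin database's x_db_version_h table through xa_sqlObj.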
def import_auditdb_patches(self, xa_sqlObj,xa_db_host, audit_db_host, db_name, audit_db_name, db_user, audit_db_user, db_password, audit_db_password, file_name, TABLE_NAME):
log("[I] --------- Checking XA_ACCESS_AUDIT table to apply audit db patches --------- ","info")
self.create_language_plpgsql(db_user, db_password, audit_db_name)
output = self.check_table(audit_db_name, db_user, db_password, TABLE_NAME)
if output == True:
name = basename(file_name)
if os.path.isfile(file_name):
version = name.split('-')[0]
log("[I] Executing patch on " + audit_db_name + " from file: " + name,"info")
get_cmd1 = xa_sqlObj.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd1 + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd1 + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
log("[I] Patch "+ name +" is already applied" ,"info")
else:
if is_unix:
query = get_cmd1 + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd1 + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
while(output.strip(version + " |")):
log("[I] Patch "+ name +" is being applied by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
jisql_log(query, db_password)
output = check_output(query)
else:
if is_unix:
query = get_cmd1 + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', current_timestamp, '%s', current_timestamp, '%s','N') ;\"" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd1 + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', current_timestamp, '%s', current_timestamp, '%s','N') ;\" -c ;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log ("[I] Patch "+ name +" is being applied..","info")
else:
log("[E] Patch "+ name +" failed", "error")
get_cmd2 = self.get_jisql_cmd(db_user, db_password, audit_db_name)
if is_unix:
query = get_cmd2 + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd2 + " -input %s -c ;" %file_name
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] "+name + " patch applied","info")
if is_unix:
query = get_cmd1 + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd1 + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] Patch version updated", "info")
else:
if is_unix:
query = get_cmd1 + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd1 + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] Updating patch version failed", "error")
sys.exit(1)
else:
if is_unix:
query = get_cmd1 + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd1 + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] "+name + " import failed!","error")
sys.exit(1)
else:
log("[I] Table XA_ACCESS_AUDIT does not exists in " +audit_db_name,"error")
sys.exit(1)
def check_table(self, db_name, db_user, db_password, TABLE_NAME):
log("[I] Verifying table " + TABLE_NAME +" in database " + db_name, "info")
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -query \"select * from (select table_name from information_schema.tables where table_catalog='%s' and table_name = '%s') as temp;\"" %(db_name , TABLE_NAME)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select * from (select table_name from information_schema.tables where table_catalog='%s' and table_name = '%s') as temp;\" -c ;" %(db_name , TABLE_NAME)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(TABLE_NAME +" |"):
log("[I] Table " + TABLE_NAME +" already exists in database " + db_name, "info")
return True
else:
log("[I] Table " + TABLE_NAME +" does not exist in database " + db_name, "info")
return False
def auditdb_operation(self, xa_db_host, audit_db_host, db_name, audit_db_name, db_user, audit_db_user, db_password, audit_db_password, file_name, TABLE_NAME):
log("[I] --------- Check admin user connection ---------","info")
self.check_connection(audit_db_name, db_user, db_password)
log("[I] --------- Check audit user connection ---------","info")
self.check_connection(audit_db_name, audit_db_user, audit_db_password)
log("[I] --------- Check table ---------","info")
output = self.check_table(audit_db_name, audit_db_user, audit_db_password, TABLE_NAME)
if output == False:
self.import_db_file(audit_db_name, db_user, db_password, file_name)
self.grant_audit_db_user(audit_db_name ,db_user, audit_db_user, db_password,audit_db_password)
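	# Postgres variant of execute_java_patches: discovers Patch*_J<n>.class files and
	# tracks each run in x_db_version_h before invoking the patch class on the JVM.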
def execute_java_patches(self, xa_db_host, db_user, db_password, db_name):
my_dict = {}
version = ""
className = ""
app_home = os.path.join(RANGER_ADMIN_HOME,"ews","webapp")
ranger_log = os.path.join(RANGER_ADMIN_HOME,"ews","logs")
javaFiles = os.path.join(app_home,"WEB-INF","classes","org","apache","ranger","patch")
if not os.path.exists(javaFiles):
log("[I] No java patches to apply!","info")
else:
files = os.listdir(javaFiles)
if files:
for filename in files:
f = re.match("^Patch.*?.class$",filename)
if f:
className = re.match("(Patch.*?)_.*.class",filename)
className = className.group(1)
version = re.match("Patch.*?_(.*).class",filename)
version = version.group(1)
key3 = int(version.strip("J"))
my_dict[key3] = filename
keylist = my_dict.keys()
keylist.sort()
for key in keylist:
#print "%s: %s" % (key, my_dict[key])
version = str(key)
className = my_dict[key]
className = className.strip(".class")
if version != "":
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -query \"select version from x_db_version_h where version = 'J%s' and active = 'Y';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = 'J%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
log("[I] Java patch "+ className +" is already applied" ,"info")
else:
if is_unix:
query = get_cmd + " -query \"select version from x_db_version_h where version = 'J%s' and active = 'N';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = 'J%s' and active = 'N';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
while(output.strip(version + " |")):
log("[I] Java patch "+ className +" is being applied by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
jisql_log(query, db_password)
output = check_output(query)
else:
if is_unix:
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('J%s', current_timestamp, '%s', current_timestamp, '%s','N') ;\"" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('J%s', current_timestamp, '%s', current_timestamp, '%s','N') ;\" -c ;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log ("[I] java patch "+ className +" is being applied..","info")
else:
log("[E] java patch "+ className +" failed", "error")
sys.exit(1)
if is_unix:
path = os.path.join("%s","WEB-INF","classes","conf:%s","WEB-INF","classes","lib","*:%s","WEB-INF",":%s","META-INF",":%s","WEB-INF","lib","*:%s","WEB-INF","classes",":%s","WEB-INF","classes","META-INF:%s" )%(app_home ,app_home ,app_home, app_home, app_home, app_home ,app_home ,self.SQL_CONNECTOR_JAR)
elif os_name == "WINDOWS":
path = os.path.join("%s","WEB-INF","classes","conf;%s","WEB-INF","classes","lib","*;%s","WEB-INF",";%s","META-INF",";%s","WEB-INF","lib","*;%s","WEB-INF","classes",";%s","WEB-INF","classes","META-INF;%s" )%(app_home ,app_home ,app_home, app_home, app_home, app_home ,app_home ,self.SQL_CONNECTOR_JAR)
get_java_cmd = "%s -Dlogdir=%s -Dlog4j.configuration=db_patch.log4j.xml -cp %s org.apache.ranger.patch.%s"%(self.JAVA_BIN,ranger_log,path,className)
if is_unix:
ret = subprocess.call(shlex.split(get_java_cmd))
elif os_name == "WINDOWS":
ret = subprocess.call(get_java_cmd)
if ret == 0:
if is_unix:
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='J%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='J%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log ("[I] java patch "+ className +" is applied..","info")
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='J%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='J%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] java patch "+ className +" failed", "error")
sys.exit(1)
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='J%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='J%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] applying java patch "+ className +" failed", "error")
sys.exit(1)
def change_admin_default_password(self, xa_db_host, db_user, db_password, db_name,userName,oldPassword,newPassword):
my_dict = {}
version = ""
className = "ChangePasswordUtil"
version = 'DEFAULT_ADMIN_UPDATE'
app_home = os.path.join(RANGER_ADMIN_HOME,"ews","webapp")
ranger_log = os.path.join(RANGER_ADMIN_HOME,"ews","logs")
filePath = os.path.join(app_home,"WEB-INF","classes","org","apache","ranger","patch","cliutil","ChangePasswordUtil.class")
if os.path.exists(filePath):
if version != "":
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
log("[I] Ranger admin default password has already been changed!!","info")
else:
if is_unix:
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
while(output.strip(version + " |")):
log("[I] Ranger Password change utility is being executed by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
jisql_log(query, db_password)
output = check_output(query)
else:
if is_unix:
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', current_timestamp, '%s', current_timestamp, '%s','N') ;\"" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', current_timestamp, '%s', current_timestamp, '%s','N') ;\" -c ;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log ("[I] Ranger admin default password change request is in process..","info")
else:
log("[E] Ranger admin default password change request failed", "error")
sys.exit(1)
if is_unix:
path = os.path.join("%s","WEB-INF","classes","conf:%s","WEB-INF","classes","lib","*:%s","WEB-INF",":%s","META-INF",":%s","WEB-INF","lib","*:%s","WEB-INF","classes",":%s","WEB-INF","classes","META-INF:%s" )%(app_home ,app_home ,app_home, app_home, app_home, app_home ,app_home ,self.SQL_CONNECTOR_JAR)
elif os_name == "WINDOWS":
path = os.path.join("%s","WEB-INF","classes","conf;%s","WEB-INF","classes","lib","*;%s","WEB-INF",";%s","META-INF",";%s","WEB-INF","lib","*;%s","WEB-INF","classes",";%s","WEB-INF","classes","META-INF;%s" )%(app_home ,app_home ,app_home, app_home, app_home, app_home ,app_home ,self.SQL_CONNECTOR_JAR)
get_java_cmd = "%s -Dlogdir=%s -Dlog4j.configuration=db_patch.log4j.xml -cp %s org.apache.ranger.patch.cliutil.%s %s %s %s -default"%(self.JAVA_BIN,ranger_log,path,className,userName,oldPassword,newPassword)
if is_unix:
status = subprocess.call(shlex.split(get_java_cmd))
elif os_name == "WINDOWS":
status = subprocess.call(get_java_cmd)
if status == 0 or status==2:
if is_unix:
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0 and status == 0:
log ("[I] Ranger admin default password change request processed successfully..","info")
elif ret == 0 and status == 2:
log ("[I] Ranger admin default password change request process skipped!","info")
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] Ranger admin default password change request failed", "error")
sys.exit(1)
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] Ranger admin default password change request failed", "error")
sys.exit(1)
def create_version_history_table(self, db_name, db_user, db_password, file_name,table_name):
name = basename(file_name)
if os.path.isfile(file_name):
isTableExist=self.check_table(db_name, db_user, db_password, table_name)
if isTableExist==False:
log("[I] Importing "+table_name+" table schema to database " + db_name + " from file: " + name,"info")
while(isTableExist==False):
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -input %s -c ;" %file_name
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] "+name + " file imported successfully","info")
else:
log("[E] "+name + " file import failed!","error")
time.sleep(30)
isTableExist=self.check_table(db_name, db_user, db_password, table_name)
else:
log("[E] Table schema file " + name+ " not found","error")
sys.exit(1)
def import_core_db_schema(self, db_name, db_user, db_password, file_name,first_table,last_table):
version = 'CORE_DB_SCHEMA'
if os.path.isfile(file_name):
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
log("[I] "+version+" is already imported" ,"info")
else:
if is_unix:
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
while(output.strip(version + " |")):
log("[I] "+ version +" is being imported by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
jisql_log(query, db_password)
output = check_output(query)
else:
if is_unix:
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', current_timestamp, '%s', current_timestamp, '%s','N') ;\"" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', now(), '%s', now(), '%s','N') ;\" -c ;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret != 0:
log("[E] "+ version +" import failed", "error")
sys.exit(1)
isFirstTableExist = self.check_table(db_name, db_user, db_password, first_table)
isLastTableExist = self.check_table(db_name, db_user, db_password, last_table)
isSchemaCreated=False
if isFirstTableExist == True and isLastTableExist == True :
isSchemaCreated=True
elif isFirstTableExist == False and isLastTableExist == False :
isImported=self.import_db_file(db_name, db_user, db_password, file_name)
if(isImported==False):
log("[I] "+ version +" might being imported by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
isLastTableExist=self.check_table(db_name, db_user, db_password, last_table)
if(isLastTableExist==True):
isSchemaCreated=True
elif isFirstTableExist == False or isLastTableExist == False :
while(isFirstTableExist == False or isLastTableExist==False):
log("[I] "+ version +" is being imported by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
isFirstTableExist=self.check_table(db_name, db_user, db_password, first_table)
isLastTableExist=self.check_table(db_name, db_user, db_password, last_table)
if(isFirstTableExist == True and isLastTableExist==True):
isSchemaCreated=True
if isSchemaCreated == True:
if is_unix:
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] "+version +" import status has been updated", "info")
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] Updating "+version +" import status failed", "error")
sys.exit(1)
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\"" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] "+version + " import failed!","error")
sys.exit(1)
def hasPendingPatches(self, db_name, db_user, db_password, version):
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and inst_by = '%s' and active = 'Y';\"" %(version,ranger_version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and inst_by = '%s' and active = 'Y';\" -c ;" %(version,ranger_version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
return False
else:
return True
def update_applied_patches_status(self,db_name, db_user, db_password,version):
if self.hasPendingPatches(db_name, db_user, db_password,version) == True:
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', current_timestamp, '%s', current_timestamp, '%s','Y') ;\"" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', current_timestamp, '%s', current_timestamp, '%s','Y') ;\" -c ;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret != 0:
log("[E] "+ version +" status entry to x_db_version_h table failed", "error")
sys.exit(1)
else:
log("[I] "+ version +" status entry to x_db_version_h table completed", "info")
class SqlServerConf(BaseDB):
# Constructor
def __init__(self, host, SQL_CONNECTOR_JAR, JAVA_BIN):
self.host = host
self.SQL_CONNECTOR_JAR = SQL_CONNECTOR_JAR
self.JAVA_BIN = JAVA_BIN
def get_jisql_cmd(self, user, password, db_name):
#TODO: User array for forming command
path = RANGER_ADMIN_HOME
self.JAVA_BIN = self.JAVA_BIN.strip("'")
if is_unix:
jisql_cmd = "%s -cp %s:%s/jisql/lib/* org.apache.util.sql.Jisql -user %s -p '%s' -driver mssql -cstring jdbc:sqlserver://%s\\;databaseName=%s -noheader -trim"%(self.JAVA_BIN, self.SQL_CONNECTOR_JAR, path, user, password, self.host,db_name)
elif os_name == "WINDOWS":
jisql_cmd = "%s -cp %s;%s\\jisql\\lib\\* org.apache.util.sql.Jisql -user %s -p \"%s\" -driver mssql -cstring jdbc:sqlserver://%s;databaseName=%s -noheader -trim"%(self.JAVA_BIN, self.SQL_CONNECTOR_JAR, path, user, password, self.host,db_name)
return jisql_cmd
def check_connection(self, db_name, db_user, db_password):
log("[I] Checking connection", "info")
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -c \; -query \"SELECT 1;\""
elif os_name == "WINDOWS":
query = get_cmd + " -query \"SELECT 1;\" -c ;"
jisql_log(query, db_password)
output = check_output(query)
if output.strip('1 |'):
log("[I] Connection success", "info")
return True
else:
log("[E] Can't establish connection", "error")
sys.exit(1)
def import_db_file(self, db_name, db_user, db_password, file_name):
isImported=False
name = basename(file_name)
if os.path.isfile(file_name):
log("[I] Importing db schema to database " + db_name + " from file: " + name,"info")
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] "+name + " DB schema imported successfully","info")
isImported=True
else:
log("[E] "+name + " DB Schema import failed!","error")
else:
log("[E] DB schema file " + name+ " not found","error")
sys.exit(1)
return isImported
def check_table(self, db_name, db_user, db_password, TABLE_NAME):
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -c \; -query \"SELECT TABLE_NAME FROM information_schema.tables where table_name = '%s';\"" %(TABLE_NAME)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"SELECT TABLE_NAME FROM information_schema.tables where table_name = '%s';\" -c ;" %(TABLE_NAME)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(TABLE_NAME + " |"):
log("[I] Table '" + TABLE_NAME + "' already exists in database '" + db_name + "'","info")
return True
else:
log("[I] Table '" + TABLE_NAME + "' does not exist in database '" + db_name + "'","info")
return False
def grant_audit_db_user(self, audit_db_name, db_user, audit_db_user, db_password, audit_db_password,TABLE_NAME):
log("[I] Granting permission to audit user '" + audit_db_user + "' on db '" + audit_db_name + "'","info")
get_cmd = self.get_jisql_cmd(db_user, db_password,audit_db_name)
if is_unix:
query = get_cmd + " -c \; -query \"USE %s GRANT SELECT,INSERT to %s;\"" %(audit_db_name ,audit_db_user)
jisql_log(query, db_password)
ret = subprocessCallWithRetry(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"USE %s GRANT SELECT,INSERT to %s;\" -c ;" %(audit_db_name ,audit_db_user)
jisql_log(query, db_password)
ret = subprocessCallWithRetry(query)
if ret != 0 :
sys.exit(1)
else:
log("[I] Permission granted to audit user " + audit_db_user , "info")
def import_db_patches(self, db_name, db_user, db_password, file_name):
name = basename(file_name)
if os.path.isfile(file_name):
version = name.split('-')[0]
log("[I] Executing patch on " + db_name + " from file: " + name,"info")
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -c \; -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
log("[I] Patch "+ name +" is already applied" ,"info")
else:
if is_unix:
query = get_cmd + " -c \; -query \"select version from x_db_version_h where version = '%s' and active = 'N';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
while(output.strip(version + " |")):
log("[I] Patch "+ name +" is being applied by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
jisql_log(query, db_password)
output = check_output(query)
else:
if is_unix:
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', GETDATE(), '%s', GETDATE(), '%s','N') ;\" -c \;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', GETDATE(), '%s', GETDATE(), '%s','N') ;\" -c ;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log ("[I] Patch "+ name +" is being applied..","info")
else:
log("[E] Patch "+ name +" failed", "error")
if is_unix:
query = get_cmd + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret != 0:
time.sleep(1)
if is_unix:
query = get_cmd + " -c \; -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
ret=0
log("[I] Patch "+ name +" has been applied by some other process!" ,"info")
if ret == 0:
log("[I] "+name + " patch applied","info")
if is_unix:
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] Patch version updated", "info")
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] Updating patch version failed", "error")
sys.exit(1)
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] "+name + " import failed!","error")
sys.exit(1)
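# Applies an audit-DB patch file. Patch bookkeeping (x_db_version_h) goes through the admin DB
# connection (get_cmd1) while the patch SQL itself runs against the audit DB (get_cmd2).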
def import_auditdb_patches(self, xa_sqlObj,xa_db_host, audit_db_host, db_name, audit_db_name, db_user, audit_db_user, db_password, audit_db_password, file_name, TABLE_NAME):
log("[I] --------- Checking XA_ACCESS_AUDIT table to apply audit db patches --------- ","info")
output = self.check_table(audit_db_name, db_user, db_password, TABLE_NAME)
if output == True:
name = basename(file_name)
if os.path.isfile(file_name):
version = name.split('-')[0]
log("[I] Executing patch on " + audit_db_name + " from file: " + name,"info")
get_cmd1 = xa_sqlObj.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd1 + " -c \; query \"select version from x_db_version_h where version = '%s' and active = 'Y';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd1 + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
log("[I] Patch "+ name +" is already applied" ,"info")
else:
if is_unix:
query = get_cmd1 + " -c \; query \"select version from x_db_version_h where version = '%s' and active = 'N';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd1 + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
while(output.strip(version + " |")):
log("[I] Patch "+ name +" is being applied by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
jisql_log(query, db_password)
output = check_output(query)
else:
get_cmd2 = self.get_jisql_cmd(db_user, db_password, audit_db_name)
if is_unix:
query = get_cmd1 + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', GETDATE(), '%s', GETDATE(), '%s','N') ;\" -c \;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd1 + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', GETDATE(), '%s', GETDATE(), '%s','N') ;\" -c ;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log ("[I] Patch "+ name +" is being applied..","info")
else:
log("[E] Patch "+ name +" failed", "error")
if is_unix:
query = get_cmd2 + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd2 + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] "+name + " patch applied","info")
if is_unix:
query = get_cmd1 + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd1 + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] Patch version updated", "info")
else:
if is_unix:
query = get_cmd1 + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd1 + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] Updating patch version failed", "error")
sys.exit(1)
else:
if is_unix:
query = get_cmd1 + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd1 + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] "+name + " import failed!","error")
sys.exit(1)
else:
log("[I] Table XA_ACCESS_AUDIT does not exists in " +audit_db_name,"error")
sys.exit(1)
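# Verifies admin and audit user connectivity, creates the audit table from file_name if it
# does not exist yet, and grants the audit user access to it.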
def auditdb_operation(self, xa_db_host, audit_db_host, db_name, audit_db_name,db_user, audit_db_user, db_password, audit_db_password, file_name, TABLE_NAME):
log("[I] --------- Check admin user connection --------- ","info")
self.check_connection(audit_db_name, db_user, db_password)
log("[I] --------- Check audit user connection --------- ","info")
self.check_connection(audit_db_name, audit_db_user, audit_db_password)
log("[I] --------- Check audit table exists --------- ","info")
output = self.check_table(audit_db_name, db_user, db_password, TABLE_NAME)
if output == False:
self.import_db_file(audit_db_name ,db_user, db_password, file_name)
self.grant_audit_db_user( audit_db_name ,db_user, audit_db_user, db_password,audit_db_password,TABLE_NAME)
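# Discovers Patch*.class files under WEB-INF/classes/org/apache/ranger/patch, orders them by
# their numeric version suffix, and runs each pending one via java, using x_db_version_h rows
# ('N' = in progress, 'Y' = applied) to coordinate with other installer processes.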
def execute_java_patches(self, xa_db_host, db_user, db_password, db_name):
my_dict = {}
version = ""
className = ""
app_home = os.path.join(RANGER_ADMIN_HOME,"ews","webapp")
ranger_log = os.path.join(RANGER_ADMIN_HOME,"ews","logs")
javaFiles = os.path.join(app_home,"WEB-INF","classes","org","apache","ranger","patch")
if not os.path.exists(javaFiles):
log("[I] No java patches to apply!","info")
else:
files = os.listdir(javaFiles)
if files:
for filename in files:
f = re.match("^Patch.*?.class$",filename)
if f:
className = re.match("(Patch.*?)_.*.class",filename)
className = className.group(1)
version = re.match("Patch.*?_(.*).class",filename)
version = version.group(1)
key3 = int(version.strip("J"))
my_dict[key3] = filename
keylist = my_dict.keys()
keylist.sort()
for key in keylist:
#print "%s: %s" % (key, my_dict[key])
version = str(key)
className = my_dict[key]
className = className.strip(".class")
if version != "":
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -query \"select version from x_db_version_h where version = 'J%s' and active = 'Y';\" -c \;" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = 'J%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
log("[I] Java patch "+ className +" is already applied" ,"info")
else:
if is_unix:
query = get_cmd + " -query \"select version from x_db_version_h where version = 'J%s' and active = 'N';\" -c \;" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = 'J%s' and active = 'N';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
while(output.strip(version + " |")):
log("[I] Java patch "+ className +" is being applied by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
jisql_log(query, db_password)
output = check_output(query)
else:
if is_unix:
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('J%s', GETDATE(), '%s', GETDATE(), '%s','N') ;\" -c \;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('J%s', GETDATE(), '%s', GETDATE(), '%s','N') ;\" -c ;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log ("[I] java patch "+ className +" is being applied..","info")
else:
log("[E] java patch "+ className +" failed", "error")
sys.exit(1)
if is_unix:
path = os.path.join("%s","WEB-INF","classes","conf:%s","WEB-INF","classes","lib","*:%s","WEB-INF",":%s","META-INF",":%s","WEB-INF","lib","*:%s","WEB-INF","classes",":%s","WEB-INF","classes","META-INF:%s" )%(app_home ,app_home ,app_home, app_home, app_home, app_home ,app_home ,self.SQL_CONNECTOR_JAR)
elif os_name == "WINDOWS":
path = os.path.join("%s","WEB-INF","classes","conf;%s","WEB-INF","classes","lib","*;%s","WEB-INF",";%s","META-INF",";%s","WEB-INF","lib","*;%s","WEB-INF","classes",";%s","WEB-INF","classes","META-INF;%s" )%(app_home ,app_home ,app_home, app_home, app_home, app_home ,app_home ,self.SQL_CONNECTOR_JAR)
get_java_cmd = "%s -Dlogdir=%s -Dlog4j.configuration=db_patch.log4j.xml -cp %s org.apache.ranger.patch.%s"%(self.JAVA_BIN,ranger_log,path,className)
if is_unix:
ret = subprocess.call(shlex.split(get_java_cmd))
elif os_name == "WINDOWS":
ret = subprocess.call(get_java_cmd)
if ret == 0:
if is_unix:
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='J%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='J%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log ("[I] java patch "+ className +" is applied..","info")
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='J%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='J%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] java patch "+ className +" failed", "error")
sys.exit(1)
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='J%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='J%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] applying java patch "+ className +" failed", "error")
sys.exit(1)
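# Runs the ChangePasswordUtil CLI class to change the default Ranger admin password, guarded by
# a DEFAULT_ADMIN_UPDATE row in x_db_version_h so the update is attempted only once.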
def change_admin_default_password(self, xa_db_host, db_user, db_password, db_name,userName,oldPassword,newPassword):
my_dict = {}
version = ""
className = "ChangePasswordUtil"
version = 'DEFAULT_ADMIN_UPDATE'
app_home = os.path.join(RANGER_ADMIN_HOME,"ews","webapp")
ranger_log = os.path.join(RANGER_ADMIN_HOME,"ews","logs")
filePath = os.path.join(app_home,"WEB-INF","classes","org","apache","ranger","patch","cliutil","ChangePasswordUtil.class")
if os.path.exists(filePath):
if version != "":
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\" -c \;" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
log("[I] Ranger admin default password has already been changed!!","info")
else:
if is_unix:
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\" -c \;" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
while(output.strip(version + " |")):
log("[I] Ranger Password change utility is being executed by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
jisql_log(query, db_password)
output = check_output(query)
else:
if is_unix:
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', GETDATE(), '%s', GETDATE(), '%s','N') ;\" -c \;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', GETDATE(), '%s', GETDATE(), '%s','N') ;\" -c ;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log ("[I] Ranger admin default password change request is in process..","info")
else:
log("[E] Ranger admin default password change request failed", "error")
sys.exit(1)
if is_unix:
path = os.path.join("%s","WEB-INF","classes","conf:%s","WEB-INF","classes","lib","*:%s","WEB-INF",":%s","META-INF",":%s","WEB-INF","lib","*:%s","WEB-INF","classes",":%s","WEB-INF","classes","META-INF:%s" )%(app_home ,app_home ,app_home, app_home, app_home, app_home ,app_home ,self.SQL_CONNECTOR_JAR)
elif os_name == "WINDOWS":
path = os.path.join("%s","WEB-INF","classes","conf;%s","WEB-INF","classes","lib","*;%s","WEB-INF",";%s","META-INF",";%s","WEB-INF","lib","*;%s","WEB-INF","classes",";%s","WEB-INF","classes","META-INF;%s" )%(app_home ,app_home ,app_home, app_home, app_home, app_home ,app_home ,self.SQL_CONNECTOR_JAR)
get_java_cmd = "%s -Dlogdir=%s -Dlog4j.configuration=db_patch.log4j.xml -cp %s org.apache.ranger.patch.cliutil.%s %s %s %s -default"%(self.JAVA_BIN,ranger_log,path,className,userName,oldPassword,newPassword)
if is_unix:
status = subprocess.call(shlex.split(get_java_cmd))
elif os_name == "WINDOWS":
status = subprocess.call(get_java_cmd)
if status == 0 or status==2:
if is_unix:
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0 and status == 0:
log ("[I] Ranger admin default password change request processed successfully..","info")
elif ret == 0 and status == 2:
log ("[I] Ranger admin default password change request process skipped!","info")
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] Ranger admin default password change request failed", "error")
sys.exit(1)
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] Ranger admin default password change request failed", "error")
sys.exit(1)
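# Creates the x_db_version_h catalog table from file_name if it is missing, retrying until the
# table becomes visible (another process may be creating it concurrently).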
def create_version_history_table(self, db_name, db_user, db_password, file_name,table_name):
name = basename(file_name)
if os.path.isfile(file_name):
isTableExist=self.check_table(db_name, db_user, db_password, table_name)
if isTableExist==False:
log("[I] Importing "+table_name+" table schema to database " + db_name + " from file: " + name,"info")
while(isTableExist==False):
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] "+name + " file imported successfully","info")
else:
log("[E] "+name + " file import failed!","error")
time.sleep(30)
isTableExist=self.check_table(db_name, db_user, db_password, table_name)
else:
log("[E] Table schema file " + name+ " not found","error")
sys.exit(1)
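# Imports the core DB schema exactly once, using the CORE_DB_SCHEMA marker row in x_db_version_h
# and the presence of the first/last schema tables to detect work done by concurrent installers.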
def import_core_db_schema(self, db_name, db_user, db_password, file_name,first_table,last_table):
version = 'CORE_DB_SCHEMA'
if os.path.isfile(file_name):
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -c \; -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
log("[I] "+version+" is already imported" ,"info")
else:
if is_unix:
query = get_cmd + " -c \; -query \"select version from x_db_version_h where version = '%s' and active = 'N';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
while(output.strip(version + " |")):
log("[I] "+ version +" is being imported by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
jisql_log(query, db_password)
output = check_output(query)
else:
if is_unix:
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', GETDATE(), '%s', GETDATE(), '%s','N') ;\" -c \;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', GETDATE(), '%s', GETDATE(), '%s','N') ;\" -c ;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret != 0:
log("[E] "+ version +" import failed", "error")
sys.exit(1)
isFirstTableExist = self.check_table(db_name, db_user, db_password, first_table)
isLastTableExist = self.check_table(db_name, db_user, db_password, last_table)
isSchemaCreated=False
if isFirstTableExist == True and isLastTableExist == True :
isSchemaCreated=True
elif isFirstTableExist == False and isLastTableExist == False :
isImported=self.import_db_file(db_name, db_user, db_password, file_name)
if(isImported==False):
log("[I] "+ version +" might being imported by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
isLastTableExist=self.check_table(db_name, db_user, db_password, last_table)
if(isLastTableExist==True):
isSchemaCreated=True
elif isFirstTableExist == False or isLastTableExist == False :
while(isFirstTableExist == False or isLastTableExist==False):
log("[I] "+ version +" is being imported by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
isFirstTableExist=self.check_table(db_name, db_user, db_password, first_table)
isLastTableExist=self.check_table(db_name, db_user, db_password, last_table)
if(isFirstTableExist == True and isLastTableExist==True):
isSchemaCreated=True
if isSchemaCreated == True:
if is_unix:
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] "+version +" import status has been updated", "info")
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] Updating "+version +" import status failed", "error")
sys.exit(1)
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] "+version + " import failed!","error")
sys.exit(1)
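# Returns False if an active row for this version and Ranger build already exists in
# x_db_version_h, True otherwise.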
def hasPendingPatches(self, db_name, db_user, db_password, version):
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -c \; -query \"select version from x_db_version_h where version = '%s' and inst_by = '%s' and active = 'Y';\"" %(version,ranger_version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and inst_by = '%s' and active = 'Y';\" -c ;" %(version,ranger_version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
return False
else:
return True
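# Inserts an active ('Y') x_db_version_h row for the given version if it is still pending.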
def update_applied_patches_status(self,db_name, db_user, db_password,version):
if self.hasPendingPatches(db_name, db_user, db_password,version) == True:
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', GETDATE(), '%s', GETDATE(), '%s','Y') ;\" -c \;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', GETDATE(), '%s', GETDATE(), '%s','Y') ;\" -c ;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret != 0:
log("[E] "+ version +" status entry to x_db_version_h table failed", "error")
sys.exit(1)
else:
log("[I] "+ version +" status entry to x_db_version_h table completed", "info")
class SqlAnywhereConf(BaseDB):
# Constructor
def __init__(self, host, SQL_CONNECTOR_JAR, JAVA_BIN):
self.host = host
self.SQL_CONNECTOR_JAR = SQL_CONNECTOR_JAR
self.JAVA_BIN = JAVA_BIN
def get_jisql_cmd(self, user, password, db_name):
path = RANGER_ADMIN_HOME
self.JAVA_BIN = self.JAVA_BIN.strip("'")
if is_unix:
jisql_cmd = "%s -cp %s:%s/jisql/lib/* org.apache.util.sql.Jisql -user %s -password '%s' -driver sapsajdbc4 -cstring jdbc:sqlanywhere:database=%s;host=%s -noheader -trim"%(self.JAVA_BIN, self.SQL_CONNECTOR_JAR, path,user, password,db_name,self.host)
elif os_name == "WINDOWS":
jisql_cmd = "%s -cp %s;%s\\jisql\\lib\\* org.apache.util.sql.Jisql -user %s -password '%s' -driver sapsajdbc4 -cstring jdbc:sqlanywhere:database=%s;host=%s -noheader -trim"%(self.JAVA_BIN, self.SQL_CONNECTOR_JAR, path, user, password,db_name,self.host)
return jisql_cmd
def check_connection(self, db_name, db_user, db_password):
log("[I] Checking connection", "info")
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -c \; -query \"SELECT 1;\""
elif os_name == "WINDOWS":
query = get_cmd + " -query \"SELECT 1;\" -c ;"
jisql_log(query, db_password)
output = check_output(query)
if output.strip('1 |'):
log("[I] Connection success", "info")
return True
else:
log("[E] Can't establish connection", "error")
sys.exit(1)
def import_db_file(self, db_name, db_user, db_password, file_name):
isImported=False
name = basename(file_name)
if os.path.isfile(file_name):
log("[I] Importing db schema to database " + db_name + " from file: " + name,"info")
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] "+name + " DB schema imported successfully","info")
isImported=True
else:
log("[E] "+name + " DB Schema import failed!","error")
else:
log("[E] DB schema file " + name+ " not found","error")
sys.exit(1)
return isImported
def check_table(self, db_name, db_user, db_password, TABLE_NAME):
self.set_options(db_name, db_user, db_password, TABLE_NAME)
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -c \; -query \"SELECT name FROM sysobjects where name = '%s' and type='U';\"" %(TABLE_NAME)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"SELECT name FROM sysobjects where name = '%s' and type='U';\" -c ;" %(TABLE_NAME)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(TABLE_NAME + " |"):
log("[I] Table '" + TABLE_NAME + "' already exists in database '" + db_name + "'","info")
return True
else:
log("[I] Table '" + TABLE_NAME + "' does not exist in database '" + db_name + "'","info")
return False
def grant_audit_db_user(self, audit_db_name, db_user, audit_db_user, db_password, audit_db_password,TABLE_NAME):
log("[I] Granting permission to audit user '" + audit_db_user + "' on db '" + audit_db_name + "'","info")
get_cmd = self.get_jisql_cmd(db_user, db_password,audit_db_name)
if is_unix:
query = get_cmd + " -c \; -query \"GRANT INSERT ON XA_ACCESS_AUDIT to %s;\"" %(audit_db_user)
jisql_log(query, db_password)
ret = subprocessCallWithRetry(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"GRANT INSERT ON XA_ACCESS_AUDIT to %s;\" -c ;" %(audit_db_user)
jisql_log(query, db_password)
ret = subprocessCallWithRetry(query)
if ret != 0 :
sys.exit(1)
else:
log("[I] Permission granted to audit user " + audit_db_user , "info")
def import_db_patches(self, db_name, db_user, db_password, file_name):
name = basename(file_name)
if os.path.isfile(file_name):
version = name.split('-')[0]
log("[I] Executing patch on " + db_name + " from file: " + name,"info")
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -c \; -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
log("[I] Patch "+ name +" is already applied" ,"info")
else:
if is_unix:
query = get_cmd + " -c \; -query \"select version from x_db_version_h where version = '%s' and active = 'N';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
while output.strip(version + " |"):
log("[I] Patch "+ name +" is being applied by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
jisql_log(query, db_password)
output = check_output(query)
else:
if is_unix:
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', GETDATE(), '%s', GETDATE(), '%s','N') ;\" -c \;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', GETDATE(), '%s', GETDATE(), '%s','N') ;\" -c ;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log ("[I] Patch "+ name +" is being applied..","info")
else:
log("[E] Patch "+ name +" failed", "error")
if is_unix:
query = get_cmd + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret != 0:
time.sleep(5)
if is_unix:
query = get_cmd + " -c \; -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
ret=0
log("[I] Patch "+ name +" has been applied by some other process!" ,"info")
if ret == 0:
log("[I] "+name + " patch applied","info")
if is_unix:
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] Patch version updated", "info")
else:
log("[E] Updating patch version failed", "error")
sys.exit(1)
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] "+name + " import failed!","error")
sys.exit(1)
def import_auditdb_patches(self, xa_sqlObj,xa_db_host, audit_db_host, db_name, audit_db_name, db_user, audit_db_user, db_password, audit_db_password, file_name, TABLE_NAME):
log("[I] --------- Checking XA_ACCESS_AUDIT table to apply audit db patches --------- ","info")
output = self.check_table(audit_db_name, db_user, db_password, TABLE_NAME)
if output == True:
name = basename(file_name)
if os.path.isfile(file_name):
version = name.split('-')[0]
log("[I] Executing patch on " + audit_db_name + " from file: " + name,"info")
get_cmd1 = xa_sqlObj.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd1 + " -c \; -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd1 + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
log("[I] Patch "+ name +" is already applied" ,"info")
else:
if is_unix:
query = get_cmd1 + " -c \; -query \"select version from x_db_version_h where version = '%s' and active = 'N';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd1 + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
while output.strip(version + " |"):
log("[I] Patch "+ name +" is being applied by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
jisql_log(query, db_password)
output = check_output(query)
else:
if is_unix:
query = get_cmd1 + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', GETDATE(), '%s', GETDATE(), '%s','N') ;\" -c \;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd1 + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', GETDATE(), '%s', GETDATE(), '%s','N') ;\" -c ;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log ("[I] Patch "+ name +" is being applied..","info")
else:
log("[E] Patch "+ name +" failed", "error")
get_cmd2 = self.get_jisql_cmd(db_user, db_password, audit_db_name)
if is_unix:
query = get_cmd2 + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd2 + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] "+name + " patch applied","info")
if is_unix:
query = get_cmd1 + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd1 + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] Patch version updated", "info")
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] Updating patch version failed", "error")
sys.exit(1)
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] "+name + " import failed!","error")
sys.exit(1)
else:
log("[I] Table XA_ACCESS_AUDIT does not exists in " +audit_db_name,"error")
sys.exit(1)
def auditdb_operation(self, xa_db_host, audit_db_host, db_name, audit_db_name,db_user, audit_db_user, db_password, audit_db_password, file_name, TABLE_NAME):
log("[I] --------- Check admin user connection --------- ","info")
self.check_connection(audit_db_name, db_user, db_password)
log("[I] --------- Check audit user connection --------- ","info")
self.check_connection(audit_db_name, audit_db_user, audit_db_password)
log("[I] --------- Check audit table exists --------- ","info")
output = self.check_table(audit_db_name, db_user, db_password, TABLE_NAME)
if output == False:
self.import_db_file(audit_db_name ,db_user, db_password, file_name)
self.grant_audit_db_user( audit_db_name ,db_user, audit_db_user, db_password,audit_db_password,TABLE_NAME)
def execute_java_patches(self, xa_db_host, db_user, db_password, db_name):
my_dict = {}
version = ""
className = ""
app_home = os.path.join(RANGER_ADMIN_HOME,"ews","webapp")
ranger_log = os.path.join(RANGER_ADMIN_HOME,"ews","logs")
javaFiles = os.path.join(app_home,"WEB-INF","classes","org","apache","ranger","patch")
if not os.path.exists(javaFiles):
log("[I] No java patches to apply!","info")
else:
files = os.listdir(javaFiles)
if files:
for filename in files:
f = re.match("^Patch.*?.class$",filename)
if f:
className = re.match("(Patch.*?)_.*.class",filename)
className = className.group(1)
version = re.match("Patch.*?_(.*).class",filename)
version = version.group(1)
key3 = int(version.strip("J"))
my_dict[key3] = filename
keylist = my_dict.keys()
keylist.sort()
for key in keylist:
#print "%s: %s" % (key, my_dict[key])
version = str(key)
className = my_dict[key]
className = className.strip(".class")
if version != "":
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -query \"select version from x_db_version_h where version = 'J%s' and active = 'Y';\" -c \;" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = 'J%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
log("[I] Java patch "+ className +" is already applied" ,"info")
else:
if is_unix:
query = get_cmd + " -query \"select version from x_db_version_h where version = 'J%s' and active = 'N';\" -c \;" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = 'J%s' and active = 'N';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
while(output.strip(version + " |")):
log("[I] Java patch "+ className +" is being applied by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
jisql_log(query, db_password)
output = check_output(query)
else:
if is_unix:
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('J%s', GETDATE(), '%s', GETDATE(), '%s','N') ;\" -c \;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('J%s', GETDATE(), '%s', GETDATE(), '%s','N') ;\" -c ;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log ("[I] java patch "+ className +" is being applied..","info")
else:
log("[E] java patch "+ className +" failed", "error")
sys.exit(1)
if is_unix:
path = os.path.join("%s","WEB-INF","classes","conf:%s","WEB-INF","classes","lib","*:%s","WEB-INF",":%s","META-INF",":%s","WEB-INF","lib","*:%s","WEB-INF","classes",":%s","WEB-INF","classes","META-INF:%s" )%(app_home ,app_home ,app_home, app_home, app_home, app_home ,app_home ,self.SQL_CONNECTOR_JAR)
elif os_name == "WINDOWS":
path = os.path.join("%s","WEB-INF","classes","conf;%s","WEB-INF","classes","lib","*;%s","WEB-INF",";%s","META-INF",";%s","WEB-INF","lib","*;%s","WEB-INF","classes",";%s","WEB-INF","classes","META-INF;%s" )%(app_home ,app_home ,app_home, app_home, app_home, app_home ,app_home ,self.SQL_CONNECTOR_JAR)
get_java_cmd = "%s -Dlogdir=%s -Dlog4j.configuration=db_patch.log4j.xml -cp %s org.apache.ranger.patch.%s"%(self.JAVA_BIN,ranger_log,path,className)
if is_unix:
ret = subprocess.call(shlex.split(get_java_cmd))
elif os_name == "WINDOWS":
ret = subprocess.call(get_java_cmd)
if ret == 0:
if is_unix:
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='J%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='J%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log ("[I] java patch "+ className +" is applied..","info")
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='J%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='J%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] java patch "+ className +" failed", "error")
sys.exit(1)
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='J%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='J%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] applying java patch "+ className +" failed", "error")
sys.exit(1)
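# Relaxes SQL Anywhere server options (reserved_keywords, statement and cursor limits) so the
# schema and patch scripts can run without hitting server-side restrictions.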
def set_options(self, db_name, db_user, db_password, TABLE_NAME):
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -c \; -query \"set option public.reserved_keywords='LIMIT';\""
elif os_name == "WINDOWS":
query = get_cmd + " -query \"set option public.reserved_keywords='LIMIT';\" -c ;"
jisql_log(query, db_password)
ret = subprocessCallWithRetry(shlex.split(query))
if is_unix:
query = get_cmd + " -c \; -query \"set option public.max_statement_count=0;\""
elif os_name == "WINDOWS":
query = get_cmd + " -query \"set option public.max_statement_count=0;\" -c;"
jisql_log(query, db_password)
ret = subprocessCallWithRetry(shlex.split(query))
if is_unix:
query = get_cmd + " -c \; -query \"set option public.max_cursor_count=0;\""
elif os_name == "WINDOWS":
query = get_cmd + " -query \"set option public.max_cursor_count=0;\" -c;"
jisql_log(query, db_password)
ret = subprocessCallWithRetry(shlex.split(query))
def change_admin_default_password(self, xa_db_host, db_user, db_password, db_name,userName,oldPassword,newPassword):
my_dict = {}
version = ""
className = "ChangePasswordUtil"
version = 'DEFAULT_ADMIN_UPDATE'
app_home = os.path.join(RANGER_ADMIN_HOME,"ews","webapp")
ranger_log = os.path.join(RANGER_ADMIN_HOME,"ews","logs")
filePath = os.path.join(app_home,"WEB-INF","classes","org","apache","ranger","patch","cliutil","ChangePasswordUtil.class")
if os.path.exists(filePath):
if version != "":
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\" -c \;" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
log("[I] Ranger admin default password has already been changed!!","info")
else:
if is_unix:
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\" -c \;" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
while(output.strip(version + " |")):
log("[I] Ranger Password change utility is being executed by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
jisql_log(query, db_password)
output = check_output(query)
else:
if is_unix:
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', GETDATE(), '%s', GETDATE(), '%s','N') ;\" -c \;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', GETDATE(), '%s', GETDATE(), '%s','N') ;\" -c ;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log ("[I] Ranger admin default password change request is in process..","info")
else:
log("[E] Ranger admin default password change request failed", "error")
sys.exit(1)
if is_unix:
path = os.path.join("%s","WEB-INF","classes","conf:%s","WEB-INF","classes","lib","*:%s","WEB-INF",":%s","META-INF",":%s","WEB-INF","lib","*:%s","WEB-INF","classes",":%s","WEB-INF","classes","META-INF:%s" )%(app_home ,app_home ,app_home, app_home, app_home, app_home ,app_home ,self.SQL_CONNECTOR_JAR)
elif os_name == "WINDOWS":
path = os.path.join("%s","WEB-INF","classes","conf;%s","WEB-INF","classes","lib","*;%s","WEB-INF",";%s","META-INF",";%s","WEB-INF","lib","*;%s","WEB-INF","classes",";%s","WEB-INF","classes","META-INF;%s" )%(app_home ,app_home ,app_home, app_home, app_home, app_home ,app_home ,self.SQL_CONNECTOR_JAR)
get_java_cmd = "%s -Dlogdir=%s -Dlog4j.configuration=db_patch.log4j.xml -cp %s org.apache.ranger.patch.cliutil.%s %s %s %s -default"%(self.JAVA_BIN,ranger_log,path,className,userName,oldPassword,newPassword)
if is_unix:
status = subprocess.call(shlex.split(get_java_cmd))
elif os_name == "WINDOWS":
status = subprocess.call(get_java_cmd)
if status == 0 or status==2:
if is_unix:
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0 and status == 0:
log ("[I] Ranger admin default password change request processed successfully..","info")
elif ret == 0 and status == 2:
log ("[I] Ranger admin default password change request process skipped!","info")
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] Ranger admin default password change request failed", "error")
sys.exit(1)
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] Ranger admin default password change request failed", "error")
sys.exit(1)
def create_version_history_table(self, db_name, db_user, db_password, file_name,table_name):
name = basename(file_name)
if os.path.isfile(file_name):
isTableExist=self.check_table(db_name, db_user, db_password, table_name)
if isTableExist==False:
log("[I] Importing "+table_name+" table schema to database " + db_name + " from file: " + name,"info")
while(isTableExist==False):
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] "+name + " file imported successfully","info")
else:
log("[E] "+name + " file import failed!","error")
time.sleep(30)
isTableExist=self.check_table(db_name, db_user, db_password, table_name)
else:
log("[E] Table schema file " + name+ " not found","error")
sys.exit(1)
def import_core_db_schema(self, db_name, db_user, db_password, file_name,first_table,last_table):
version = 'CORE_DB_SCHEMA'
if os.path.isfile(file_name):
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -c \; -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'Y';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
log("[I] "+version+" is already imported" ,"info")
else:
if is_unix:
query = get_cmd + " -c \; -query \"select version from x_db_version_h where version = '%s' and active = 'N';\"" %(version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and active = 'N';\" -c ;" %(version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
while(output.strip(version + " |")):
log("[I] "+ version +" is being imported by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
jisql_log(query, db_password)
output = check_output(query)
else:
if is_unix:
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', GETDATE(), '%s', GETDATE(), '%s','N') ;\" -c \;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', GETDATE(), '%s', GETDATE(), '%s','N') ;\" -c ;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret != 0:
log("[E] "+ version +" import failed", "error")
sys.exit(1)
isFirstTableExist = self.check_table(db_name, db_user, db_password, first_table)
isLastTableExist = self.check_table(db_name, db_user, db_password, last_table)
isSchemaCreated=False
if isFirstTableExist == True and isLastTableExist == True :
isSchemaCreated=True
elif isFirstTableExist == False and isLastTableExist == False :
isImported=self.import_db_file(db_name, db_user, db_password, file_name)
if(isImported==False):
log("[I] "+ version +" might being imported by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
isLastTableExist=self.check_table(db_name, db_user, db_password, last_table)
if(isLastTableExist==True):
isSchemaCreated=True
elif isFirstTableExist == False or isLastTableExist == False :
while(isFirstTableExist == False or isLastTableExist==False):
log("[I] "+ version +" is being imported by some other process" ,"info")
time.sleep(retryPatchAfterSeconds)
isFirstTableExist = self.check_table(db_name, db_user, db_password, first_table)
isLastTableExist=self.check_table(db_name, db_user, db_password, last_table)
if(isFirstTableExist == True and isLastTableExist==True):
isSchemaCreated=True
if isSchemaCreated == True:
if is_unix:
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"update x_db_version_h set active='Y' where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] "+version +" import status has been updated", "info")
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] Updating "+version +" import status failed", "error")
sys.exit(1)
else:
if is_unix:
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c \;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"delete from x_db_version_h where version='%s' and active='N' and updated_by='%s';\" -c ;" %(version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
log("[E] "+version + " import failed!","error")
sys.exit(1)
def hasPendingPatches(self, db_name, db_user, db_password, version):
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -c \; -query \"select version from x_db_version_h where version = '%s' and inst_by = '%s' and active = 'Y';\"" %(version,ranger_version)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select version from x_db_version_h where version = '%s' and inst_by = '%s' and active = 'Y';\" -c ;" %(version,ranger_version)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(version + " |"):
return False
else:
return True
def update_applied_patches_status(self,db_name, db_user, db_password,version):
if self.hasPendingPatches(db_name, db_user, db_password,version) == True:
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', GETDATE(), '%s', GETDATE(), '%s','Y') ;\" -c \;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -query \"insert into x_db_version_h (version, inst_at, inst_by, updated_at, updated_by,active) values ('%s', GETDATE(), '%s', GETDATE(), '%s','Y') ;\" -c ;" %(version,ranger_version,client_host)
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret != 0:
log("[E] "+ version +" status entry to x_db_version_h table failed", "error")
sys.exit(1)
else:
log("[I] "+ version +" status entry to x_db_version_h table completed", "info")
def main(argv):
populate_global_dict()
FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(format=FORMAT, level=logging.DEBUG)
if (not 'JAVA_HOME' in os.environ) or (os.environ['JAVA_HOME'] == ""):
log("[E] ---------- JAVA_HOME environment property not defined, aborting installation. ----------", "error")
sys.exit(1)
else:
JAVA_BIN=os.path.join(os.environ['JAVA_HOME'],'bin','java')
if os_name == "WINDOWS" :
JAVA_BIN = JAVA_BIN+'.exe'
if os.path.isfile(JAVA_BIN):
pass
else:
JAVA_BIN=globalDict['JAVA_BIN']
if os.path.isfile(JAVA_BIN):
pass
else:
log("[E] ---------- JAVA Not Found, aborting installation. ----------", "error")
sys.exit(1)
#get ranger version
global ranger_version
try:
lib_home = os.path.join(RANGER_ADMIN_HOME,"ews","webapp","WEB-INF","lib","*")
get_ranger_version_cmd="%s -cp %s org.apache.ranger.common.RangerVersionInfo"%(JAVA_BIN,lib_home)
ranger_version = check_output(get_ranger_version_cmd).split("\n")[1]
except Exception as error:
ranger_version=''
try:
if ranger_version=="" or ranger_version=="ranger-admin - None":
script_path = os.path.join(RANGER_ADMIN_HOME,"ews","ranger-admin-services.sh")
ranger_version=check_output(script_path +" version").split("\n")[1]
except Exception as error:
ranger_version=''
try:
if ranger_version=="" or ranger_version=="ranger-admin - None":
ranger_version=check_output("ranger-admin version").split("\n")[1]
except Exception as error:
ranger_version=''
if ranger_version=="" or ranger_version is None:
log("[E] Unable to find ranger version details, Exiting..", "error")
sys.exit(1)
XA_DB_FLAVOR=globalDict['DB_FLAVOR']
AUDIT_DB_FLAVOR=globalDict['DB_FLAVOR']
XA_DB_FLAVOR = XA_DB_FLAVOR.upper()
AUDIT_DB_FLAVOR = AUDIT_DB_FLAVOR.upper()
log("[I] DB FLAVOR :" + XA_DB_FLAVOR ,"info")
xa_db_host = globalDict['db_host']
audit_db_host = globalDict['db_host']
mysql_dbversion_catalog = os.path.join('db','mysql','create_dbversion_catalog.sql')
mysql_core_file = globalDict['mysql_core_file']
mysql_audit_file = globalDict['mysql_audit_file']
mysql_patches = os.path.join('db','mysql','patches')
mysql_auditdb_patches = os.path.join('db','mysql','patches','audit')
oracle_dbversion_catalog = os.path.join('db','oracle','create_dbversion_catalog.sql')
oracle_core_file = globalDict['oracle_core_file']
oracle_audit_file = globalDict['oracle_audit_file']
oracle_patches = os.path.join('db','oracle','patches')
oracle_auditdb_patches = os.path.join('db','oracle','patches','audit')
postgres_dbversion_catalog = os.path.join('db','postgres','create_dbversion_catalog.sql')
postgres_core_file = globalDict['postgres_core_file']
postgres_audit_file = globalDict['postgres_audit_file']
postgres_patches = os.path.join('db','postgres','patches')
postgres_auditdb_patches = os.path.join('db','postgres','patches','audit')
sqlserver_dbversion_catalog = os.path.join('db','sqlserver','create_dbversion_catalog.sql')
sqlserver_core_file = globalDict['sqlserver_core_file']
sqlserver_audit_file = globalDict['sqlserver_audit_file']
sqlserver_patches = os.path.join('db','sqlserver','patches')
sqlserver_auditdb_patches = os.path.join('db','sqlserver','patches','audit')
sqlanywhere_dbversion_catalog = os.path.join('db','sqlanywhere','create_dbversion_catalog.sql')
sqlanywhere_core_file = globalDict['sqlanywhere_core_file']
sqlanywhere_audit_file = globalDict['sqlanywhere_audit_file']
sqlanywhere_patches = os.path.join('db','sqlanywhere','patches')
sqlanywhere_auditdb_patches = os.path.join('db','sqlanywhere','patches','audit')
db_name = globalDict['db_name']
db_user = globalDict['db_user']
db_password = globalDict['db_password']
x_db_version = 'x_db_version_h'
xa_access_audit = 'xa_access_audit'
audit_db_name=''
audit_db_user=''
audit_db_password=''
audit_store = None
if 'audit_store' in globalDict:
audit_store = globalDict['audit_store']
audit_store=audit_store.lower()
if audit_store =='db':
if 'audit_db_name' in globalDict:
audit_db_name = globalDict['audit_db_name']
if 'audit_db_user' in globalDict:
audit_db_user = globalDict['audit_db_user']
if 'audit_db_password' in globalDict:
audit_db_password = globalDict['audit_db_password']
db_ssl_enabled='false'
db_ssl_required='false'
db_ssl_verifyServerCertificate='false'
javax_net_ssl_keyStore=''
javax_net_ssl_keyStorePassword=''
javax_net_ssl_trustStore=''
javax_net_ssl_trustStorePassword=''
if XA_DB_FLAVOR == "MYSQL":
if 'db_ssl_enabled' in globalDict:
db_ssl_enabled=globalDict['db_ssl_enabled'].lower()
if db_ssl_enabled == 'true':
if 'db_ssl_required' in globalDict:
db_ssl_required=globalDict['db_ssl_required'].lower()
if 'db_ssl_verifyServerCertificate' in globalDict:
db_ssl_verifyServerCertificate=globalDict['db_ssl_verifyServerCertificate'].lower()
if db_ssl_verifyServerCertificate == 'true':
if 'javax_net_ssl_keyStore' in globalDict:
javax_net_ssl_keyStore=globalDict['javax_net_ssl_keyStore']
if 'javax_net_ssl_keyStorePassword' in globalDict:
javax_net_ssl_keyStorePassword=globalDict['javax_net_ssl_keyStorePassword']
if 'javax_net_ssl_trustStore' in globalDict:
javax_net_ssl_trustStore=globalDict['javax_net_ssl_trustStore']
if 'javax_net_ssl_trustStorePassword' in globalDict:
javax_net_ssl_trustStorePassword=globalDict['javax_net_ssl_trustStorePassword']
if not os.path.exists(javax_net_ssl_keyStore):
log("[E] Invalid file Name! Unable to find keystore file:"+javax_net_ssl_keyStore,"error")
sys.exit(1)
if not os.path.exists(javax_net_ssl_trustStore):
log("[E] Invalid file Name! Unable to find truststore file:"+javax_net_ssl_trustStore,"error")
sys.exit(1)
if javax_net_ssl_keyStorePassword is None or javax_net_ssl_keyStorePassword =="":
log("[E] Invalid ssl keystore password!","error")
sys.exit(1)
if javax_net_ssl_trustStorePassword is None or javax_net_ssl_trustStorePassword =="":
log("[E] Invalid ssl truststore password!","error")
sys.exit(1)
MYSQL_CONNECTOR_JAR=globalDict['SQL_CONNECTOR_JAR']
xa_sqlObj = MysqlConf(xa_db_host, MYSQL_CONNECTOR_JAR, JAVA_BIN,db_ssl_enabled,db_ssl_required,db_ssl_verifyServerCertificate,javax_net_ssl_keyStore,javax_net_ssl_keyStorePassword,javax_net_ssl_trustStore,javax_net_ssl_trustStorePassword)
xa_db_version_file = os.path.join(RANGER_ADMIN_HOME , mysql_dbversion_catalog)
xa_db_core_file = os.path.join(RANGER_ADMIN_HOME , mysql_core_file)
xa_patch_file = os.path.join(RANGER_ADMIN_HOME ,mysql_patches)
audit_patch_file = os.path.join(RANGER_ADMIN_HOME ,mysql_auditdb_patches)
first_table='x_asset'
last_table='xa_access_audit'
elif XA_DB_FLAVOR == "ORACLE":
ORACLE_CONNECTOR_JAR=globalDict['SQL_CONNECTOR_JAR']
xa_sqlObj = OracleConf(xa_db_host, ORACLE_CONNECTOR_JAR, JAVA_BIN)
xa_db_version_file = os.path.join(RANGER_ADMIN_HOME ,oracle_dbversion_catalog)
xa_db_core_file = os.path.join(RANGER_ADMIN_HOME ,oracle_core_file)
xa_patch_file = os.path.join(RANGER_ADMIN_HOME ,oracle_patches)
audit_patch_file = os.path.join(RANGER_ADMIN_HOME ,oracle_auditdb_patches)
first_table='X_PORTAL_USER'
last_table='X_AUDIT_MAP'
elif XA_DB_FLAVOR == "POSTGRES":
db_user=db_user.lower()
db_name=db_name.lower()
POSTGRES_CONNECTOR_JAR = globalDict['SQL_CONNECTOR_JAR']
xa_sqlObj = PostgresConf(xa_db_host, POSTGRES_CONNECTOR_JAR, JAVA_BIN)
xa_db_version_file = os.path.join(RANGER_ADMIN_HOME , postgres_dbversion_catalog)
xa_db_core_file = os.path.join(RANGER_ADMIN_HOME , postgres_core_file)
xa_patch_file = os.path.join(RANGER_ADMIN_HOME , postgres_patches)
audit_patch_file = os.path.join(RANGER_ADMIN_HOME ,postgres_auditdb_patches)
first_table='x_portal_user'
last_table='x_group_module_perm'
elif XA_DB_FLAVOR == "MSSQL":
SQLSERVER_CONNECTOR_JAR = globalDict['SQL_CONNECTOR_JAR']
xa_sqlObj = SqlServerConf(xa_db_host, SQLSERVER_CONNECTOR_JAR, JAVA_BIN)
xa_db_version_file = os.path.join(RANGER_ADMIN_HOME ,sqlserver_dbversion_catalog)
xa_db_core_file = os.path.join(RANGER_ADMIN_HOME , sqlserver_core_file)
xa_patch_file = os.path.join(RANGER_ADMIN_HOME , sqlserver_patches)
audit_patch_file = os.path.join(RANGER_ADMIN_HOME ,sqlserver_auditdb_patches)
first_table='x_portal_user'
last_table='x_group_module_perm'
elif XA_DB_FLAVOR == "SQLA":
if not os_name == "WINDOWS" :
if os.environ['LD_LIBRARY_PATH'] == "":
log("[E] ---------- LD_LIBRARY_PATH environment property not defined, aborting installation. ----------", "error")
sys.exit(1)
SQLANYWHERE_CONNECTOR_JAR = globalDict['SQL_CONNECTOR_JAR']
xa_sqlObj = SqlAnywhereConf(xa_db_host, SQLANYWHERE_CONNECTOR_JAR, JAVA_BIN)
xa_db_version_file = os.path.join(RANGER_ADMIN_HOME ,sqlanywhere_dbversion_catalog)
xa_db_core_file = os.path.join(RANGER_ADMIN_HOME , sqlanywhere_core_file)
xa_patch_file = os.path.join(RANGER_ADMIN_HOME , sqlanywhere_patches)
audit_patch_file = os.path.join(RANGER_ADMIN_HOME ,sqlanywhere_auditdb_patches)
first_table='x_portal_user'
last_table='x_group_module_perm'
else:
log("[E] --------- NO SUCH SUPPORTED DB FLAVOUR!! ---------", "error")
sys.exit(1)
if AUDIT_DB_FLAVOR == "MYSQL":
MYSQL_CONNECTOR_JAR=globalDict['SQL_CONNECTOR_JAR']
audit_sqlObj = MysqlConf(audit_db_host,MYSQL_CONNECTOR_JAR,JAVA_BIN,db_ssl_enabled,db_ssl_required,db_ssl_verifyServerCertificate,javax_net_ssl_keyStore,javax_net_ssl_keyStorePassword,javax_net_ssl_trustStore,javax_net_ssl_trustStorePassword)
audit_db_file = os.path.join(RANGER_ADMIN_HOME ,mysql_audit_file)
elif AUDIT_DB_FLAVOR == "ORACLE":
ORACLE_CONNECTOR_JAR=globalDict['SQL_CONNECTOR_JAR']
audit_sqlObj = OracleConf(audit_db_host, ORACLE_CONNECTOR_JAR, JAVA_BIN)
audit_db_file = os.path.join(RANGER_ADMIN_HOME , oracle_audit_file)
elif AUDIT_DB_FLAVOR == "POSTGRES":
audit_db_user=audit_db_user.lower()
audit_db_name=audit_db_name.lower()
POSTGRES_CONNECTOR_JAR = globalDict['SQL_CONNECTOR_JAR']
audit_sqlObj = PostgresConf(audit_db_host, POSTGRES_CONNECTOR_JAR, JAVA_BIN)
audit_db_file = os.path.join(RANGER_ADMIN_HOME , postgres_audit_file)
elif AUDIT_DB_FLAVOR == "MSSQL":
SQLSERVER_CONNECTOR_JAR = globalDict['SQL_CONNECTOR_JAR']
audit_sqlObj = SqlServerConf(audit_db_host, SQLSERVER_CONNECTOR_JAR, JAVA_BIN)
audit_db_file = os.path.join(RANGER_ADMIN_HOME , sqlserver_audit_file)
elif AUDIT_DB_FLAVOR == "SQLA":
SQLANYWHERE_CONNECTOR_JAR = globalDict['SQL_CONNECTOR_JAR']
audit_sqlObj = SqlAnywhereConf(audit_db_host, SQLANYWHERE_CONNECTOR_JAR, JAVA_BIN)
audit_db_file = os.path.join(RANGER_ADMIN_HOME , sqlanywhere_audit_file)
else:
log("[E] --------- NO SUCH SUPPORTED DB FLAVOUR!! ---------", "error")
sys.exit(1)
log("[I] --------- Verifying Ranger DB connection ---------","info")
xa_sqlObj.check_connection(db_name, db_user, db_password)
if len(argv)==1:
log("[I] --------- Verifying version history table ---------","info")
output = xa_sqlObj.check_table(db_name, db_user, db_password, x_db_version)
if output == False:
xa_sqlObj.create_version_history_table(db_name, db_user, db_password, xa_db_version_file,x_db_version)
log("[I] --------- Importing Ranger Core DB Schema ---------","info")
xa_sqlObj.import_core_db_schema(db_name, db_user, db_password, xa_db_core_file,first_table,last_table)
if XA_DB_FLAVOR == "ORACLE":
if xa_sqlObj.check_table(db_name, db_user, db_password, xa_access_audit):
if audit_db_user != "" and db_user != audit_db_user:
xa_sqlObj.create_synonym(db_name, db_user, db_password,audit_db_user)
applyDBPatches=xa_sqlObj.hasPendingPatches(db_name, db_user, db_password, "DB_PATCHES")
if applyDBPatches == True:
log("[I] --------- Applying Ranger DB patches ---------","info")
xa_sqlObj.apply_patches(db_name, db_user, db_password, xa_patch_file)
else:
log("[I] DB_PATCHES have already been applied","info")
if audit_store == "db" and audit_db_password!='':
log("[I] --------- Starting Audit Operation ---------","info")
audit_sqlObj.auditdb_operation(xa_db_host, audit_db_host, db_name, audit_db_name, db_user, audit_db_user, db_password, audit_db_password, audit_db_file, xa_access_audit)
log("[I] --------- Applying Audit DB patches ---------","info")
audit_sqlObj.apply_auditdb_patches(xa_sqlObj,xa_db_host, audit_db_host, db_name, audit_db_name, db_user, audit_db_user, db_password, audit_db_password, audit_patch_file, xa_access_audit)
if len(argv)>1:
for i in range(len(argv)):
if str(argv[i]) == "-javapatch":
applyJavaPatches=xa_sqlObj.hasPendingPatches(db_name, db_user, db_password, "JAVA_PATCHES")
if applyJavaPatches == True:
log("[I] ----------------- Applying java patches ------------", "info")
xa_sqlObj.execute_java_patches(xa_db_host, db_user, db_password, db_name)
xa_sqlObj.update_applied_patches_status(db_name,db_user, db_password,"JAVA_PATCHES")
else:
log("[I] JAVA_PATCHES have already been applied","info")
if str(argv[i]) == "-changepassword":
if len(argv)==5:
userName=argv[2]
oldPassword=argv[3]
newPassword=argv[4]
if oldPassword==newPassword:
log("[E] Old Password and New Password argument are same. Exiting!!", "error")
sys.exit(1)
if userName != "" and oldPassword != "" and newPassword != "":
password_validation(newPassword)
xa_sqlObj.change_admin_default_password(xa_db_host, db_user, db_password, db_name,userName,oldPassword,newPassword)
else:
log("[E] Invalid argument list.", "error")
log("[I] Usage : python db_setup.py -changepassword <loginID> <currentPassword> <newPassword>","info")
sys.exit(1)
main(sys.argv)
| [] | [] | ["LD_LIBRARY_PATH", "RANGER_ADMIN_CONF", "RANGER_ADMIN_HOME", "JAVA_HOME"] | [] | ["LD_LIBRARY_PATH", "RANGER_ADMIN_CONF", "RANGER_ADMIN_HOME", "JAVA_HOME"] | python | 4 | 0 |
e2e/builder/build_test.go
|
//go:build integration
// +build integration
// To enable compilation of this file in Goland, go to "Settings -> Go -> Vendoring & Build Tags -> Custom Tags" and add "integration"
/*
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package builder
import (
"os"
"testing"
"time"
. "github.com/onsi/gomega"
. "github.com/apache/camel-k/e2e/support"
"github.com/apache/camel-k/pkg/apis/camel/v1"
"github.com/apache/camel-k/pkg/util/openshift"
)
type kitOptions struct {
dependencies []string
traits []string
}
func TestKitTimerToLogFullBuild(t *testing.T) {
doKitFullBuild(t, "timer-to-log", "300Mi", "5m0s", TestTimeoutLong, kitOptions{
dependencies: []string{
"camel:timer", "camel:log",
},
})
}
func TestKitKnativeFullBuild(t *testing.T) {
doKitFullBuild(t, "knative", "300Mi", "5m0s", TestTimeoutLong, kitOptions{
dependencies: []string{
"camel-k-knative",
},
})
}
func TestKitTimerToLogFullNativeBuild(t *testing.T) {
doKitFullBuild(t, "timer-to-log", "4Gi", "15m0s", 2*TestTimeoutLong, kitOptions{
dependencies: []string{
"camel:timer", "camel:log",
},
traits: []string{
"quarkus.package-type=native",
},
})
}
func doKitFullBuild(t *testing.T, name string, memoryLimit string, buildTimeout string, testTimeout time.Duration, options kitOptions) {
WithNewTestNamespace(t, func(ns string) {
strategy := os.Getenv("KAMEL_INSTALL_BUILD_PUBLISH_STRATEGY")
ocp, err := openshift.IsOpenShift(TestClient())
Expect(err).To(Succeed())
args := []string{"install", "-n", ns}
args = append(args, "--build-timeout", buildTimeout)
// TODO: configure build Pod resources if applicable
if strategy == "Spectrum" || ocp {
args = append(args, "--operator-resources", "limits.memory="+memoryLimit)
}
Expect(Kamel(args...).Execute()).To(Succeed())
buildKitArgs := []string{"kit", "create", name, "-n", ns}
for _, dependency := range options.dependencies {
buildKitArgs = append(buildKitArgs, "-d", dependency)
}
for _, trait := range options.traits {
buildKitArgs = append(buildKitArgs, "-t", trait)
}
Expect(Kamel(buildKitArgs...).Execute()).To(Succeed())
Eventually(Build(ns, name)).ShouldNot(BeNil())
Eventually(BuildPhase(ns, name), testTimeout).Should(Equal(v1.BuildPhaseSucceeded))
Eventually(KitPhase(ns, name), testTimeout).Should(Equal(v1.IntegrationKitPhaseReady))
})
}
| ["\"KAMEL_INSTALL_BUILD_PUBLISH_STRATEGY\""] | [] | ["KAMEL_INSTALL_BUILD_PUBLISH_STRATEGY"] | [] | ["KAMEL_INSTALL_BUILD_PUBLISH_STRATEGY"] | go | 1 | 0 |
elastic/service.go
|
package elastic
import (
"bytes"
"fmt"
"io"
"math"
"os"
"regexp"
"strconv"
"strings"
"sync"
"time"
"encoding/csv"
"io/ioutil"
"net/http"
"github.com/go-openapi/strfmt"
"github.com/LF-Engineering/dev-analytics-affiliation/errs"
"github.com/LF-Engineering/dev-analytics-affiliation/gen/models"
"github.com/LF-Engineering/dev-analytics-affiliation/shared"
"github.com/elastic/go-elasticsearch/v7"
"github.com/elastic/go-elasticsearch/v7/esapi"
log "github.com/LF-Engineering/dev-analytics-affiliation/logging"
jsoniter "github.com/json-iterator/go"
)
// TopContributorsCacheEntry - top contributors single cache entry
type TopContributorsCacheEntry struct {
Top *models.TopContributorsFlatOutput `json:"v"`
Tm time.Time `json:"t"`
Key string `json:"k"`
}
// Service - interface to access ES data
type Service interface {
shared.ServiceInterface
// External methods
GetUnaffiliated([]string, int64) (*models.GetUnaffiliatedOutput, error)
AggsUnaffiliated(string, int64) ([]*models.UnaffiliatedDataOutput, error)
ContributorsCount(string, string) (int64, error)
GetTopContributors([]string, []string, int64, int64, int64, int64, string, string, string) (*models.TopContributorsFlatOutput, error)
UpdateByQuery(string, string, interface{}, string, interface{}, bool) error
DetAffRange([]*models.EnrollmentProjectRange) ([]*models.EnrollmentProjectRange, string, error)
GetUUIDsProjects([]string) (map[string][]string, string, error)
// ES Cache methods
TopContributorsCacheGet(string) (*TopContributorsCacheEntry, bool)
TopContributorsCacheSet(string, *TopContributorsCacheEntry)
TopContributorsCacheDelete(string)
TopContributorsCacheDeleteExpired()
// Internal methods
projectSlugToIndexPattern(string) string
projectSlugToIndexPatterns(string, []string) []string
projectSlugsToIndexPattern([]string) string
projectSlugsToIndexPatterns([]string, []string) []string
mapDataSourceTypes([]string) []string
contributorStatsMainQuery(string, string, string, int64, int64, int64, int64, string, string, string) (string, error)
contributorStatsMergeQuery(string, string, string, string, string, string, int64, int64, bool) (string, error)
dataSourceTypeFields(string) (map[string]string, error)
searchCondition(string, string) (string, error)
getAllStringFields(string) ([]string, error)
additionalWhere(string, string) (string, error)
having(string, string) (string, error)
orderBy(string, string, string) (string, error)
dataSourceQuery(string) (map[string][]string, bool, error)
search(string, io.Reader) (*esapi.Response, error)
}
type service struct {
shared.ServiceStruct
client *elasticsearch.Client
url string
}
type aggsUnaffiliatedResult struct {
Aggregations struct {
Unaffiliated struct {
Unaffiliated struct {
Buckets []struct {
Key string `json:"key"`
DocCount int64 `json:"doc_count"`
} `json:"buckets"`
} `json:"unaffiliated"`
} `json:"unaffiliated"`
} `json:"aggregations"`
}
func (s *service) TopContributorsCacheGet(key string) (entry *TopContributorsCacheEntry, ok bool) {
data := `{"query":{"term":{"k.keyword":{"value": "` + s.JSONEscape(key) + `"}}}}`
payloadBytes := []byte(data)
payloadBody := bytes.NewReader(payloadBytes)
method := "POST"
url := fmt.Sprintf("%s/es_cache/_search", s.url)
req, err := http.NewRequest(method, url, payloadBody)
if err != nil {
log.Warn(fmt.Sprintf("New request error: %+v for %s url: %s, data: %s\n", err, method, url, data))
return
}
req.Header.Set("Content-Type", "application/json")
resp, err := http.DefaultClient.Do(req)
if err != nil {
log.Warn(fmt.Sprintf("do request error: %+v for %s url: %s, data: %s\n", err, method, url, data))
return
}
var body []byte
body, err = ioutil.ReadAll(resp.Body)
if err != nil {
log.Warn(fmt.Sprintf("ReadAll non-ok request error: %+v for %s url: %s, data: %s\n", err, method, url, data))
return
}
_ = resp.Body.Close()
if resp.StatusCode != 200 {
log.Warn(fmt.Sprintf("Method:%s url:%s data: %s status:%d\n%s\n", method, url, data, resp.StatusCode, body))
return
}
type Result struct {
Hits struct {
Hits []struct {
Source TopContributorsCacheEntry `json:"_source"`
} `json:"hits"`
} `json:"hits"`
Data []interface{} `json:"rows"`
}
var result Result
err = jsoniter.Unmarshal(body, &result)
if err != nil {
log.Warn(fmt.Sprintf("Unmarshal error: %+v", err))
return
}
if len(result.Hits.Hits) == 0 {
return
}
entry = &(result.Hits.Hits[0].Source)
ok = true
return
}
func (s *service) TopContributorsCacheSet(key string, entry *TopContributorsCacheEntry) {
entry.Key = key
payloadBytes, err := jsoniter.Marshal(entry)
if err != nil {
log.Warn(fmt.Sprintf("json %+v marshal error: %+v\n", entry, err))
return
}
payloadBody := bytes.NewReader(payloadBytes)
method := "POST"
url := fmt.Sprintf("%s/es_cache/_doc", s.url)
req, err := http.NewRequest(method, url, payloadBody)
if err != nil {
data := string(payloadBytes)
log.Warn(fmt.Sprintf("New request error: %+v for %s url: %s, data: %s\n", err, method, url, data))
return
}
req.Header.Set("Content-Type", "application/json")
resp, err := http.DefaultClient.Do(req)
if err != nil {
data := string(payloadBytes)
log.Warn(fmt.Sprintf("do request error: %+v for %s url: %s, data: %s\n", err, method, url, data))
return
}
defer func() { _ = resp.Body.Close() }()
if resp.StatusCode != 201 {
data := string(payloadBytes)
var body []byte
body, err = ioutil.ReadAll(resp.Body)
if err != nil {
log.Warn(fmt.Sprintf("ReadAll non-ok request error: %+v for %s url: %s, data: %s\n", err, method, url, data))
return
}
log.Warn(fmt.Sprintf("Method:%s url:%s data: %s status:%d\n%s\n", method, url, data, resp.StatusCode, body))
return
}
return
}
func (s *service) TopContributorsCacheDelete(key string) {
data := `{"query":{"term":{"k.keyword":{"value": "` + s.JSONEscape(key) + `"}}}}`
payloadBytes := []byte(data)
payloadBody := bytes.NewReader(payloadBytes)
method := "POST"
url := fmt.Sprintf("%s/es_cache/_delete_by_query", s.url)
req, err := http.NewRequest(method, url, payloadBody)
if err != nil {
log.Warn(fmt.Sprintf("New request error: %+v for %s url: %s, data: %s\n", err, method, url, data))
return
}
req.Header.Set("Content-Type", "application/json")
resp, err := http.DefaultClient.Do(req)
if err != nil {
log.Warn(fmt.Sprintf("do request error: %+v for %s url: %s, data: %s\n", err, method, url, data))
return
}
defer func() { _ = resp.Body.Close() }()
if resp.StatusCode != 200 {
var body []byte
body, err = ioutil.ReadAll(resp.Body)
if err != nil {
log.Warn(fmt.Sprintf("ReadAll non-ok request error: %+v for %s url: %s, data: %s\n", err, method, url, data))
return
}
log.Warn(fmt.Sprintf("Method:%s url:%s data: %s status:%d\n%s\n", method, url, data, resp.StatusCode, body))
return
}
}
func (s *service) TopContributorsCacheDeleteExpired() {
data := `{"query":{"range":{"t":{"lte": "` + shared.ESCacheTTL + `"}}}}`
payloadBytes := []byte(data)
payloadBody := bytes.NewReader(payloadBytes)
method := "POST"
url := fmt.Sprintf("%s/es_cache/_delete_by_query", s.url)
req, err := http.NewRequest(method, url, payloadBody)
if err != nil {
log.Warn(fmt.Sprintf("New request error: %+v for %s url: %s, data: %s\n", err, method, url, data))
return
}
req.Header.Set("Content-Type", "application/json")
resp, err := http.DefaultClient.Do(req)
if err != nil {
log.Warn(fmt.Sprintf("do request error: %+v for %s url: %s, data: %s\n", err, method, url, data))
return
}
defer func() { _ = resp.Body.Close() }()
if resp.StatusCode != 200 {
var body []byte
body, err = ioutil.ReadAll(resp.Body)
if err != nil {
log.Warn(fmt.Sprintf("ReadAll non-ok request error: %+v for %s url: %s, data: %s\n", err, method, url, data))
return
}
log.Warn(fmt.Sprintf("Method:%s url:%s data: %s status:%d\n%s\n", method, url, data, resp.StatusCode, body))
return
}
}
// New returns an ES connection
func New(client *elasticsearch.Client, url string) Service {
return &service{
client: client,
url: url,
}
}
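// Usage sketch (hypothetical wiring, not from this repository): the ES client is
// built elsewhere and passed in together with the cluster URL, e.g.
//   client, _ := elasticsearch.NewDefaultClient()
//   svc := New(client, "http://localhost:9200")
// The URL is kept separately because several methods below call the _sql and
// es_cache endpoints directly over HTTP instead of going through the client.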
func (s *service) GetUUIDsProjects(projects []string) (uuidsProjects map[string][]string, status string, err error) {
log.Info(fmt.Sprintf("GetUUIDsProjects: projects:%d", len(projects)))
uuidsProjects = make(map[string][]string)
projectsUUIDs := make(map[string][]string)
defer func() {
log.Info(fmt.Sprintf("GetUUIDsProjects(exit): projects:%d projectsUUIDs:%d uuidsProjects:%d status:%s err:%v", len(projects), len(projectsUUIDs), len(uuidsProjects), status, err))
}()
type projectsResult struct {
project string
uuids []string
err error
}
getProjectsUUIDs := func(ch chan projectsResult, project string) (res projectsResult) {
defer func() {
if ch != nil {
ch <- res
}
}()
res.project = project
pattern := "sds-" + strings.Replace(strings.TrimSpace(project), "/", "-", -1) + "-*,-*-raw,-*-for-merge"
data := fmt.Sprintf(
`{"query":"select author_uuid from \"%s\" where author_uuid is not null and author_uuid != '' group by author_uuid order by author_uuid","fetch_size":%d}`,
//`{"query":"select author_uuid from \"%s\" where author_uuid is not null and author_uuid != '' and author_uuid = 'fd78aef3e68d9f31177e87c1c0ec37a9a77ba6c5' group by author_uuid order by author_uuid","fetch_size":%d}`,
s.JSONEscape(pattern),
shared.FetchSize,
)
payloadBytes := []byte(data)
payloadBody := bytes.NewReader(payloadBytes)
method := "POST"
url := fmt.Sprintf("%s/_sql?format=json", s.url)
req, err := http.NewRequest(method, url, payloadBody)
if err != nil {
res.err = fmt.Errorf("new request error: %+v for %s url: %s, data: %s", err, method, url, data)
return
}
req.Header.Set("Content-Type", "application/json")
resp, err := http.DefaultClient.Do(req)
if err != nil {
res.err = fmt.Errorf("do request error: %+v for %s url: %s, data: %s", err, method, url, data)
return
}
var body []byte
body, err = ioutil.ReadAll(resp.Body)
if err != nil {
res.err = fmt.Errorf("readAll non-ok request error: %+v for %s url: %s, data: %s", err, method, url, data)
return
}
_ = resp.Body.Close()
if resp.StatusCode != 200 {
res.err = fmt.Errorf("method:%s url:%s data: %s status:%d\n%s", method, url, data, resp.StatusCode, body)
return
}
type uuidsResult struct {
Cursor string `json:"cursor"`
Rows [][]string `json:"rows"`
}
var result uuidsResult
err = jsoniter.Unmarshal(body, &result)
if err != nil {
res.err = fmt.Errorf("unmarshal error: %+v", err)
return
}
for _, row := range result.Rows {
res.uuids = append(res.uuids, row[0])
}
if len(result.Rows) == 0 {
return
}
for {
data = `{"cursor":"` + result.Cursor + `"}`
payloadBytes = []byte(data)
payloadBody = bytes.NewReader(payloadBytes)
req, err = http.NewRequest(method, url, payloadBody)
if err != nil {
res.err = fmt.Errorf("new request error: %+v for %s url: %s, data: %s", err, method, url, data)
return
}
req.Header.Set("Content-Type", "application/json")
resp, err = http.DefaultClient.Do(req)
if err != nil {
res.err = fmt.Errorf("do request error: %+v for %s url: %s, data: %s", err, method, url, data)
return
}
body, err = ioutil.ReadAll(resp.Body)
if err != nil {
res.err = fmt.Errorf("readAll non-ok request error: %+v for %s url: %s, data: %s", err, method, url, data)
return
}
_ = resp.Body.Close()
if resp.StatusCode != 200 {
res.err = fmt.Errorf("method:%s url:%s data: %s status:%d\n%s", method, url, data, resp.StatusCode, body)
return
}
err = jsoniter.Unmarshal(body, &result)
if err != nil {
res.err = fmt.Errorf("unmarshal error: %+v", err)
return
}
if len(result.Rows) == 0 {
break
}
for _, row := range result.Rows {
res.uuids = append(res.uuids, row[0])
}
}
url = fmt.Sprintf("%s/_sql/close", s.url)
data = `{"cursor":"` + result.Cursor + `"}`
payloadBytes = []byte(data)
payloadBody = bytes.NewReader(payloadBytes)
req, err = http.NewRequest(method, url, payloadBody)
if err != nil {
res.err = fmt.Errorf("new request error: %+v for %s url: %s, data: %s", err, method, url, data)
return
}
req.Header.Set("Content-Type", "application/json")
resp, err = http.DefaultClient.Do(req)
if err != nil {
res.err = fmt.Errorf("do request error: %+v for %s url: %s, data: %s", err, method, url, data)
return
}
body, err = ioutil.ReadAll(resp.Body)
if err != nil {
res.err = fmt.Errorf("readAll non-ok request error: %+v for %s url: %s, data: %s", err, method, url, data)
return
}
_ = resp.Body.Close()
if resp.StatusCode != 200 {
res.err = fmt.Errorf("method:%s url:%s data: %s status:%d\n%s", method, url, data, resp.StatusCode, body)
return
}
// fmt.Printf("%s: %d rows\n", project, len(res.uuids))
return
}
processed := 0
all := len(projects)
progressInfo := func() {
processed++
if processed%10 == 0 {
log.Info(fmt.Sprintf("processed %d/%d\n", processed, all))
}
}
thrN := s.GetThreadsNum()
if thrN > 1 {
thrN = int(math.Round(math.Sqrt(float64(thrN))))
log.Info(fmt.Sprintf("Using %d parallel ES queries\n", thrN))
ch := make(chan projectsResult)
nThreads := 0
for _, project := range projects {
go getProjectsUUIDs(ch, project)
nThreads++
if nThreads == thrN {
res := <-ch
nThreads--
if res.err == nil {
if len(res.uuids) > 0 {
projectsUUIDs[res.project] = res.uuids
}
} else {
log.Info(fmt.Sprintf("%s: %v\n", res.project, res.err))
}
progressInfo()
}
}
for nThreads > 0 {
res := <-ch
nThreads--
if res.err == nil {
if len(res.uuids) > 0 {
projectsUUIDs[res.project] = res.uuids
}
} else {
log.Info(fmt.Sprintf("%s: %v\n", res.project, res.err))
}
progressInfo()
}
} else {
for _, project := range projects {
res := getProjectsUUIDs(nil, project)
if res.err == nil {
if len(res.uuids) > 0 {
projectsUUIDs[res.project] = res.uuids
}
} else {
log.Info(fmt.Sprintf("%s: %v\n", res.project, res.err))
}
progressInfo()
}
}
uuidsProjs := make(map[string]map[string]struct{})
for project, uuids := range projectsUUIDs {
for _, uuid := range uuids {
_, ok := uuidsProjs[uuid]
if !ok {
uuidsProjs[uuid] = make(map[string]struct{})
}
uuidsProjs[uuid][project] = struct{}{}
}
}
for uuid, projects := range uuidsProjs {
for project := range projects {
_, ok := uuidsProjects[uuid]
if !ok {
uuidsProjects[uuid] = []string{}
}
uuidsProjects[uuid] = append(uuidsProjects[uuid], project)
}
// fmt.Printf("%s: %+v\n", uuid, uuidsProjects[uuid])
}
status = fmt.Sprintf("Projects: %d, UUIDs: %d", len(projectsUUIDs), len(uuidsProjects))
return
}
func (s *service) DetAffRange(inSubjects []*models.EnrollmentProjectRange) (outSubjects []*models.EnrollmentProjectRange, status string, err error) {
log.Info(fmt.Sprintf("DetAffRange: in:%d", len(inSubjects)))
defer func() {
log.Info(fmt.Sprintf("DetAffRange(exit): in:%d out:%d status:%s err:%v", len(inSubjects), len(outSubjects), status, err))
}()
packSize := 1000
type rangeResult struct {
uuid string
project *string
start strfmt.DateTime
end strfmt.DateTime
setStart bool
setEnd bool
err error
}
mp := make(map[string]map[string]*models.EnrollmentProjectRange)
for _, subject := range inSubjects {
var project string
if subject.ProjectSlug != nil {
project = *subject.ProjectSlug
}
_, ok := mp[project]
if !ok {
//fmt.Printf("New project: %+v\n", project)
mp[project] = make(map[string]*models.EnrollmentProjectRange)
}
mp[project][subject.UUID] = subject
}
var subjects []map[string]models.EnrollmentProjectRange
for _, data := range mp {
//fmt.Printf("Project %s has %d uuids\n", project, len(data))
projectSubjects := make(map[string]models.EnrollmentProjectRange)
n := 0
for uuid, subject := range data {
projectSubjects[uuid] = *subject
n++
if n == packSize {
subjects = append(subjects, projectSubjects)
projectSubjects = make(map[string]models.EnrollmentProjectRange)
n = 0
}
}
if n > 0 {
subjects = append(subjects, projectSubjects)
}
}
// fmt.Printf("subjects(%d): %+v\n", len(subjects), subjects)
now := time.Now()
getRange := func(ch chan []rangeResult, subjectMap map[string]models.EnrollmentProjectRange) (res []rangeResult) {
defer func() {
if ch != nil {
ch <- res
}
}()
var (
pattern string
inf string
)
patternSet := false
uuidsCond := "author_uuid in ("
for uuid, subject := range subjectMap {
if !patternSet {
if subject.ProjectSlug != nil {
pattern = strings.TrimSpace(*subject.ProjectSlug)
if strings.HasPrefix(pattern, "/projects/") {
pattern = pattern[10:]
}
pattern = "sds-" + strings.Replace(pattern, "/", "-", -1)
pattern = pattern + "-*,-*-raw,-*-for-merge"
inf = fmt.Sprintf("getRange(%s:%d)", *subject.ProjectSlug, len(subjectMap))
} else {
pattern = "sds-*,-*-raw,-*-for-merge"
inf = fmt.Sprintf("getRange(%d)", len(subjectMap))
}
patternSet = true
}
uuidsCond += "'" + s.JSONEscape(uuid) + "',"
}
uuidsCond = uuidsCond[0:len(uuidsCond)-1] + ")"
// metadata__updated_on: the only column that is present across different data sources
// and stores 'date of creation or last update of an item in its data source (git, gerrit, etc.)'
// See: https://chaoss.github.io/grimoirelab-sigils/panels/data-status/
data := fmt.Sprintf(
`{"query":"select author_uuid, min(metadata__updated_on), max(metadata__updated_on), min(grimoire_creation_date), max(grimoire_creation_date) from \"%s\" where %s group by author_uuid","fetch_size":%d}`,
s.JSONEscape(pattern),
uuidsCond,
shared.FetchSize,
)
retErr := func(e error) {
er := errs.Wrap(errs.New(e, errs.ErrBadRequest), inf)
for uuid, subject := range subjectMap {
res = append(res, rangeResult{uuid: uuid, project: subject.ProjectSlug, err: er})
}
}
payloadBytes := []byte(data)
payloadBody := bytes.NewReader(payloadBytes)
method := "POST"
url := fmt.Sprintf("%s/_sql?format=csv", s.url)
req, err := http.NewRequest(method, url, payloadBody)
if err != nil {
err = fmt.Errorf("new request error: %+v for %s url: %s, data: %s", err, method, url, data)
retErr(err)
return
}
req.Header.Set("Content-Type", "application/json")
//fmt.Printf("%s: %s\n", inf, data)
resp, err := http.DefaultClient.Do(req)
if err != nil {
err = fmt.Errorf("do request error: %+v for %s url: %s, data: %s", err, method, url, data)
retErr(err)
return
}
defer func() {
_ = resp.Body.Close()
}()
if resp.StatusCode != 200 {
var body []byte
body, err = ioutil.ReadAll(resp.Body)
if err != nil {
err = fmt.Errorf("readAll non-ok request error: %+v for %s url: %s, data: %s", err, method, url, data)
retErr(err)
return
}
err = fmt.Errorf("method:%s url:%s data: %s status:%d\n%s", method, url, data, resp.StatusCode, body)
retErr(err)
return
}
reader := csv.NewReader(resp.Body)
row := []string{}
n := 0
for {
row, err = reader.Read()
if err == io.EOF {
err = nil
break
} else if err != nil {
err = fmt.Errorf("read CSV row #%d, error: %v/%T", n, err, err)
retErr(err)
return
}
n++
if n == 1 {
continue
}
r := rangeResult{}
//fmt.Printf("%s: %+v\n", inf, row)
subject, ok := subjectMap[row[0]]
if !ok {
r.err = fmt.Errorf("uuid: %s not found in sourceMap", row[0])
res = append(res, r)
continue
}
r.uuid = subject.UUID
r.project = subject.ProjectSlug
if row[1] != "" && row[3] != "" && time.Time(subject.Start) == shared.MinPeriodDate {
start1, err := s.TimeParseAny(row[1])
if err != nil {
r.err = err
res = append(res, r)
continue
}
start2, err := s.TimeParseAny(row[3])
if err != nil {
r.err = err
res = append(res, r)
continue
}
start := start1
if start2.Before(start1) {
start = start2
}
secs := now.Sub(start).Seconds()
// select * from enrollments where (minute(cast(end as time)) != 0 or second(cast(end as time)) != 0) and end < '2020-06-01' and end > '2014-01-01' and cast(end as time) not in ('18:30:00');
// add 7 seconds to mark this as a special date that was calculated
start = s.DayStart(start).Add(time.Second * time.Duration(7))
// we can set the start date if it is more than 24 hours in the past (86400)
// we can set the start date if it is more than a quarter ago (90 * 24 * 3600 = 7776000)
if secs >= 7776000 {
r.start = strfmt.DateTime(start)
r.setStart = true
// fmt.Printf("%s: new start date: %+v\n", inf, start)
}
}
if row[2] != "" && row[4] != "" && time.Time(subject.End) == shared.MaxPeriodDate {
end1, err := s.TimeParseAny(row[2])
if err != nil {
r.err = err
res = append(res, r)
continue
}
end2, err := s.TimeParseAny(row[4])
if err != nil {
r.err = err
res = append(res, r)
continue
}
end := end1
if end2.After(end1) {
end = end2
}
secs := now.Sub(end).Seconds()
var start time.Time
if row[1] != "" && row[3] != "" {
start1, err := s.TimeParseAny(row[1])
if err != nil {
r.err = err
res = append(res, r)
continue
}
start2, err := s.TimeParseAny(row[3])
if err != nil {
r.err = err
res = append(res, r)
continue
}
start = start1
if start2.Before(start1) {
start = start2
}
} else {
start = time.Time(subject.Start)
}
start = s.DayStart(start).Add(time.Second * time.Duration(7))
// add 7 seconds to mark this as a special date that was calculated
end = s.DayStart(end).Add(time.Hour*time.Duration(24) + time.Second*time.Duration(7))
// fmt.Printf("%s: secs: %f\n", inf, secs)
// 365.25 * 24 * 3600 = 31557600 (1 year ago)
if secs >= 31557600 && end.After(start) {
r.end = strfmt.DateTime(end)
r.setEnd = true
//fmt.Printf("%s: new end date: %+v\n", inf, end)
}
}
res = append(res, r)
}
return
}
all := len(inSubjects)
allPacks := len(subjects)
processed := 0
processedPacks := 0
ers := 0
processResult := func(resAry []rangeResult) {
processedPacks++
for _, res := range resAry {
if res.err != nil {
log.Warn(res.err.Error())
ers++
continue
}
processed++
if processed%500 == 0 {
log.Info(fmt.Sprintf("Found items %d/%d, processed packs %d/%d, detected ranges: %d, errors: %d", processed, all, processedPacks, allPacks, len(outSubjects), ers))
}
if !res.setStart && !res.setEnd {
continue
}
subject := &models.EnrollmentProjectRange{UUID: res.uuid, ProjectSlug: res.project}
if res.setStart {
subject.Start = res.start
}
if res.setEnd {
subject.End = res.end
}
// fmt.Printf("Adding: %+v\n", subject)
outSubjects = append(outSubjects, subject)
}
}
thrN := s.GetThreadsNum()
if thrN > 1 {
thrN = int(math.Round(math.Sqrt(float64(thrN))))
log.Info(fmt.Sprintf("Using %d parallel ES queries\n", thrN))
ch := make(chan []rangeResult)
nThreads := 0
for _, subjectMap := range subjects {
go getRange(ch, subjectMap)
nThreads++
if nThreads == thrN {
res := <-ch
nThreads--
processResult(res)
}
}
for nThreads > 0 {
res := <-ch
nThreads--
processResult(res)
}
} else {
for _, subjectMap := range subjects {
processResult(getRange(nil, subjectMap))
}
}
status = fmt.Sprintf("Found items %d/%d, processed packs %d/%d, detected ranges: %d, errors: %d, ES parallel threads: %d", processed, all, processedPacks, allPacks, len(outSubjects), ers, thrN)
log.Info(status)
return
}
// projectSlugToIndexPattern - single project to its index pattern (all data sources)
func (s *service) projectSlugToIndexPattern(projectSlug string) (pattern string) {
log.Info(fmt.Sprintf("projectSlugToIndexPattern: projectSlug:%s", projectSlug))
defer func() {
log.Info(fmt.Sprintf("projectSlugToIndexPattern(exit): projectSlug:%s pattern:%s", projectSlug, pattern))
}()
pattern = strings.TrimSpace(projectSlug)
if strings.HasPrefix(pattern, "/projects/") {
pattern = pattern[10:]
}
pattern = "sds-" + strings.Replace(pattern, "/", "-", -1)
pattern = pattern + "-*,-*-raw,-*-for-merge"
return
}
// projectSlugsToIndexPattern - multiple projects to their index pattern (all data sources)
func (s *service) projectSlugsToIndexPattern(projectSlugs []string) (pattern string) {
log.Info(fmt.Sprintf("projectSlugsToIndexPattern: projectSlugs:%+v", projectSlugs))
defer func() {
log.Info(fmt.Sprintf("projectSlugsToIndexPattern(exit): projectSlugs:%+v pattern:%s", projectSlugs, pattern))
}()
for _, projectSlug := range projectSlugs {
pat := strings.TrimSpace(projectSlug)
if strings.HasPrefix(pat, "/projects/") {
pat = pat[10:]
}
pat = "sds-" + strings.Replace(pat, "/", "-", -1) + "-*"
if pattern == "" {
pattern = pat
} else {
pattern += "," + pat
}
}
pattern = pattern + ",-*-raw,-*-for-merge"
return
}
// projectSlugToIndexPatterns - single project to its multiple data source index patterns
func (s *service) projectSlugToIndexPatterns(projectSlug string, dataSourceTypes []string) (patterns []string) {
log.Info(fmt.Sprintf("projectSlugToIndexPatterns: projectSlug:%s dataSourceTypes:%+v", projectSlug, dataSourceTypes))
defer func() {
log.Info(fmt.Sprintf("projectSlugToIndexPatterns(exit): projectSlug:%s dataSourceTypes:%+v patterns:%+v", projectSlug, dataSourceTypes, patterns))
}()
patternRoot := strings.TrimSpace(projectSlug)
if strings.HasPrefix(patternRoot, "/projects/") {
patternRoot = patternRoot[10:]
}
dataSourceTypes = s.mapDataSourceTypes(dataSourceTypes)
patternRoot = "sds-" + strings.Replace(patternRoot, "/", "-", -1) + "-"
for _, dataSourceType := range dataSourceTypes {
dataSourceType = strings.Replace(dataSourceType, "/", "-", -1)
pat := patternRoot + dataSourceType
// in case of plain "github" add a wildcard to hit all GitHub indices
if dataSourceType == "github" {
pat = pat + "*"
}
patterns = append(patterns, pat+",-*-raw,-*-for-merge")
}
return
}
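// Illustration (hypothetical slug, not real data): for projectSlug "acme/widgets"
// and dataSourceTypes ["git", "github"], the function above yields
//   "sds-acme-widgets-git,-*-raw,-*-for-merge"
//   "sds-acme-widgets-github*,-*-raw,-*-for-merge"
// i.e. one pattern per data source, with the raw and for-merge indices excluded.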
// projectSlugsToIndexPatterns - multiple projects to their multiple data source index patterns
func (s *service) projectSlugsToIndexPatterns(projectSlugs []string, dataSourceTypes []string) (patterns []string) {
log.Info(fmt.Sprintf("projectSlugsToIndexPatterns: projectSlugs:%+v dataSourceTypes:%+v", projectSlugs, dataSourceTypes))
defer func() {
log.Info(fmt.Sprintf("projectSlugsToIndexPatterns(exit): projectSlugs:%+v dataSourceTypes:%+v patterns:%+v", projectSlugs, dataSourceTypes, patterns))
}()
patternRoot := []string{}
for _, projectSlug := range projectSlugs {
pat := strings.TrimSpace(projectSlug)
if strings.HasPrefix(pat, "/projects/") {
pat = pat[10:]
}
pat = "sds-" + strings.Replace(pat, "/", "-", -1) + "-"
patternRoot = append(patternRoot, pat)
}
dataSourceTypes = s.mapDataSourceTypes(dataSourceTypes)
for _, dataSourceType := range dataSourceTypes {
dataSourceType = strings.Replace(dataSourceType, "/", "-", -1)
pattern := ""
for _, root := range patternRoot {
pat := root + dataSourceType
// in case of plain "github" add a wildcard to hit all GitHub indices
if dataSourceType == "github" {
pat = pat + "*"
}
if pattern == "" {
pattern = pat
} else {
pattern += "," + pat
}
}
patterns = append(patterns, pattern+",-*-raw,-*-for-merge")
}
return
}
// mapDataSourceTypes - return data source types replacing github/pull_request with github/issue
// TOPCON
func (s *service) mapDataSourceTypes(dataSourceTypes []string) (outDataSourceTypes []string) {
for _, dst := range dataSourceTypes {
if dst == "github/pull_request" {
dst = "github/issue"
}
outDataSourceTypes = append(outDataSourceTypes, dst)
}
return
}
func (s *service) GetUnaffiliated(projectSlugs []string, topN int64) (getUnaffiliated *models.GetUnaffiliatedOutput, err error) {
log.Info(fmt.Sprintf("GetUnaffiliated: projectSlugs:%+v topN:%d", projectSlugs, topN))
pattern := ""
getUnaffiliated = &models.GetUnaffiliatedOutput{}
defer func() {
unaff := ""
nUnaffiliated := len(getUnaffiliated.Unaffiliated)
if nUnaffiliated > shared.LogListMax {
unaff = fmt.Sprintf("%d", nUnaffiliated)
} else {
unaff = fmt.Sprintf("%+v", s.ToLocalUnaffiliatedObj(getUnaffiliated))
}
log.Info(
fmt.Sprintf(
"GetUnaffiliated(exit): projectSlugs:%+v topN:%d pattern:%s getUnaffiliated:%+v err:%v",
projectSlugs,
topN,
pattern,
unaff,
err,
),
)
}()
pattern = s.projectSlugsToIndexPattern(projectSlugs)
getUnaffiliated.Unaffiliated, err = s.AggsUnaffiliated(pattern, topN)
return
}
func (s *service) AggsUnaffiliated(indexPattern string, topN int64) (unaffiliated []*models.UnaffiliatedDataOutput, err error) {
log.Info(fmt.Sprintf("AggsUnaffiliated: index:%s topN:%d", indexPattern, topN))
if topN <= 0 {
topN = 2147483647
}
data := `{"size":0,"aggs":{"unaffiliated":{"filter":{"terms":{"author_org_name":["Unknown","NotFound","","-","?"]}},"aggs":{"unaffiliated":{"terms":{"field":"author_uuid","missing":"","size":`
data += fmt.Sprintf("%d", topN)
data += "}}}}}}"
defer func() {
unaff := ""
nUnaffiliated := len(unaffiliated)
if nUnaffiliated > shared.LogListMax {
unaff = fmt.Sprintf("%d", nUnaffiliated)
} else {
unaff = fmt.Sprintf("%+v", s.ToLocalUnaffiliated(unaffiliated))
}
log.Info(
fmt.Sprintf(
"AggsUnaffiliated(exit): index:%s topN:%d data:%s unaffiliated:%+v err:%v",
indexPattern,
topN,
data,
unaff,
err,
),
)
}()
payloadBytes := []byte(data)
payloadBody := bytes.NewReader(payloadBytes)
var res *esapi.Response
res, err = s.search(indexPattern, payloadBody)
if err != nil {
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "ES.search.request")
return
}
defer res.Body.Close()
if res.IsError() {
var e map[string]interface{}
if err = jsoniter.NewDecoder(res.Body).Decode(&e); err != nil {
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "ES.search.result.decode")
return
}
err = fmt.Errorf("[%s] %s: %s", res.Status(), e["error"].(map[string]interface{})["type"], e["error"].(map[string]interface{})["reason"])
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "ES.search.result")
return
}
var result aggsUnaffiliatedResult
if err = jsoniter.NewDecoder(res.Body).Decode(&result); err != nil {
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "ES.search.aggs.decode")
return
}
for _, bucket := range result.Aggregations.Unaffiliated.Unaffiliated.Buckets {
// We don't have Name here yet (from the ES aggregation)
unaffiliated = append(unaffiliated, &models.UnaffiliatedDataOutput{Contributions: bucket.DocCount, UUID: bucket.Key})
}
return
}
// ContributorsCount - returns the number of distinct author_uuids in a given index pattern
func (s *service) ContributorsCount(indexPattern, cond string) (cnt int64, err error) {
log.Info(fmt.Sprintf("ContributorsCount: indexPattern:%s cond:%s", indexPattern, cond))
defer func() {
log.Info(fmt.Sprintf("ContributorsCount(exit): indexPattern:%s cond:%s cnt:%d err:%v", indexPattern, cond, cnt, err))
}()
var data string
if cond == "" {
data = fmt.Sprintf(`{"query":"select count(distinct author_uuid) as cnt from \"%s\""}`, s.JSONEscape(indexPattern))
} else {
data = fmt.Sprintf(`{"query":"select count(distinct author_uuid) as cnt from \"%s\" where true %s"}`, s.JSONEscape(indexPattern), cond)
re1 := regexp.MustCompile(`\r?\n`)
re2 := regexp.MustCompile(`\s+`)
data = strings.TrimSpace(re1.ReplaceAllString(re2.ReplaceAllString(data, " "), " "))
}
payloadBytes := []byte(data)
payloadBody := bytes.NewReader(payloadBytes)
method := "POST"
url := fmt.Sprintf("%s/_sql?format=csv", s.url)
req, err := http.NewRequest(method, url, payloadBody)
if err != nil {
err = fmt.Errorf("new request error: %+v for %s url: %s, data: %s", err, method, url, data)
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "ContributorsCount")
return
}
// fmt.Printf(">>> ContributorsCount: curl -s -XPOST -H 'Content-Type: application/json' %s -d'%s'\n", url, data)
req.Header.Set("Content-Type", "application/json")
resp, err := http.DefaultClient.Do(req)
if err != nil {
err = fmt.Errorf("do request error: %+v for %s url: %s, data: %s", err, method, url, data)
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "ContributorsCount")
return
}
defer func() {
_ = resp.Body.Close()
}()
if resp.StatusCode != 200 {
var body []byte
body, err = ioutil.ReadAll(resp.Body)
if err != nil {
err = fmt.Errorf("readAll non-ok request error: %+v for %s url: %s, data: %s", err, method, url, data)
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "ContributorsCount")
return
}
err = fmt.Errorf("method:%s url:%s data: %s status:%d\n%s", method, url, data, resp.StatusCode, body)
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "ContributorsCount")
return
}
reader := csv.NewReader(resp.Body)
row := []string{}
n := 0
for {
row, err = reader.Read()
if err == io.EOF {
err = nil
break
} else if err != nil {
err = fmt.Errorf("read CSV row #%d, error: %v/%T", n, err, err)
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "ContributorsCount")
return
}
n++
if n == 2 {
var fcnt float64
fcnt, err = strconv.ParseFloat(row[0], 64)
cnt = int64(fcnt)
break
}
}
return
}
// Top contributor functions
func (s *service) getAllStringFields(indexPattern string) (fields []string, err error) {
log.Info(fmt.Sprintf("getAllStringFields: indexPattern:%s", indexPattern))
defer func() {
log.Info(fmt.Sprintf("getAllStringFields(exit): indexPattern:%s fields:%+v err:%v", indexPattern, fields, err))
}()
data := fmt.Sprintf(`{"query":"show columns in \"%s\""}`, s.JSONEscape(indexPattern))
payloadBytes := []byte(data)
payloadBody := bytes.NewReader(payloadBytes)
method := "POST"
url := fmt.Sprintf("%s/_sql?format=csv", s.url)
req, err := http.NewRequest(method, url, payloadBody)
if err != nil {
err = fmt.Errorf("new request error: %+v for %s url: %s, data: %s", err, method, url, data)
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "getAllStringFields")
return
}
req.Header.Set("Content-Type", "application/json")
resp, err := http.DefaultClient.Do(req)
if err != nil {
err = fmt.Errorf("do request error: %+v for %s url: %s, data: %s", err, method, url, data)
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "getAllStringFields")
return
}
defer func() {
_ = resp.Body.Close()
}()
if resp.StatusCode != 200 {
var body []byte
body, err = ioutil.ReadAll(resp.Body)
if err != nil {
err = fmt.Errorf("readAll non-ok request error: %+v for %s url: %s, data: %s", err, method, url, data)
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "getAllStringFields")
return
}
err = fmt.Errorf("method:%s url:%s data: %s status:%d\n%s", method, url, data, resp.StatusCode, body)
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "getAllStringFields")
return
}
reader := csv.NewReader(resp.Body)
row := []string{}
n := 0
for {
row, err = reader.Read()
if err == io.EOF {
err = nil
break
} else if err != nil {
err = fmt.Errorf("read CSV row #%d, error: %v/%T", n, err, err)
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "getAllStringFields")
return
}
n++
// hash_short,VARCHAR,keyword
if row[1] == "VARCHAR" && row[2] == "keyword" {
fields = append(fields, row[0])
}
}
return
}
func (s *service) dataSourceQuery(query string) (result map[string][]string, drop bool, err error) {
log.Info(fmt.Sprintf("dataSourceQuery: query:%d", len(query)))
defer func() {
l := 0
r, ok := result["author_uuid"]
if ok {
l = len(r)
}
log.Info(fmt.Sprintf("dataSourceQuery(exit): query:%d result:%d err:%v", len(query), l, err))
}()
payloadBytes := []byte(query)
payloadBody := bytes.NewReader(payloadBytes)
method := "POST"
url := fmt.Sprintf("%s/_sql?format=csv", s.url)
// url := fmt.Sprintf("%s/_sql/translate", s.url)
// fmt.Printf(">>> dataSourceQuery: curl -s -XPOST -H 'Content-Type: application/json' %s -d'%s'\n", url, query)
req, err := http.NewRequest(method, url, payloadBody)
if err != nil {
err = fmt.Errorf("new request error: %+v for %s url: %s, query: %s", err, method, url, query)
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "dataSourceQuery")
return
}
req.Header.Set("Content-Type", "application/json")
resp, err := http.DefaultClient.Do(req)
if err != nil {
err = fmt.Errorf("do request error: %+v for %s url: %s query: %s", err, method, url, query)
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "dataSourceQuery")
return
}
defer func() {
_ = resp.Body.Close()
}()
if resp.StatusCode != 200 {
var body []byte
body, err = ioutil.ReadAll(resp.Body)
if err != nil {
err = fmt.Errorf("readAll non-ok request error: %+v for %s url: %s query: %s", err, method, url, query)
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "dataSourceQuery")
return
}
err = fmt.Errorf("method:%s url:%s status:%d\nquery:\n%s\nbody:\n%s", method, url, resp.StatusCode, query, body)
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "dataSourceQuery")
if strings.Contains(err.Error(), " Unknown index ") || strings.Contains(err.Error(), " Unknown column ") {
log.Warn(fmt.Sprintf("unknown index or column: %v for query: %s\n", err, query))
err = nil
drop = true
}
return
}
log.Debug(fmt.Sprintf("Query: %s", query))
reader := csv.NewReader(resp.Body)
row := []string{}
n := 0
i2n := make(map[int]string)
for {
row, err = reader.Read()
if err == io.EOF {
err = nil
break
} else if err != nil {
err = fmt.Errorf("read CSV row #%d, error: %v/%T", n, err, err)
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "dataSourceQuery")
return
}
n++
log.Debug(fmt.Sprintf("Row #%d: %+v", n, row))
if n == 1 {
result = make(map[string][]string)
for i, col := range row {
i2n[i] = col
result[col] = []string{}
}
continue
}
for i, val := range row {
col := i2n[i]
ary := result[col]
ary = append(ary, val)
result[col] = ary
}
}
return
}
func (s *service) searchCondition(indexPattern, search string) (condition string, err error) {
// Example search queries:
// 'author_org_name=re:Red Hat.*'
// 'all=red*hat'
// 'author_name,committer_name,reporter_name=re: *[jJ]ohn( [sS]mith)? *'
// 'at&t'
// 're:.*[iI][nN][cC].?'
// 'author_org_name=re:.*([gG]oogle|[rR]ed *[hH]at).*'
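// Sketch of the expansion performed below (field names and value are placeholders):
//   'author_name,author_org_name=john'
// becomes a fragment of the form
//   and (\"author_name\" rlike <re> or \"author_org_name\" rlike <re>)
// where <re> is the case-insensitive regexp built by ToCaseInsensitiveRegexp,
// JSON-escaped so it can be embedded into the _sql query payload.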
log.Info(fmt.Sprintf("searchCondition: indexPattern:%s search:%s", indexPattern, search))
defer func() {
log.Info(fmt.Sprintf("searchCondition(exit): indexPattern:%s search:%s condition:%s err:%v", indexPattern, search, condition, err))
}()
if search == "" {
return
}
ary := strings.Split(search, "=")
if len(ary) > 1 {
fields := ary[0]
fieldsAry := strings.Split(fields, ",")
if strings.TrimSpace(fieldsAry[0]) == "" {
return
}
values := ary[1]
valuesAry := strings.Split(values, ",")
if strings.TrimSpace(valuesAry[0]) == "" {
return
}
if len(fieldsAry) == 1 && fieldsAry[0] == "all" {
fieldsAry, err = s.getAllStringFields(indexPattern)
if err != nil {
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "searchCondition")
return
}
}
for _, value := range valuesAry {
value := s.SpecialUnescape(s.JSONEscape(s.ToCaseInsensitiveRegexp(value)))
for _, field := range fieldsAry {
field = `\"` + s.JSONEscape(field) + `\"`
if condition == "" {
condition = "and (" + field + " rlike " + value
} else {
condition += " or " + field + " rlike " + value
}
}
}
if condition != "" {
condition += ")"
}
fmt.Printf("searchCondition: '%s' => '%s'\n", search, condition)
} else {
escaped := s.SpecialUnescape(s.JSONEscape(s.ToCaseInsensitiveRegexp(search)))
condition = fmt.Sprintf(`
and (\"author_name\" rlike %[1]s
or \"author_org_name\" rlike %[1]s
or \"author_uuid\" rlike %[1]s)
`,
escaped,
)
fmt.Printf("searchCondition: '%s' => '%s'\n", search, condition)
}
return
}
func (s *service) dataSourceTypeFields(dataSourceType string) (fields map[string]string, err error) {
log.Info(fmt.Sprintf("dataSourceTypeFields: dataSourceType:%s", dataSourceType))
defer func() {
log.Info(fmt.Sprintf("dataSourceTypeFields(exit): dataSourceType:%s fields:%+v err:%v", dataSourceType, fields, err))
}()
// TOPCON
switch dataSourceType {
case "git":
fields = map[string]string{
"git_commits": "count(distinct hash) as git_commits",
"git_lines_added": "sum(lines_added) as git_lines_added",
"git_lines_removed": "sum(lines_removed) as git_lines_removed",
"git_lines_changed": "sum(lines_changed) as git_lines_changed",
}
case "gerrit":
fields = map[string]string{
"gerrit_approvals": "sum(is_gerrit_approval) as gerrit_approvals",
"gerrit_changesets": "sum(is_gerrit_changeset) as gerrit_changesets",
"gerrit_merged_changesets": "count(status) as gerrit_merged_changesets",
"gerrit_comments": "count(is_gerrit_comment) as gerrit_comments",
}
case "jira":
fields = map[string]string{
"jira_issues_created": "count(distinct key) as jira_issues_created",
"jira_issues_assigned": "count(distinct assignee_uuid) as jira_issues_assigned",
"jira_issues_closed": "count(distinct assignee_uuid) as jira_issues_closed",
"jira_comments": "count(distinct comment_id) as jira_comments",
"jira_average_issue_open_days": "avg(time_to_close_days) as jira_average_issue_open_days",
}
case "confluence":
fields = map[string]string{
"confluence_pages_created": "sum(is_new_page) as confluence_pages_created",
"confluence_pages_edited": "sum(is_page) as confluence_pages_edited",
"confluence_comments": "sum(is_comment) as confluence_comments",
"confluence_blog_posts": "sum(is_blogpost) as confluence_blog_posts",
"confluence_attachments": "sum(is_attachment) as confluence_attachments",
"confluence_last_action_date": "max(metadata__updated_on) as confluence_last_action_date",
}
case "github/issue":
fields = map[string]string{
"github_issue_issues_created": "count(distinct id) as github_issue_issues_created",
"github_issue_issues_assigned": "count(distinct issue_id) as github_issue_issues_assigned",
"github_issue_issues_closed": "count(distinct id) as github_issue_issues_closed",
"github_issue_average_time_open_days": "avg(time_open_days) as github_issue_average_time_open_days",
"github_issue_issues_comments": "count(distinct id) as github_issue_issues_comments",
}
case "github/pull_request":
fields = map[string]string{
"github_pull_request_prs_created": "count(distinct id) as github_pull_request_prs_created",
"github_pull_request_prs_merged": "count(distinct id) as github_pull_request_prs_merged",
"github_pull_request_prs_open": "count(distinct id) as github_pull_request_prs_open",
"github_pull_request_prs_closed": "count(distinct id) as github_pull_request_prs_closed",
"github_pull_request_prs_reviewed": "count(distinct pull_request_id) as github_pull_request_prs_reviewed",
"github_pull_request_prs_approved": "count(distinct pull_request_id) as github_pull_request_prs_approved",
"github_pull_request_prs_review_comments": "count(distinct pull_request_review_id) as github_pull_request_prs_review_comments",
"github_pull_request_prs_comment_activity": "count(distinct id) as github_pull_request_prs_comment_activity",
}
case "bugzillarest":
fields = map[string]string{
"bugzilla_issues_created": "count(distinct url) as bugzilla_issues_created",
"bugzilla_issues_closed": "count(is_open) as bugzilla_issues_closed",
"bugzilla_issues_assigned": "count(distinct url) as bugzilla_issues_assigned",
"bugzilla_average_issue_open_days": "avg(timeopen_days) as bugzilla_average_issue_open_days",
}
case "bugzilla":
fields = map[string]string{
"bugzilla_issues_created": "count(distinct url) as bugzilla_issues_created",
"bugzilla_issues_closed": "count(status) as bugzilla_issues_closed",
"bugzilla_issues_assigned": "count(distinct url) as bugzilla_issues_assigned",
"bugzilla_average_issue_open_days": "avg(timeopen_days) as bugzilla_average_issue_open_days",
}
default:
// FIXME: in the future create an error, log.Error it and return it to the caller (for now this only logs)
log.Error("elastic/service.go", errs.Wrap(errs.New(fmt.Errorf("unknown data source type: %s", dataSourceType), errs.ErrBadRequest), "dataSourceTypeFields"))
}
return
}
func (s *service) additionalWhere(dataSourceType, sortField string) (cond string, err error) {
log.Info(fmt.Sprintf("additionalWhere: dataSourceType:%s sortField:%s", dataSourceType, sortField))
defer func() {
log.Info(fmt.Sprintf("additionalWhere(exit): dataSourceType:%s sortField:%s cond:%s err:%v", dataSourceType, sortField, cond, err))
}()
// TOPCON
switch dataSourceType {
case "all":
switch sortField {
case "cnt", "author_uuid":
return
}
case "git":
if len(sortField) > 4 && sortField[:4] != "git_" {
return
}
switch sortField {
case "git_commits", "cnt":
cond = `and \"type\" = 'commit' and \"hash\" is not null and (\"lines_changed\" > 0 or \"lines_added\" > 0 or \"lines_removed\" > 0)`
return
case "git_lines_added", "git_lines_removed", "git_lines_changed":
sortField := sortField[4:]
cond = fmt.Sprintf(`and \"type\" = 'commit' and \"%s\" is not null`, s.JSONEscape(sortField))
return
}
case "gerrit":
if len(sortField) > 7 && sortField[:7] != "gerrit_" {
return
}
switch sortField {
case "gerrit_approvals":
cond = `and \"is_gerrit_approval\" is not null`
return
case "gerrit_changesets":
cond = `and \"is_gerrit_changeset\" is not null`
return
case "gerrit_comments":
cond = `and \"is_gerrit_comment\" is not null`
return
case "gerrit_merged_changesets":
cond = `and \"status\" = 'MERGED'`
return
case "cnt":
return
}
case "jira":
if len(sortField) > 5 && sortField[:5] != "jira_" {
return
}
switch sortField {
case "jira_issues_created", "cnt":
cond = `and \"key\" is not null`
return
case "jira_issues_assigned":
cond = `and \"assignee_uuid\" is not null`
return
case "jira_average_issue_open_days":
cond = `and \"time_to_close_days\" is not null`
return
case "jira_comments":
cond = `and \"comment_id\" is not null and \"type\" = 'comment'`
return
case "jira_issues_closed":
cond = `and \"assignee_uuid\" is not null and \"status\" in ('Closed', 'Resolved', 'Done')`
return
}
case "confluence":
if len(sortField) > 11 && sortField[:11] != "confluence_" {
return
}
switch sortField {
case "confluence_pages_created":
cond = `and \"is_new_page\" is not null`
return
case "confluence_pages_edited":
cond = `and \"is_page\" is not null`
return
case "confluence_comments":
cond = `and \"is_comment\" is not null`
return
case "confluence_blog_posts":
cond = `and \"is_blogpost\" is not null`
return
case "confluence_attachments":
cond = `and \"is_attachment\" is not null`
return
case "confluence_last_action_date":
cond = `and \"metadata__updated_on\" is not null`
return
case "cnt":
return
}
case "github/issue":
if len(sortField) > 13 && sortField[:13] != "github_issue_" {
return
}
switch sortField {
case "github_issue_issues_created", "github_issue_average_time_open_days", "cnt":
cond = `and \"type\" = 'issue' and \"id\" is not null and \"pull_request\" = false`
return
case "github_issue_issues_closed":
cond = `and \"type\" = 'issue' and \"id\" is not null and \"pull_request\" = false and \"state\" = 'closed'`
return
case "github_issue_issues_assigned":
cond = `and \"type\" = 'issue_assignee' and \"issue_id\" is not null and \"pull_request\" = false`
return
case "github_issue_issues_comments":
cond = `and \"type\" = 'issue_comment' and \"id\" is not null and \"pull_request\" = false`
return
}
case "github/pull_request":
if len(sortField) > 20 && sortField[:20] != "github_pull_request_" {
return
}
switch sortField {
case "github_pull_request_prs_created", "cnt":
cond = `and \"type\" = 'pull_request' and \"id\" is not null and \"pull_request\" = true`
return
case "github_pull_request_prs_merged":
cond = `and \"type\" = 'pull_request' and \"id\" is not null and \"pull_request\" = true and length(\"merged_by_data_uuid\") = 40 and \"merged\" = true`
return
case "github_pull_request_prs_open":
cond = `and \"type\" = 'pull_request' and \"id\" is not null and \"pull_request\" = true and \"state\" = 'open'`
return
case "github_pull_request_prs_closed":
cond = `and \"type\" = 'pull_request' and \"id\" is not null and \"pull_request\" = true and \"state\" = 'closed'`
return
case "github_pull_request_prs_reviewed":
cond = `and \"type\" = 'pull_request_review' and \"pull_request_id\" is not null`
return
case "github_pull_request_prs_approved":
cond = `and \"type\" = 'pull_request_review' and \"pull_request_id\" is not null and \"state\" = 'APPROVED'`
return
case "github_pull_request_prs_review_comments":
cond = `and \"type\" = 'pull_request_review' and \"pull_request_id\" is not null and \"pull_request_review_id\" is not null`
return
case "github_pull_request_prs_comment_activity":
cond = `and (\"type\" in ('pull_request_review', 'pull_request_comment') or (\"type\" = 'issue_comment' and \"pull_request\" = true)) and \"id\" is not null`
return
}
case "bugzillarest":
if len(sortField) > 9 && sortField[:9] != "bugzilla_" {
return
}
switch sortField {
case "bugzilla_issues_created", "cnt":
cond = `and \"url\" is not null`
return
case "bugzilla_issues_closed":
cond = ` and \"url\" is not null and \"is_open\" = false`
return
case "bugzilla_issues_assigned":
cond = `and \"assigned_to_uuid\" is not null`
return
case "bugzilla_average_issue_open_days":
cond = `and \"timeopen_days\" is not null`
return
}
case "bugzilla":
if len(sortField) > 9 && sortField[:9] != "bugzilla_" {
return
}
switch sortField {
case "bugzilla_issues_created", "cnt":
cond = `and \"url\" is not null`
return
case "bugzilla_issues_closed":
cond = ` and \"url\" is not null and \"status\" in ('CLOSED', 'RESOLVED')`
return
case "bugzilla_issues_assigned":
cond = `and \"assigned_to_uuid\" is not null`
return
case "bugzilla_average_issue_open_days":
cond = `and \"timeopen_days\" is not null`
return
}
}
err = errs.Wrap(errs.New(fmt.Errorf("unknown dataSourceType/sortField: %s/%s", dataSourceType, sortField), errs.ErrBadRequest), "additionalWhere")
return
}
func (s *service) having(dataSourceType, sortField string) (cond string, err error) {
log.Info(fmt.Sprintf("having: dataSourceType:%s sortField:%s", dataSourceType, sortField))
defer func() {
log.Info(fmt.Sprintf("having(exit): dataSourceType:%s sortField:%s cond:%s err:%v", dataSourceType, sortField, cond, err))
}()
if sortField == "cnt" {
return
}
// TOPCON
switch dataSourceType {
case "all":
switch sortField {
case "cnt", "author_uuid":
return
}
case "git":
if len(sortField) > 4 && sortField[:4] != "git_" {
return
}
switch sortField {
case "git_commits", "git_lines_added", "git_lines_removed", "git_lines_changed":
cond = fmt.Sprintf(`having \"%s\" >= 0`, s.JSONEscape(sortField))
return
}
case "gerrit":
if len(sortField) > 7 && sortField[:7] != "gerrit_" {
return
}
switch sortField {
case "gerrit_approvals", "gerrit_changesets", "gerrit_merged_changesets", "gerrit_comments":
cond = fmt.Sprintf(`having \"%s\" >= 0`, s.JSONEscape(sortField))
return
}
case "jira":
if len(sortField) > 5 && sortField[:5] != "jira_" {
return
}
switch sortField {
case "jira_issues_created", "jira_issues_assigned", "jira_average_issue_open_days", "jira_comments", "jira_issues_closed":
cond = fmt.Sprintf(`having \"%s\" >= 0`, s.JSONEscape(sortField))
return
}
case "confluence":
if len(sortField) > 11 && sortField[:11] != "confluence_" {
return
}
switch sortField {
case "confluence_pages_created", "confluence_pages_edited", "confluence_comments", "confluence_blog_posts", "confluence_attachments":
cond = fmt.Sprintf(`having \"%s\" >= 0`, s.JSONEscape(sortField))
return
case "confluence_last_action_date":
cond = `having \"confluence_last_action_date\" >= '1900-01-01'::timestamp`
return
}
case "github/issue":
if len(sortField) > 13 && sortField[:13] != "github_issue_" {
return
}
switch sortField {
case "github_issue_issues_created", "github_issue_average_time_open_days", "github_issue_issues_assigned", "github_issue_issues_closed", "github_issue_issues_comments":
cond = fmt.Sprintf(`having \"%s\" >= 0`, s.JSONEscape(sortField))
return
}
case "github/pull_request":
if len(sortField) > 20 && sortField[:20] != "github_pull_request_" {
return
}
switch sortField {
case "github_pull_request_prs_created", "github_pull_request_prs_merged", "github_pull_request_prs_closed", "github_pull_request_prs_open", "github_pull_request_prs_reviewed", "github_pull_request_prs_approved", "github_pull_request_prs_review_comments", "github_pull_request_prs_comment_activity":
cond = fmt.Sprintf(`having \"%s\" >= 0`, s.JSONEscape(sortField))
return
}
case "bugzilla", "bugzillarest":
if len(sortField) > 9 && sortField[:9] != "bugzilla_" {
return
}
switch sortField {
case "bugzilla_issues_created", "bugzilla_issues_closed", "bugzilla_issues_assigned", "bugzilla_average_issue_open_days":
cond = fmt.Sprintf(`having \"%s\" >= 0`, s.JSONEscape(sortField))
return
}
}
err = errs.Wrap(errs.New(fmt.Errorf("unknown dataSourceType/sortField: %s/%s", dataSourceType, sortField), errs.ErrBadRequest), "having")
return
}
func (s *service) orderBy(dataSourceType, sortField, sortOrder string) (order string, err error) {
log.Info(fmt.Sprintf("orderBy: dataSourceType:%s sortField:%s", dataSourceType, sortField))
defer func() {
log.Info(fmt.Sprintf("orderBy(exit): dataSourceType:%s sortField:%s cond:%s err:%v", dataSourceType, sortField, order, err))
}()
dir := ""
if sortOrder == "" || strings.ToLower(sortOrder) == "desc" {
dir = "desc"
} else if strings.ToLower(sortOrder) == "asc" {
dir = "asc"
} else {
err = errs.Wrap(errs.New(fmt.Errorf("unknown sortOrder: %s", sortOrder), errs.ErrBadRequest), "orderBy")
return
}
// TOPCON
switch dataSourceType {
case "all":
switch sortField {
case "author_uuid":
order = fmt.Sprintf(`order by \"%s\" %s`, s.JSONEscape(sortField), dir)
return
}
case "git":
switch sortField {
case "git_commits", "git_lines_added", "git_lines_removed", "git_lines_changed":
order = fmt.Sprintf(`order by \"%s\" %s`, s.JSONEscape(sortField), dir)
return
}
case "gerrit":
switch sortField {
case "gerrit_approvals", "gerrit_changesets", "gerrit_merged_changesets", "gerrit_comments":
order = fmt.Sprintf(`order by \"%s\" %s`, s.JSONEscape(sortField), dir)
return
}
case "jira":
switch sortField {
case "jira_issues_created", "jira_issues_assigned", "jira_average_issue_open_days", "jira_comments", "jira_issues_closed":
order = fmt.Sprintf(`order by \"%s\" %s`, s.JSONEscape(sortField), dir)
return
}
case "confluence":
switch sortField {
case "confluence_pages_created", "confluence_pages_edited", "confluence_comments", "confluence_blog_posts", "confluence_attachments", "confluence_last_action_date":
order = fmt.Sprintf(`order by \"%s\" %s`, s.JSONEscape(sortField), dir)
return
}
case "github/issue":
switch sortField {
case "github_issue_issues_created", "github_issue_average_time_open_days", "github_issue_issues_assigned", "github_issue_issues_closed", "github_issue_issues_comments":
order = fmt.Sprintf(`order by \"%s\" %s`, s.JSONEscape(sortField), dir)
return
}
case "github/pull_request":
switch sortField {
case "github_pull_request_prs_created", "github_pull_request_prs_merged", "github_pull_request_prs_closed", "github_pull_request_prs_open", "github_pull_request_prs_reviewed", "github_pull_request_prs_approved", "github_pull_request_prs_review_comments", "github_pull_request_prs_comment_activity":
order = fmt.Sprintf(`order by \"%s\" %s`, s.JSONEscape(sortField), dir)
return
}
case "bugzilla", "bugzillarest":
switch sortField {
case "bugzilla_issues_created", "bugzilla_issues_closed", "bugzilla_issues_assigned", "bugzilla_average_issue_open_days":
order = fmt.Sprintf(`order by \"%s\" %s`, s.JSONEscape(sortField), dir)
return
}
}
order = `order by \"cnt\" desc`
return
}
func (s *service) contributorStatsMergeQuery(
dataSourceType, indexPattern, column, columnStr, search, uuids string,
from, to int64,
useSearch bool,
) (jsonStr string, err error) {
log.Debug(
fmt.Sprintf(
"contributorStatsMergeQuery: dataSourceType:%s indexPattern:%s column:%s columnStr:%s search:%s uuids:%s from:%d to:%d useSearch:%v",
dataSourceType, indexPattern, column, columnStr, search, uuids, from, to, useSearch,
),
)
defer func() {
log.Debug(
fmt.Sprintf(
"contributorStatsMergeQuery(exit): dataSourceType:%s indexPattern:%s column:%s columnStr:%s search:%s uuids:%s from:%d to:%d useSearch:%v jsonStr:%s err:%v",
dataSourceType, indexPattern, column, columnStr, search, uuids, from, to, useSearch, jsonStr, err,
),
)
}()
if !useSearch {
search = ""
}
additionalWhereStr := ""
havingStr := ""
additionalWhereStr, err = s.additionalWhere(dataSourceType, column)
if err != nil {
err = errs.Wrap(err, "contributorStatsMergeQuery")
return
}
havingStr, err = s.having(dataSourceType, column)
if err != nil {
err = errs.Wrap(err, "contributorStatsMergeQuery")
return
}
data := fmt.Sprintf(`
select
\"author_uuid\", %s
from
\"%s\"
where
\"author_uuid\" is not null
and length(\"author_uuid\") = 40
and not (\"author_bot\" = true)
and cast(\"metadata__updated_on\" as long) >= %d
and cast(\"metadata__updated_on\" as long) < %d
%s
%s
%s
group by
\"author_uuid\"
%s
`,
columnStr,
s.JSONEscape(indexPattern),
from,
to,
search,
additionalWhereStr,
uuids,
havingStr,
)
re1 := regexp.MustCompile(`\r?\n`)
re2 := regexp.MustCompile(`\s+`)
data = strings.TrimSpace(re1.ReplaceAllString(re2.ReplaceAllString(data, " "), " "))
jsonStr = fmt.Sprintf(`{"query":"`+data+`", "fetch_size":%d}`, shared.FetchSize)
return
}
func (s *service) contributorStatsMainQuery(
dataSourceType, indexPattern, column string,
from, to, limit, offset int64,
search, sortField, sortOrder string,
) (jsonStr string, err error) {
log.Debug(
fmt.Sprintf(
"contributorStatsMainQuery: dataSourceType:%s indexPattern:%s column:%s from:%d to:%d limit:%d offset:%d search:%s sortField:%s sortOrder:%s",
dataSourceType, indexPattern, column, from, to, limit, offset, search, sortField, sortOrder,
),
)
defer func() {
log.Debug(
fmt.Sprintf(
"contributorStatsMainQuery(exit): dataSourceType:%s indexPattern:%s column:%s from:%d to:%d limit:%d offset:%d search:%s sortField:%s sortOrder:%s jsonStr:%s err:%v",
dataSourceType, indexPattern, column, from, to, limit, offset, search, sortField, sortOrder, jsonStr, err,
),
)
}()
additionalWhereStr := ""
havingStr := ""
orderByClause := ""
additionalWhereStr, err = s.additionalWhere(dataSourceType, sortField)
if err != nil {
err = errs.Wrap(err, "contributorStatsMainQuery")
return
}
havingStr, err = s.having(dataSourceType, sortField)
if err != nil {
err = errs.Wrap(err, "contributorStatsMainQuery")
return
}
orderByClause, err = s.orderBy(dataSourceType, sortField, sortOrder)
if err != nil {
err = errs.Wrap(err, "contributorStatsMainQuery")
return
}
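// The main query fetches enough rows to cover every page up to and including the requested offset,
// capped at the maximum aggregation size supported by the backend.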
iLimit := (offset + 1) * limit
if iLimit > shared.MaxAggsSize {
iLimit = shared.MaxAggsSize
}
data := fmt.Sprintf(`
select
\"author_uuid\", %s
from
\"%s\"
where
\"author_uuid\" is not null
and length(\"author_uuid\") = 40
and not (\"author_bot\" = true)
and cast(\"metadata__updated_on\" as long) >= %d
and cast(\"metadata__updated_on\" as long) < %d
%s
%s
group by
\"author_uuid\"
%s
%s
limit %d
`,
column,
s.JSONEscape(indexPattern),
from,
to,
search,
additionalWhereStr,
havingStr,
orderByClause,
iLimit,
)
re1 := regexp.MustCompile(`\r?\n`)
re2 := regexp.MustCompile(`\s+`)
data = strings.TrimSpace(re1.ReplaceAllString(re2.ReplaceAllString(data, " "), " "))
jsonStr = fmt.Sprintf(`{"query":"`+data+`", "fetch_size":%d}`, shared.FetchSize)
return
}
func (s *service) GetTopContributors(projectSlugs []string, dataSourceTypes []string, from, to, limit, offset int64, search, sortField, sortOrder string) (top *models.TopContributorsFlatOutput, err error) {
if sortField == "confluence_days_since_last_documentation" {
sortField = "confluence_last_action_date"
}
// Set this to true to apply search filters to the merge queries too.
// This can discard some users, even if they're specified in the uuids array,
// because the search condition can be slightly different per data source type (especially in all=value mode).
// This is because in all=value mode, the list of columns to search for 'value'
// is different in each index pattern (some columns are data source type specific).
// If we set this to false, only UUIDs from the main query will be used as a condition.
useSearchInMergeQueries := os.Getenv("USE_SEARCH_IN_MERGE") != ""
// useCaptureAllPatternToCountContributors specifies how to count all contributors:
// true: use a pattern matching all current project(s) data, for example 'sds-proj1-*,sds-proj2-*,...,sds-projN-*,-*-raw,-*-for-merge';
// this can give more contributors than actual results, because the main query (depending on 'sort_field') only queries one data source, not all of them
// false: use the same pattern as the main data query (depending on sort_field); this gives the same number of records (so pagination will always be OK),
// but when sort_field is changed, the number of contributors will change too
useCaptureAllPatternToCountContributors := false
// dataSourceTypes = []string{"git", "gerrit", "jira", "confluence", "github/issue", "github/pull_request", "bugzilla", "bugzillarest"}
patterns := s.projectSlugsToIndexPatterns(projectSlugs, dataSourceTypes)
patternAll := s.projectSlugsToIndexPattern(projectSlugs)
// FIXME: hack to deal with broken slack mapping: starts
patternAll += ",-*-slack"
for i := range patterns {
patterns[i] += ",-*-slack"
}
// FIXME: hack to deal with broken slack mapping: ends
fmt.Printf("%s %+v\n", patternAll, patterns)
log.Debug(
fmt.Sprintf(
"GetTopContributors: projectSlugs:%+v dataSourceTypes:%+v patterns:%+v patternAll:%s from:%d to:%d limit:%d offset:%d search:%s sortField:%s sortOrder:%s useSearchInMergeQueries:%v",
projectSlugs,
dataSourceTypes,
patterns,
patternAll,
from,
to,
limit,
offset,
search,
sortField,
sortOrder,
useSearchInMergeQueries,
),
)
top = &models.TopContributorsFlatOutput{}
defer func() {
inf := ""
nTop := len(top.Contributors)
if nTop > shared.LogListMax {
inf = fmt.Sprintf("%d", nTop)
} else {
inf = fmt.Sprintf("%+v", s.ToLocalTopContributorsFlatObj(top))
}
log.Debug(
fmt.Sprintf(
"GetTopContributors(exit): projectSlugs:%+v dataSourceTypes:%+v patterns:%+v patternAll:%s from:%d to:%d limit:%d offset:%d search:%s sortField:%s sortOrder:%s useSearchInMergeQueries:%v top:%+v err:%v",
projectSlugs,
dataSourceTypes,
patterns,
patternAll,
from,
to,
limit,
offset,
search,
sortField,
sortOrder,
useSearchInMergeQueries,
inf,
err,
),
)
}()
var dsFields map[string]string
fields := make(map[string]map[string]string)
mainPattern := ""
mainDataSourceType := "all"
if len(dataSourceTypes) == 1 {
mainDataSourceType = dataSourceTypes[0]
}
mainColumn := "count(*) as cnt"
mainSortField := "cnt"
mainSortOrder := "desc"
if sortField == "author_uuid" {
mainSortField = "author_uuid"
mainSortOrder = sortOrder
}
for i, dataSourceType := range dataSourceTypes {
dsFields, err = s.dataSourceTypeFields(dataSourceType)
if err != nil {
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "es.GetTopContributors")
return
}
fields[dataSourceType] = dsFields
if mainPattern == "" {
for column, columnStr := range dsFields {
// Uncomment to have default sort order by 'git_commits'
// if column == sortField || (column == "git_commits" && sortField == "") {
if column == sortField {
if sortField == "" {
sortField = column
}
if sortOrder == "" {
sortOrder = "desc"
}
mainPattern = patterns[i]
mainDataSourceType = dataSourceType
mainColumn = columnStr
mainSortField = sortField
mainSortOrder = sortOrder
break
}
}
}
}
if mainPattern == "" {
if sortField != "" && sortField != "author_uuid" {
err = errs.Wrap(errs.New(fmt.Errorf("cannot find main data source type for sort column: %s", sortField), errs.ErrBadRequest), "es.GetTopContributors")
return
}
if len(dataSourceTypes) > 0 {
mainPattern = strings.Join(s.projectSlugsToIndexPatterns(projectSlugs, dataSourceTypes), ",")
} else {
mainPattern = s.projectSlugsToIndexPattern(projectSlugs)
}
// FIXME: hack to deal with broken slack mapping
mainPattern += ",-*-slack"
}
top.DataSourceTypes = []*models.DataSourceTypeFields{}
// map to keep the order of data source fields in the output
dataSourceOrder := map[string]int{
"git": 0,
"gerrit": 1,
"github/pull_request": 2,
"jira": 3,
"github/issue": 4,
"bugzilla": 5,
"confluence": 6,
"slack": 7,
"rocketchat": 8,
"pipermail": 9,
"groupsio": 10,
"discourse": 11,
"jenkins": 12,
"dockerhub": 13,
}
for dataSourceType, dataSourceFields := range fields {
dataSourceTypeName := dataSourceType
if dataSourceTypeName == "bugzillarest" {
dataSourceTypeName = "bugzilla"
}
dsFields := []string{}
for field := range dataSourceFields {
dsFields = append(dsFields, field)
}
top.DataSourceTypes = append(
top.DataSourceTypes,
&models.DataSourceTypeFields{
Name: dataSourceTypeName,
Fields: dsFields,
},
)
}
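// Selection sort of top.DataSourceTypes using the predefined dataSourceOrder; unknown data sources (rank 99) go last.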
for i := 0; i < len(top.DataSourceTypes); i++ {
first := 0
if _, ok := dataSourceOrder[top.DataSourceTypes[i].Name]; ok {
first = dataSourceOrder[top.DataSourceTypes[i].Name]
} else {
first = 99
}
minIndex := i
for j := i; j < len(top.DataSourceTypes); j++ {
current := 0
if _, ok := dataSourceOrder[top.DataSourceTypes[j].Name]; ok {
current = dataSourceOrder[top.DataSourceTypes[j].Name]
} else {
current = 99
}
if current < first {
first = current
minIndex = j
}
}
tempDataSource := top.DataSourceTypes[i]
top.DataSourceTypes[i] = top.DataSourceTypes[minIndex]
top.DataSourceTypes[minIndex] = tempDataSource
}
// Get count of all contributors
var searchCondAll string
if useCaptureAllPatternToCountContributors {
searchCondAll, err = s.searchCondition(patternAll, search)
} else {
searchCondAll, err = s.searchCondition(mainPattern, search)
}
if err != nil {
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "es.GetTopContributors")
return
}
// Add from, to filter
searchCondAll += fmt.Sprintf(
` and \"author_uuid\" is not null and length(\"author_uuid\") = 40 and not (\"author_bot\" = true) and cast(\"metadata__updated_on\" as long) >= %d and cast(\"metadata__updated_on\" as long) < %d`,
from,
to,
)
if !useCaptureAllPatternToCountContributors {
cnd := ""
cnd, err = s.additionalWhere(mainDataSourceType, mainSortField)
if err != nil {
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "es.GetTopContributors")
return
}
if cnd != "" {
searchCondAll += " " + cnd
}
}
if useCaptureAllPatternToCountContributors {
top.ContributorsCount, err = s.ContributorsCount(patternAll, searchCondAll)
} else {
// fmt.Printf(">>> mainPattern = %s\n", mainPattern)
// fmt.Printf(">>> searchCondAll = %s\n", searchCondAll)
top.ContributorsCount, err = s.ContributorsCount(mainPattern, searchCondAll)
}
if err != nil {
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "es.GetTopContributors")
return
}
fromIdx := offset * limit
toIdx := fromIdx + limit
if fromIdx >= shared.MaxAggsSize {
return
}
if toIdx > shared.MaxAggsSize {
toIdx = shared.MaxAggsSize
}
if fromIdx == toIdx {
return
}
if fromIdx >= top.ContributorsCount {
return
}
if toIdx > top.ContributorsCount {
toIdx = top.ContributorsCount
}
if fromIdx == toIdx {
return
}
searchCond := ""
searchCondMap := make(map[string]string)
searchCond, err = s.searchCondition(mainPattern, search)
if err != nil {
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "es.GetTopContributors")
return
}
searchCondMap[mainPattern] = searchCond
query := ""
query, err = s.contributorStatsMainQuery(mainDataSourceType, mainPattern, mainColumn, from, to, limit, offset, searchCond, mainSortField, mainSortOrder)
if err != nil {
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "es.GetTopContributors")
return
}
var (
res map[string][]string
drop bool
)
res, drop, err = s.dataSourceQuery(query)
if drop {
err = fmt.Errorf("cannot find main index, no data available for all projects '%+v'", projectSlugs)
}
if err != nil {
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "es.GetTopContributors")
return
}
results := make(map[string]map[string]string)
nResults := int64(len(res["author_uuid"]))
if fromIdx > nResults {
fromIdx = nResults
}
if toIdx > nResults {
toIdx = nResults
}
if fromIdx == toIdx {
return
}
var uuids []string
for i := fromIdx; i < toIdx; i++ {
uuid := res["author_uuid"][i]
rec, ok := results[uuid]
if !ok {
rec = make(map[string]string)
}
for column, values := range res {
if column == "author_uuid" || column == "cnt" {
continue
}
rec[column] = values[i]
}
results[uuid] = rec
uuids = append(uuids, uuid)
}
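// Restrict the merge queries to the authors returned by the main query: and \"author_uuid\" in ('uuid1','uuid2',...).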
uuidsCond := `and \"author_uuid\" in (`
for _, uuid := range uuids {
uuidsCond += "'" + uuid + "',"
}
uuidsCond = uuidsCond[:len(uuidsCond)-1] + ")"
thrN := s.GetThreadsNum()
searchCond = ""
queries := make(map[string]map[string]string)
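// Build the per-data-source merge queries; with more than one thread available, up to thrN
// query builders run concurrently and each reports completion on the error channel.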
if thrN > 1 {
mtx := &sync.Mutex{}
condMtx := &sync.Mutex{}
ch := make(chan error)
nThreads := 0
for i, dataSourceType := range dataSourceTypes {
mtx.Lock()
queries[dataSourceType] = make(map[string]string)
mtx.Unlock()
for column, columnStr := range fields[dataSourceType] {
if column == sortField {
continue
}
go func(ch chan error, dataSourceType, pattern, column, columnStr string) (err error) {
defer func() {
ch <- err
}()
var (
ok bool
srchCond string
)
if useSearchInMergeQueries {
condMtx.Lock()
srchCond, ok = searchCondMap[pattern]
if !ok {
srchCond, err = s.searchCondition(pattern, search)
if err == nil {
searchCondMap[pattern] = srchCond
}
}
condMtx.Unlock()
if err != nil {
return
}
}
query := ""
query, err = s.contributorStatsMergeQuery(
dataSourceType,
pattern,
column,
columnStr,
srchCond,
uuidsCond,
from,
to,
useSearchInMergeQueries,
)
if err != nil {
return
}
mtx.Lock()
queries[dataSourceType][column] = query
mtx.Unlock()
return
}(ch, dataSourceType, patterns[i], column, columnStr)
nThreads++
if nThreads == thrN {
err = <-ch
nThreads--
if err != nil {
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "es.GetTopContributors")
return
}
}
}
}
for nThreads > 0 {
err = <-ch
nThreads--
if err != nil {
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "es.GetTopContributors")
return
}
}
} else {
for i, dataSourceType := range dataSourceTypes {
queries[dataSourceType] = make(map[string]string)
var ok bool
if useSearchInMergeQueries {
searchCond, ok = searchCondMap[patterns[i]]
if !ok {
searchCond, err = s.searchCondition(patterns[i], search)
if err != nil {
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "es.GetTopContributors")
return
}
searchCondMap[patterns[i]] = searchCond
}
}
for column, columnStr := range fields[dataSourceType] {
if column == sortField {
continue
}
queries[dataSourceType][column], err = s.contributorStatsMergeQuery(
dataSourceType,
patterns[i],
column,
columnStr,
searchCond,
uuidsCond,
from,
to,
useSearchInMergeQueries,
)
if err != nil {
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "es.GetTopContributors")
return
}
}
}
}
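// mergeResults folds one merge-query result set into the main results map keyed by author_uuid;
// every uuid returned by a merge query must already exist in the main query results.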
mergeResults := func(res map[string][]string) (err error) {
log.Debug(fmt.Sprintf("Merging %d result", len(res)))
l := len(res["author_uuid"])
for i := 0; i < l; i++ {
uuid := res["author_uuid"][i]
rec, ok := results[uuid]
if !ok {
err = errs.Wrap(errs.New(fmt.Errorf("merge query returned uuid %s which is not present in main query results", uuid), errs.ErrBadRequest), "mergeResults")
return
}
for column, values := range res {
if column == "author_uuid" {
continue
}
rec[column] = values[i]
}
results[uuid] = rec
}
return
}
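// dropDS removes the named data source from the output list using a swap-with-last delete.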
dropDS := func(dsName string) {
log.Warn("Dropping DS: " + dsName + "\n")
idx := -1
for i, ds := range top.DataSourceTypes {
if ds.Name == dsName {
idx = i
break
}
}
if idx >= 0 {
l := len(top.DataSourceTypes)
top.DataSourceTypes[idx] = top.DataSourceTypes[l-1]
top.DataSourceTypes = top.DataSourceTypes[:l-1]
log.Warn(fmt.Sprintf("Dropped DS %s at #%d\n", dsName, idx))
}
}
type queryResult struct {
err error
drop bool
ds string
}
var mqr queryResult
if thrN > 1 {
ch := make(chan queryResult)
nThreads := 0
mtx := &sync.Mutex{}
for ds, data := range queries {
for column, query := range data {
if column == sortField {
continue
}
go func(ch chan queryResult, ds, query string) (qr queryResult) {
defer func() {
ch <- qr
}()
qr.ds = ds
res, qr.drop, qr.err = s.dataSourceQuery(query)
if qr.err != nil {
return
}
mtx.Lock()
qr.err = mergeResults(res)
mtx.Unlock()
return
}(ch, ds, query)
nThreads++
if nThreads == thrN {
mqr = <-ch
nThreads--
if mqr.err != nil {
err = errs.Wrap(errs.New(mqr.err, errs.ErrBadRequest), "es.GetTopContributors")
return
}
if mqr.drop {
dropDS(mqr.ds)
}
}
}
}
for nThreads > 0 {
mqr = <-ch
nThreads--
if mqr.err != nil {
err = errs.Wrap(errs.New(mqr.err, errs.ErrBadRequest), "es.GetTopContributors")
return
}
if mqr.drop {
dropDS(mqr.ds)
}
}
} else {
for ds, data := range queries {
for column, query := range data {
if column == sortField {
continue
}
var res map[string][]string
res, drop, err = s.dataSourceQuery(query)
if err != nil {
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "es.GetTopContributors")
return
}
if drop {
dropDS(ds)
continue
}
err = mergeResults(res)
if err != nil {
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "es.GetTopContributors")
return
}
}
}
}
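// getInt/getFloat read a single merged value, returning 0 when the column is missing or cannot be parsed as a number.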
getInt := func(uuid, column string) int64 {
strVal, ok := results[uuid][column]
if !ok {
return 0
}
floatValue, err := strconv.ParseFloat(strVal, 64)
if err != nil {
return 0
}
return int64(floatValue)
}
getFloat := func(uuid, column string) float64 {
strVal, ok := results[uuid][column]
if !ok {
return 0
}
floatValue, err := strconv.ParseFloat(strVal, 64)
if err != nil {
return 0
}
return floatValue
}
for _, uuid := range uuids {
if len(results[uuid]) > 0 { // check for Zero contributions.
var ok bool
confluenceLastActionDate := ""
daysAgo := 0.0
confluenceLastActionDate, ok = results[uuid]["confluence_last_action_date"]
if ok {
dt, err := s.TimeParseAny(confluenceLastActionDate)
if err == nil {
dtMillis := float64(dt.Unix()) * 1000.0
nowMillis := float64(time.Now().Unix()) * 1000.0
daysAgo = (nowMillis - dtMillis) / 86400000.0
} else {
confluenceLastActionDate = ""
}
}
// TOPCON
contributor := &models.ContributorFlatStats{
UUID: uuid,
GitLinesAdded: getInt(uuid, "git_lines_added"),
GitLinesChanged: getInt(uuid, "git_lines_changed"),
GitLinesRemoved: getInt(uuid, "git_lines_removed"),
GitCommits: getInt(uuid, "git_commits"),
GerritApprovals: getInt(uuid, "gerrit_approvals"),
GerritMergedChangesets: getInt(uuid, "gerrit_merged_changesets"),
GerritChangesets: getInt(uuid, "gerrit_changesets"),
GerritComments: getInt(uuid, "gerrit_comments"),
JiraComments: getInt(uuid, "jira_comments"),
JiraIssuesCreated: getInt(uuid, "jira_issues_created"),
JiraIssuesAssigned: getInt(uuid, "jira_issues_assigned"),
JiraIssuesClosed: getInt(uuid, "jira_issues_closed"),
JiraAverageIssueOpenDays: getFloat(uuid, "jira_average_issue_open_days"),
ConfluencePagesCreated: getInt(uuid, "confluence_pages_created"),
ConfluencePagesEdited: getInt(uuid, "confluence_pages_edited"),
ConfluenceBlogPosts: getInt(uuid, "confluence_blog_posts"),
ConfluenceComments: getInt(uuid, "confluence_comments"),
ConfluenceAttachments: getInt(uuid, "confluence_attachments"),
ConfluenceLastActionDate: confluenceLastActionDate,
ConfluenceDaysSinceLastDocumentation: daysAgo,
GithubIssueIssuesCreated: getInt(uuid, "github_issue_issues_created"),
GithubIssueIssuesClosed: getInt(uuid, "github_issue_issues_closed"),
GithubIssueIssuesAssigned: getInt(uuid, "github_issue_issues_assigned"),
GithubIssueIssuesComments: getInt(uuid, "github_issue_issues_comments"),
GithubIssueAverageTimeOpenDays: getFloat(uuid, "github_issue_average_time_open_days"),
GithubPullRequestPrsCreated: getInt(uuid, "github_pull_request_prs_created"),
GithubPullRequestPrsMerged: getInt(uuid, "github_pull_request_prs_merged"),
GithubPullRequestPrsOpen: getInt(uuid, "github_pull_request_prs_open"),
GithubPullRequestPrsClosed: getInt(uuid, "github_pull_request_prs_closed"),
GithubPullRequestPrsReviewed: getInt(uuid, "github_pull_request_prs_reviewed"),
GithubPullRequestPrsApproved: getInt(uuid, "github_pull_request_prs_approved"),
GithubPullRequestPrsReviewComments: getInt(uuid, "github_pull_request_prs_review_comments"),
GithubPullRequestPrsCommentActivity: getInt(uuid, "github_pull_request_prs_comment_activity"),
BugzillaIssuesCreated: getInt(uuid, "bugzilla_issues_created"),
BugzillaIssuesClosed: getInt(uuid, "bugzilla_issues_closed"),
BugzillaIssuesAssigned: getInt(uuid, "bugzilla_issues_assigned"),
BugzillaAverageIssueOpenDays: getFloat(uuid, "bugzilla_average_issue_open_days"),
}
top.Contributors = append(top.Contributors, contributor)
}
}
return
}
func (s *service) UpdateByQuery(indexPattern, updateField string, updateTo interface{}, termField string, termCond interface{}, detached bool) (err error) {
log.Info(
fmt.Sprintf(
"UpdateByQuery: indexPattern:%s updateField:%s updateTo:%+v termField:%s termCond:%+v detached:%v",
indexPattern,
updateField,
updateTo,
termField,
termCond,
detached,
),
)
defer func() {
logf := log.Info
if err != nil {
if detached {
logf = log.Warn
err = errs.Wrap(err, "UpdateByQuery")
} else {
err = errs.Wrap(errs.New(err, errs.ErrBadRequest), "UpdateByQuery")
}
}
logf(
fmt.Sprintf(
"UpdateByQuery(exit): indexPattern:%s updateField:%s updateTo:%+v termField:%s termCond:%+v detached:%v err:%v",
indexPattern,
updateField,
updateTo,
termField,
termCond,
detached,
err,
),
)
}()
updateToStr := ""
termCondStr := ""
switch value := updateTo.(type) {
case string:
updateToStr = `"` + s.JSONEscape(value) + `"`
default:
updateToStr = fmt.Sprintf("%v", updateTo)
}
switch value := termCond.(type) {
case string:
termCondStr = `"` + s.JSONEscape(value) + `"`
default:
termCondStr = fmt.Sprintf("%v", termCond)
}
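// The update-by-query payload combines an inline script that sets updateField with a term query selecting documents by termField.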
data := fmt.Sprintf(
`{"script":{"inline":"ctx._source.%s=%s"},"query":{"term":{"%s":%s}}}`,
s.JSONEscape(updateField),
updateToStr,
s.JSONEscape(termField),
termCondStr,
)
payloadBytes := []byte(data)
payloadBody := bytes.NewReader(payloadBytes)
method := "POST"
url := fmt.Sprintf("%s/%s/_update_by_query?conflicts=proceed&refresh=true&timeout=20m", s.url, indexPattern)
req, err := http.NewRequest(method, os.ExpandEnv(url), payloadBody)
if err != nil {
err = fmt.Errorf("new request error: %+v for %s url: %s, data: %+v", err, method, url, data)
return
}
req.Header.Set("Content-Type", "application/json")
resp, err := http.DefaultClient.Do(req)
if err != nil {
err = fmt.Errorf("do request error: %+v for %s url: %s, data: %+v", err, method, url, data)
return
}
defer func() {
_ = resp.Body.Close()
}()
if resp.StatusCode != 200 {
body, err2 := ioutil.ReadAll(resp.Body)
if err2 != nil {
err = fmt.Errorf("ReadAll request error: %+v for %s url: %s, data: %+v", err2, method, url, data)
return
}
err = fmt.Errorf("Method:%s url:%s status:%d data:%+v\n%s", method, url, resp.StatusCode, data, body)
return
}
return
}
func (s *service) search(index string, query io.Reader) (res *esapi.Response, err error) {
return s.client.Search(
s.client.Search.WithIndex(index),
s.client.Search.WithBody(query),
)
}
|
[
"\"USE_SEARCH_IN_MERGE\""
] |
[] |
[
"USE_SEARCH_IN_MERGE"
] |
[]
|
["USE_SEARCH_IN_MERGE"]
|
go
| 1 | 0 | |
e2e/s3/accesspoint_test.go
|
package e2e_test
import (
"context"
"os"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/types"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/cloudformation"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
cloudformationv1alpha1 "go.awsctrl.io/manager/apis/cloudformation/v1alpha1"
s3v1alpha1 "go.awsctrl.io/manager/apis/s3/v1alpha1"
metav1alpha1 "go.awsctrl.io/manager/apis/meta/v1alpha1"
)
// RunAccessPointSpecs allows all instance E2E tests to run
var _ = Describe("Run s3 AccessPoint Controller", func() {
Context("Without AccessPoint{} existing", func() {
It("Should create s3.AccessPoint{}", func() {
var stackID string
var stackName string
var stack *cloudformationv1alpha1.Stack
k8sclient := k8smanager.GetClient()
Expect(k8sclient).ToNot(BeNil())
instance := &s3v1alpha1.AccessPoint{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "sample-accesspoint-",
Namespace: podnamespace,
},
Spec: s3v1alpha1.AccessPointSpec{},
}
By("Creating new s3 AccessPoint")
Expect(k8sclient.Create(context.Background(), instance)).Should(Succeed())
key := types.NamespacedName{
Name: instance.GetName(),
Namespace: podnamespace,
}
By("Expecting CreateComplete")
Eventually(func() bool {
By("Getting latest s3 AccessPoint")
instance = &s3v1alpha1.AccessPoint{}
err := k8sclient.Get(context.Background(), key, instance)
if err != nil {
return false
}
stackID = instance.GetStackID()
stackName = instance.GetStackName()
return instance.Status.Status == metav1alpha1.CreateCompleteStatus ||
(os.Getenv("USE_AWS_CLIENT") != "true" && instance.Status.Status != "")
}, timeout, interval).Should(BeTrue())
By("Checking object OwnerShip")
Eventually(func() bool {
stackkey := types.NamespacedName{
Name: stackName,
Namespace: key.Namespace,
}
stack = &cloudformationv1alpha1.Stack{}
err := k8sclient.Get(context.Background(), stackkey, stack)
if err != nil {
return false
}
expectedOwnerReference := v1.OwnerReference{
Kind: instance.Kind,
APIVersion: instance.APIVersion,
UID: instance.UID,
Name: instance.Name,
}
ownerrefs := stack.GetOwnerReferences()
Expect(len(ownerrefs)).To(Equal(1))
return ownerrefs[0].Name == expectedOwnerReference.Name
}, timeout, interval).Should(BeTrue())
By("Deleting s3 AccessPoint")
Expect(k8sclient.Delete(context.Background(), instance)).Should(Succeed())
By("Deleting AccessPoint Stack")
Expect(k8sclient.Delete(context.Background(), stack)).Should(Succeed())
By("Expecting metav1alpha1.DeleteCompleteStatus")
Eventually(func() bool {
if os.Getenv("USE_AWS_CLIENT") != "true" {
return true
}
output, err := awsclient.GetClient("us-west-2").DescribeStacks(&cloudformation.DescribeStacksInput{StackName: aws.String(stackID)})
Expect(err).To(BeNil())
stackoutput := output.Stacks[0].StackStatus
return *stackoutput == "DELETE_COMPLETE"
}, timeout, interval).Should(BeTrue())
})
})
})
|
[
"\"USE_AWS_CLIENT\"",
"\"USE_AWS_CLIENT\""
] |
[] |
[
"USE_AWS_CLIENT"
] |
[]
|
["USE_AWS_CLIENT"]
|
go
| 1 | 0 | |
pkg/build/builder/cmd/builder.go
|
package cmd
import (
"context"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
restclient "k8s.io/client-go/rest"
istorage "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
buildapiv1 "github.com/openshift/api/build/v1"
bld "github.com/openshift/builder/pkg/build/builder"
"github.com/openshift/builder/pkg/build/builder/cmd/scmauth"
"github.com/openshift/builder/pkg/build/builder/timing"
builderutil "github.com/openshift/builder/pkg/build/builder/util"
utillog "github.com/openshift/builder/pkg/build/builder/util/log"
"github.com/openshift/builder/pkg/version"
buildscheme "github.com/openshift/client-go/build/clientset/versioned/scheme"
buildclientv1 "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1"
"github.com/openshift/library-go/pkg/git"
"github.com/openshift/library-go/pkg/serviceability"
s2iapi "github.com/openshift/source-to-image/pkg/api"
s2igit "github.com/openshift/source-to-image/pkg/scm/git"
)
var (
log = utillog.ToFile(os.Stderr, 2)
buildScheme = runtime.NewScheme()
buildCodecFactory = serializer.NewCodecFactory(buildscheme.Scheme)
buildJSONCodec runtime.Codec
)
func init() {
buildJSONCodec = buildCodecFactory.LegacyCodec(buildapiv1.SchemeGroupVersion)
}
type builder interface {
Build(dockerClient bld.DockerClient, sock string, buildsClient buildclientv1.BuildInterface, build *buildapiv1.Build, cgLimits *s2iapi.CGroupLimits) error
}
type builderConfig struct {
out io.Writer
build *buildapiv1.Build
sourceSecretDir string
dockerClient bld.DockerClient
dockerEndpoint string
buildsClient buildclientv1.BuildInterface
cleanup func()
store storage.Store
blobCache string
}
func newBuilderConfigFromEnvironment(out io.Writer, needsDocker bool) (*builderConfig, error) {
cfg := &builderConfig{}
var err error
cfg.out = out
buildStr := os.Getenv("BUILD")
cfg.build = &buildapiv1.Build{}
obj, _, err := buildJSONCodec.Decode([]byte(buildStr), nil, cfg.build)
if err != nil {
return nil, fmt.Errorf("unable to parse build string: %v", err)
}
ok := false
cfg.build, ok = obj.(*buildapiv1.Build)
if !ok {
return nil, fmt.Errorf("build string %s is not a build: %#v", buildStr, obj)
}
if log.Is(4) {
redactedBuild := builderutil.SafeForLoggingBuild(cfg.build)
bytes, err := runtime.Encode(buildJSONCodec, redactedBuild)
if err != nil {
log.V(4).Infof("unable to print debug line: %v", err)
} else {
log.V(4).Infof("redacted build: %v", string(bytes))
}
}
// sourceSecretsDir (SOURCE_SECRET_PATH)
cfg.sourceSecretDir = os.Getenv("SOURCE_SECRET_PATH")
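// When the build needs a container runtime, containers/storage and the daemonless client are
// configured from the optional BUILD_* environment variables below; unset variables keep the library defaults.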
if needsDocker {
var systemContext types.SystemContext
if registriesConfPath, ok := os.LookupEnv("BUILD_REGISTRIES_CONF_PATH"); ok && len(registriesConfPath) > 0 {
if _, err := os.Stat(registriesConfPath); err == nil {
systemContext.SystemRegistriesConfPath = registriesConfPath
}
}
if registriesDirPath, ok := os.LookupEnv("BUILD_REGISTRIES_DIR_PATH"); ok && len(registriesDirPath) > 0 {
if _, err := os.Stat(registriesDirPath); err == nil {
systemContext.RegistriesDirPath = registriesDirPath
}
}
if signaturePolicyPath, ok := os.LookupEnv("BUILD_SIGNATURE_POLICY_PATH"); ok && len(signaturePolicyPath) > 0 {
if _, err := os.Stat(signaturePolicyPath); err == nil {
systemContext.SignaturePolicyPath = signaturePolicyPath
}
}
storeOptions, err := storage.DefaultStoreOptions(false, 0)
if err != nil {
return nil, err
}
if driver, ok := os.LookupEnv("BUILD_STORAGE_DRIVER"); ok {
storeOptions.GraphDriverName = driver
}
if storageOptions, ok := os.LookupEnv("BUILD_STORAGE_OPTIONS"); ok {
if err := json.Unmarshal([]byte(storageOptions), &storeOptions.GraphDriverOptions); err != nil {
log.V(0).Infof("Error parsing BUILD_STORAGE_OPTIONS (%q): %v", storageOptions, err)
return nil, err
}
}
if storageConfPath, ok := os.LookupEnv("BUILD_STORAGE_CONF_PATH"); ok && len(storageConfPath) > 0 {
if _, err := os.Stat(storageConfPath); err == nil {
storage.ReloadConfigurationFile(storageConfPath, &storeOptions)
}
}
store, err := storage.GetStore(storeOptions)
cfg.store = store
if err != nil {
return nil, err
}
cfg.cleanup = func() {
if _, err := store.Shutdown(false); err != nil {
log.V(0).Infof("Error shutting down storage: %v", err)
}
}
istorage.Transport.SetStore(store)
// Default to using /var/cache/blobs as a blob cache, but allow its location
// to be changed by setting $BUILD_BLOBCACHE_DIR. Setting the location to an
// empty value disables the cache.
cfg.blobCache = "/var/cache/blobs"
if blobCacheDir, isSet := os.LookupEnv("BUILD_BLOBCACHE_DIR"); isSet {
cfg.blobCache = blobCacheDir
}
imageOptimizationPolicy := buildapiv1.ImageOptimizationNone
if s := cfg.build.Spec.Strategy.DockerStrategy; s != nil {
// Default to possibly-multiple-layer builds for Dockerfile-based builds, unless something else was specified.
if policy := s.ImageOptimizationPolicy; policy != nil {
imageOptimizationPolicy = *policy
}
}
if s := cfg.build.Spec.Strategy.SourceStrategy; s != nil {
// Always use base-image+single-layer builds for S2I builds.
imageOptimizationPolicy = buildapiv1.ImageOptimizationSkipLayers
}
dockerClient, err := bld.GetDaemonlessClient(systemContext, store, os.Getenv("BUILD_ISOLATION"), cfg.blobCache, imageOptimizationPolicy)
if err != nil {
return nil, fmt.Errorf("no daemonless store: %v", err)
}
cfg.dockerClient = dockerClient
// S2I requires this to be set, even though we aren't going to use
// docker because we're just generating a dockerfile.
// TODO: update the validation in s2i to be smarter and then
// remove this.
cfg.dockerEndpoint = "n/a"
}
// buildsClient (KUBERNETES_SERVICE_HOST, KUBERNETES_SERVICE_PORT)
clientConfig, err := restclient.InClusterConfig()
if err != nil {
return nil, fmt.Errorf("cannot connect to the server: %v", err)
}
buildsClient, err := buildclientv1.NewForConfig(clientConfig)
if err != nil {
return nil, fmt.Errorf("failed to get client: %v", err)
}
cfg.buildsClient = buildsClient.Builds(cfg.build.Namespace)
return cfg, nil
}
func (c *builderConfig) setupGitEnvironment() (string, []string, error) {
// For now, we only handle git. If not specified, we're done
gitSource := c.build.Spec.Source.Git
if gitSource == nil {
return "", []string{}, nil
}
sourceSecret := c.build.Spec.Source.SourceSecret
gitEnv := []string{"GIT_ASKPASS=true"}
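// GIT_ASKPASS=true points git at the no-op `true` command, so credential prompts return empty instead of blocking on interactive input.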
// If a source secret is present, set it up and add its environment variables
if sourceSecret != nil {
// TODO: this should be refactored to let each source type manage which secrets
// it accepts
sourceURL, err := s2igit.Parse(gitSource.URI)
if err != nil {
return "", nil, fmt.Errorf("cannot parse build URL: %s", gitSource.URI)
}
scmAuths := scmauth.GitAuths(sourceURL)
secretsEnv, overrideURL, err := scmAuths.Setup(c.sourceSecretDir)
if err != nil {
return c.sourceSecretDir, nil, fmt.Errorf("cannot setup source secret: %v", err)
}
if overrideURL != nil {
gitSource.URI = overrideURL.String()
}
gitEnv = append(gitEnv, secretsEnv...)
}
if gitSource.HTTPProxy != nil && len(*gitSource.HTTPProxy) > 0 {
gitEnv = append(gitEnv, fmt.Sprintf("HTTP_PROXY=%s", *gitSource.HTTPProxy))
gitEnv = append(gitEnv, fmt.Sprintf("http_proxy=%s", *gitSource.HTTPProxy))
}
if gitSource.HTTPSProxy != nil && len(*gitSource.HTTPSProxy) > 0 {
gitEnv = append(gitEnv, fmt.Sprintf("HTTPS_PROXY=%s", *gitSource.HTTPSProxy))
gitEnv = append(gitEnv, fmt.Sprintf("https_proxy=%s", *gitSource.HTTPSProxy))
}
if gitSource.NoProxy != nil && len(*gitSource.NoProxy) > 0 {
gitEnv = append(gitEnv, fmt.Sprintf("NO_PROXY=%s", *gitSource.NoProxy))
gitEnv = append(gitEnv, fmt.Sprintf("no_proxy=%s", *gitSource.NoProxy))
}
return c.sourceSecretDir, bld.MergeEnv(os.Environ(), gitEnv), nil
}
// clone is responsible for cloning the source referenced in the buildconfig
func (c *builderConfig) clone() error {
ctx := timing.NewContext(context.Background())
var sourceRev *buildapiv1.SourceRevision
defer func() {
c.build.Status.Stages = timing.GetStages(ctx)
bld.HandleBuildStatusUpdate(c.build, c.buildsClient, sourceRev)
}()
secretTmpDir, gitEnv, err := c.setupGitEnvironment()
if err != nil {
return err
}
defer os.RemoveAll(secretTmpDir)
gitClient := git.NewRepositoryWithEnv(gitEnv)
buildDir := bld.InputContentPath
sourceInfo, err := bld.GitClone(ctx, gitClient, c.build.Spec.Source.Git, c.build.Spec.Revision, buildDir)
if err != nil {
c.build.Status.Phase = buildapiv1.BuildPhaseFailed
c.build.Status.Reason = buildapiv1.StatusReasonFetchSourceFailed
c.build.Status.Message = builderutil.StatusMessageFetchSourceFailed
return err
}
if sourceInfo != nil {
sourceRev = bld.GetSourceRevision(c.build, sourceInfo)
}
err = bld.ExtractInputBinary(os.Stdin, c.build.Spec.Source.Binary, buildDir)
if err != nil {
c.build.Status.Phase = buildapiv1.BuildPhaseFailed
c.build.Status.Reason = buildapiv1.StatusReasonFetchSourceFailed
c.build.Status.Message = builderutil.StatusMessageFetchSourceFailed
return err
}
if len(c.build.Spec.Source.ContextDir) > 0 {
if _, err := os.Stat(filepath.Join(buildDir, c.build.Spec.Source.ContextDir)); os.IsNotExist(err) {
err = fmt.Errorf("provided context directory does not exist: %s", c.build.Spec.Source.ContextDir)
c.build.Status.Phase = buildapiv1.BuildPhaseFailed
c.build.Status.Reason = buildapiv1.StatusReasonInvalidContextDirectory
c.build.Status.Message = builderutil.StatusMessageInvalidContextDirectory
return err
}
}
return nil
}
func (c *builderConfig) extractImageContent() error {
ctx := timing.NewContext(context.Background())
defer func() {
c.build.Status.Stages = timing.GetStages(ctx)
bld.HandleBuildStatusUpdate(c.build, c.buildsClient, nil)
}()
buildDir := bld.InputContentPath
err := bld.ExtractImageContent(ctx, c.dockerClient, c.store, buildDir, c.build, c.blobCache)
if err != nil {
c.build.Status.Phase = buildapiv1.BuildPhaseFailed
c.build.Status.Reason = buildapiv1.StatusReasonFetchImageContentFailed
c.build.Status.Message = builderutil.StatusMessageFetchImageContentFailed
}
return err
}
// execute is responsible for running a build
func (c *builderConfig) execute(b builder) error {
cgLimits, err := bld.GetCGroupLimits()
if err != nil {
return fmt.Errorf("failed to retrieve cgroup limits: %v", err)
}
log.V(4).Infof("Running build with cgroup limits: %#v", *cgLimits)
if err := b.Build(c.dockerClient, c.dockerEndpoint, c.buildsClient, c.build, cgLimits); err != nil {
return fmt.Errorf("build error: %v", err)
}
if c.build.Spec.Output.To == nil || len(c.build.Spec.Output.To.Name) == 0 {
fmt.Fprintf(c.out, "Build complete, no image push requested\n")
}
return nil
}
type dockerBuilder struct{}
// Build starts a Docker build.
func (dockerBuilder) Build(dockerClient bld.DockerClient, sock string, buildsClient buildclientv1.BuildInterface, build *buildapiv1.Build, cgLimits *s2iapi.CGroupLimits) error {
return bld.NewDockerBuilder(dockerClient, buildsClient, build, cgLimits).Build()
}
type s2iBuilder struct{}
// Build starts an S2I build.
func (s2iBuilder) Build(dockerClient bld.DockerClient, sock string, buildsClient buildclientv1.BuildInterface, build *buildapiv1.Build, cgLimits *s2iapi.CGroupLimits) error {
return bld.NewS2IBuilder(dockerClient, sock, buildsClient, build, cgLimits).Build()
}
func runBuild(out io.Writer, builder builder) error {
logVersion()
cfg, err := newBuilderConfigFromEnvironment(out, true)
if err != nil {
return err
}
if cfg.cleanup != nil {
defer cfg.cleanup()
}
return cfg.execute(builder)
}
// RunDockerBuild creates a docker builder and runs its build
func RunDockerBuild(out io.Writer) error {
switch {
case log.Is(6):
serviceability.InitLogrus("DEBUG")
case log.Is(2):
serviceability.InitLogrus("INFO")
case log.Is(0):
serviceability.InitLogrus("WARN")
}
return runBuild(out, dockerBuilder{})
}
// RunS2IBuild creates a S2I builder and runs its build
func RunS2IBuild(out io.Writer) error {
switch {
case log.Is(6):
serviceability.InitLogrus("DEBUG")
case log.Is(2):
serviceability.InitLogrus("INFO")
case log.Is(0):
serviceability.InitLogrus("WARN")
}
return runBuild(out, s2iBuilder{})
}
// RunGitClone performs a git clone using the build defined in the environment
func RunGitClone(out io.Writer) error {
switch {
case log.Is(6):
serviceability.InitLogrus("DEBUG")
case log.Is(2):
serviceability.InitLogrus("INFO")
case log.Is(0):
serviceability.InitLogrus("WARN")
}
logVersion()
cfg, err := newBuilderConfigFromEnvironment(out, false)
if err != nil {
return err
}
if cfg.cleanup != nil {
defer cfg.cleanup()
}
return cfg.clone()
}
// RunManageDockerfile manipulates the dockerfile for docker builds.
// It will write the inline dockerfile to the working directory (possibly
// overwriting an existing dockerfile) and then update the dockerfile
// in the working directory (accounting for contextdir+dockerfilepath)
// with new FROM image information based on the imagestream/imagetrigger
// and also adds some env and label values to the dockerfile based on
// the build information.
func RunManageDockerfile(out io.Writer) error {
switch {
case log.Is(6):
serviceability.InitLogrus("DEBUG")
case log.Is(2):
serviceability.InitLogrus("INFO")
case log.Is(0):
serviceability.InitLogrus("WARN")
}
logVersion()
cfg, err := newBuilderConfigFromEnvironment(out, false)
if err != nil {
return err
}
if cfg.cleanup != nil {
defer cfg.cleanup()
}
return bld.ManageDockerfile(bld.InputContentPath, cfg.build)
}
// RunExtractImageContent extracts files from existing images
// into the build working directory.
func RunExtractImageContent(out io.Writer) error {
switch {
case log.Is(6):
serviceability.InitLogrus("DEBUG")
case log.Is(2):
serviceability.InitLogrus("INFO")
case log.Is(0):
serviceability.InitLogrus("WARN")
}
logVersion()
cfg, err := newBuilderConfigFromEnvironment(out, true)
if err != nil {
return err
}
if cfg.cleanup != nil {
defer cfg.cleanup()
}
return cfg.extractImageContent()
}
// logVersion logs the version of openshift-builder.
func logVersion() {
log.V(5).Infof("openshift-builder %v", version.Get())
}
|
[
"\"BUILD\"",
"\"SOURCE_SECRET_PATH\"",
"\"BUILD_ISOLATION\""
] |
[] |
[
"BUILD",
"BUILD_ISOLATION",
"SOURCE_SECRET_PATH"
] |
[]
|
["BUILD", "BUILD_ISOLATION", "SOURCE_SECRET_PATH"]
|
go
| 3 | 0 | |
Engine/src/main/java/com/cognizant/cognizantits/engine/util/data/KeyMap.java
|
/*
* Copyright 2014 - 2017 Cognizant Technology Solutions
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cognizant.cognizantits.engine.util.data;
import com.cognizant.cognizantits.engine.constants.FilePath;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
 * Utility helpers for resolving placeholder keys in strings against system
 * properties, environment variables and user-supplied maps.
 */
public class KeyMap {
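// Placeholder syntaxes recognised by this class:
//   {name}  - context variables (CONTEXT_VARS)
//   ${name} - environment-style variables (ENV_VARS)
//   %name%  - user variables (USER_VARS)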
public static final Pattern CONTEXT_VARS = Pattern.compile("\\{(.+?)\\}");
public static final Pattern ENV_VARS = Pattern.compile("\\$\\{(.+?)\\}");
public static final Pattern USER_VARS = Pattern.compile("%(.+?)%");
private static Map<Object,Object> systemVars;
public static Map<Object, Object> getSystemVars(){
if(systemVars==null){
systemVars=new HashMap<>();
systemVars.put("app.lib", FilePath.getLibPath());
systemVars.put("app.root", FilePath.getAppRoot());
systemVars.put("app.config", FilePath.getConfigurationPath());
systemVars.putAll(System.getProperties());
systemVars.putAll(System.getenv());
}
return systemVars;
}
public static String resolveContextVars(String in, Map<?,?> vMap) {
return replaceKeys(in, CONTEXT_VARS, true, 1, vMap);
}
public static String resolveEnvVars(String in) {
return replaceKeys(in, ENV_VARS);
}
/**
* replace the given pattern with the key-map value
*
* @param in input string
* @param pattern pattern to match
* @param preserveKeys true to preserve key pattern if its not in key-map
* @param passes no times to resolve
* <br> n for n- level of keys (level -> keys inside keys)
* @param maps key-map list
* @return resolved string
*/
public static String replaceKeys(String in, Pattern pattern, boolean preserveKeys, int passes, Map<?,?>... maps) {
String out = in;
for (int pass = 1; pass <= passes; pass++) {
Matcher m = pattern.matcher(in);
String match, key;
while (m.find()) {
match = m.group();
key = m.group(1);
Boolean resolved = false;
if (maps != null) {
for (Map<?, ?> map : maps) {
if ((resolved = map.containsKey(key))) {
out = out.replace(match, Objects.toString(map.get(key)));
break;
}
}
}
if (!resolved && !preserveKeys) {
out = out.replace(match, key);
}
}
in=out;
}
return out;
}
/**
*
* @param in input string
* @param p pattern to match
* @return resolved string
*/
public static String replaceKeys(String in, Pattern p) {
// Copy system properties and environment variables into plain string maps;
// iterating the entries directly avoids the lossy toString()/split() round trip,
// which silently drops entries whose values contain ',' or '='.
Map<String, String> props = new HashMap<String, String>();
for (Map.Entry<Object, Object> entry : System.getProperties().entrySet()) {
props.put(Objects.toString(entry.getKey()), Objects.toString(entry.getValue()));
}
Map<String, String> envs = new HashMap<String, String>(System.getenv());
return replaceKeys(in, p, false, 1, props, envs);
}
public static String resolveSystemVars(String in) {
return replaceKeys(in, CONTEXT_VARS, true, 1, getSystemVars());
}
}
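// A hedged usage sketch (not part of the original class), assuming the HOME
// environment variable is set. resolveEnvVars() substitutes ${HOME}; unknown
// keys are unwrapped because preserveKeys is false on that path, while
// resolveContextVars() preserves unknown {key} placeholders:
//
//   String env = KeyMap.resolveEnvVars("home=${HOME}, missing=${NOPE}");
//   // -> "home=/home/user, missing=NOPE" (actual value depends on the machine)
//
//   Map<String, String> ctx = new HashMap<>();
//   ctx.put("name", "smoke-test");
//   String out = KeyMap.resolveContextVars("run {name} at {app.root}", ctx);
//   // -> "run smoke-test at {app.root}" ({app.root} is not in ctx, so it is kept)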
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
handler_test.go
|
package gitserver
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math/big"
"net/http"
"net/http/httptest"
"net/url"
"os"
"path"
"reflect"
"strings"
"testing"
"time"
"github.com/inconshreveable/log15"
git "github.com/lhchavez/git2go/v29"
"github.com/omegaup/githttp"
"github.com/omegaup/gitserver/gitservertest"
"github.com/omegaup/gitserver/request"
base "github.com/omegaup/go-base"
"github.com/omegaup/quark/common"
)
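// The Authorization headers below are HTTP Basic credentials for the test
// users; for example, dXNlcjp1c2Vy is the base64 encoding of "user:user".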
const (
userAuthorization = "Basic dXNlcjp1c2Vy"
editorAuthorization = "Basic ZWRpdG9yOmVkaXRvcg=="
adminAuthorization = "Basic YWRtaW46YWRtaW4="
readonlyAuthorization = "Basic cmVhZG9ubHk6cmVhZG9ubHk="
)
var (
fakeInteractiveSettingsCompiler = &FakeInteractiveSettingsCompiler{
Settings: nil,
Err: errors.New("unsupported"),
}
)
func authorize(
ctx context.Context,
w http.ResponseWriter,
r *http.Request,
repositoryName string,
operation githttp.GitOperation,
) (githttp.AuthorizationLevel, string) {
username, _, ok := r.BasicAuth()
if !ok {
w.Header().Set("WWW-Authenticate", "Basic realm=\"Git\"")
w.WriteHeader(http.StatusUnauthorized)
return githttp.AuthorizationDenied, ""
}
requestContext := request.FromContext(ctx)
requestContext.Request.Username = username
requestContext.Request.ProblemName = repositoryName
if username == "admin" {
requestContext.Request.IsAdmin = true
requestContext.Request.CanView = true
requestContext.Request.CanEdit = true
return githttp.AuthorizationAllowed, username
}
if username == "editor" {
requestContext.Request.CanView = true
requestContext.Request.CanEdit = true
return githttp.AuthorizationAllowedRestricted, username
}
if username == "user" {
requestContext.Request.CanView = true
return githttp.AuthorizationAllowedRestricted, username
}
if username == "readonly" {
requestContext.Request.CanView = true
return githttp.AuthorizationAllowedReadOnly, username
}
w.WriteHeader(http.StatusForbidden)
return githttp.AuthorizationDenied, username
}
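// getReference returns the object id that refName currently points to by
// issuing an authenticated ref-advertisement request
// (info/refs?service=git-receive-pack) against the test server and parsing
// the pkt-line response. It returns the zero Oid when the ref does not exist.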
func getReference(
t *testing.T,
problemAlias string,
refName string,
ts *httptest.Server,
) *git.Oid {
prePushURL, err := url.Parse(ts.URL + "/" + problemAlias + "/info/refs?service=git-receive-pack")
if err != nil {
t.Fatalf("Failed to parse URL: %v", err)
}
req := &http.Request{
Method: "GET",
URL: prePushURL,
Header: map[string][]string{
"Authorization": {adminAuthorization},
},
}
res, err := ts.Client().Do(req)
if err != nil {
t.Fatalf("Failed to create pre-pull request: %v", err)
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
t.Fatalf("Failed to request pre-pull: Status %v, headers: %v", res.StatusCode, res.Header)
}
pr := githttp.NewPktLineReader(res.Body)
for {
line, err := pr.ReadPktLine()
if err == io.EOF {
break
}
if err == githttp.ErrFlush {
continue
}
tokens := strings.FieldsFunc(
strings.Trim(string(line), "\n"),
func(r rune) bool {
return r == ' ' || r == '\x00'
},
)
if len(tokens) < 2 {
continue
}
if strings.HasPrefix(tokens[0], "#") || tokens[1] != refName {
continue
}
oid, err := git.NewOid(tokens[0])
if err != nil {
t.Fatalf("Failed to parse oid %v: %v", tokens[0], err)
}
return oid
}
return &git.Oid{}
}
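// createCommit writes the given contents as a tree, creates a commit on top
// of oldOid (or a root commit when oldOid is zero), and returns the new
// commit id together with a mempack-generated packfile that contains the
// newly created objects. No reference is updated.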
func createCommit(
t *testing.T,
tmpDir string,
problemAlias string,
oldOid *git.Oid,
contents map[string]io.Reader,
commitMessage string,
log log15.Logger,
) (*git.Oid, []byte) {
repo, err := git.OpenRepository(path.Join(tmpDir, problemAlias))
if err != nil {
t.Fatalf("Failed to open repository: %v", err)
}
defer repo.Free()
var parentCommits []*git.Commit
if !oldOid.IsZero() {
var err error
parentCommit, err := repo.LookupCommit(oldOid)
if err != nil {
t.Fatalf("Failed to lookup commit %v: %v", oldOid, err)
}
parentCommits = append(parentCommits, parentCommit)
}
odb, err := repo.Odb()
if err != nil {
t.Fatalf("Failed to open odb: %v", err)
}
defer odb.Free()
mempack, err := git.NewMempack(odb)
if err != nil {
t.Fatalf("Failed to create mempack: %v", err)
}
tree, err := githttp.BuildTree(repo, contents, log)
if err != nil {
t.Fatalf("Failed to build tree: %v", err)
}
defer tree.Free()
newCommitID, err := repo.CreateCommit(
"",
&git.Signature{
Name: "author",
Email: "[email protected]",
When: time.Unix(0, 0).In(time.UTC),
},
&git.Signature{
Name: "committer",
Email: "[email protected]",
When: time.Unix(0, 0).In(time.UTC),
},
commitMessage,
tree,
parentCommits...,
)
if err != nil {
t.Fatalf("Failed to create commit: %v", err)
}
packContents, err := mempack.Dump(repo)
if err != nil {
t.Fatalf("Failed to create mempack: %v", err)
}
return newCommitID, packContents
}
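// push performs a git-receive-pack request that moves refName from oldOid to
// newOid using the supplied packfile, and asserts that the server's pkt-line
// response matches expectedResponse.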
func push(
t *testing.T,
tmpDir string,
authorization string,
problemAlias string,
refName string,
oldOid, newOid *git.Oid,
packContents []byte,
expectedResponse []githttp.PktLineResponse,
ts *httptest.Server,
) {
t.Helper()
var inBuf bytes.Buffer
{
// Taken from git 2.14.1
pw := githttp.NewPktLineWriter(&inBuf)
pw.WritePktLine([]byte(fmt.Sprintf(
"%s %s %s\x00report-status\n",
oldOid.String(),
newOid.String(),
refName,
)))
if len(packContents) > 0 {
pw.Flush()
if _, err := inBuf.Write(packContents); err != nil {
t.Fatalf("Failed to write packfile: %v", err)
}
}
}
pushURL, err := url.Parse(ts.URL + "/" + problemAlias + "/git-receive-pack")
if err != nil {
t.Fatalf("Failed to parse URL: %v", err)
}
req := &http.Request{
Method: "POST",
URL: pushURL,
Body: ioutil.NopCloser(&inBuf),
Header: map[string][]string{
"Authorization": {authorization},
},
}
res, err := ts.Client().Do(req)
if err != nil {
t.Fatalf("Failed to create pre-push request: %v", err)
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusForbidden {
t.Fatalf("Failed to request pre-push: Status %v, headers: %v", res.StatusCode, res.Header)
}
if actual, ok := githttp.ComparePktLineResponse(res.Body, expectedResponse); !ok {
t.Errorf("push expected %q, got %q", expectedResponse, actual)
}
}
func TestInvalidRef(t *testing.T) {
tmpDir, err := ioutil.TempDir("", t.Name())
if err != nil {
t.Fatalf("Failed to create directory: %v", err)
}
if os.Getenv("PRESERVE") == "" {
defer os.RemoveAll(tmpDir)
}
log := base.StderrLog()
ts := httptest.NewServer(GitHandler(
tmpDir,
NewGitProtocol(authorize, nil, false, OverallWallTimeHardLimit, fakeInteractiveSettingsCompiler, log),
&base.NoOpMetrics{},
log,
))
defer ts.Close()
problemAlias := "sumas"
{
repo, err := InitRepository(path.Join(tmpDir, problemAlias))
if err != nil {
t.Fatalf("Failed to initialize git repository: %v", err)
}
repo.Free()
}
newOid, packContents := createCommit(
t,
tmpDir,
problemAlias,
&git.Oid{},
map[string]io.Reader{
"settings.json": strings.NewReader(gitservertest.DefaultSettingsJSON),
"cases/0.in": strings.NewReader("1 2"),
"cases/0.out": strings.NewReader("3"),
"statements/es.markdown": strings.NewReader("Sumas"),
},
"Initial commit",
log,
)
push(
t,
tmpDir,
userAuthorization,
problemAlias,
"refs/heads/private",
&git.Oid{}, newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ng refs/heads/private read-only\n", Err: nil},
},
ts,
)
push(
t,
tmpDir,
userAuthorization,
problemAlias,
"refs/heads/arbitrarybranchname",
&git.Oid{}, newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ng refs/heads/arbitrarybranchname invalid-ref\n", Err: nil},
},
ts,
)
}
func TestDelete(t *testing.T) {
tmpDir, err := ioutil.TempDir("", t.Name())
if err != nil {
t.Fatalf("Failed to create directory: %v", err)
}
if os.Getenv("PRESERVE") == "" {
defer os.RemoveAll(tmpDir)
}
log := base.StderrLog()
ts := httptest.NewServer(GitHandler(
tmpDir,
NewGitProtocol(authorize, nil, false, OverallWallTimeHardLimit, fakeInteractiveSettingsCompiler, log),
&base.NoOpMetrics{},
log,
))
defer ts.Close()
problemAlias := "sumas"
{
repo, err := InitRepository(path.Join(tmpDir, problemAlias))
if err != nil {
t.Fatalf("Failed to initialize git repository: %v", err)
}
repo.Free()
}
{
newOid, packContents := createCommit(
t,
tmpDir,
problemAlias,
&git.Oid{},
map[string]io.Reader{
"settings.json": strings.NewReader(gitservertest.DefaultSettingsJSON),
"cases/0.in": strings.NewReader("1 2"),
"cases/0.out": strings.NewReader("3"),
"statements/es.markdown": strings.NewReader("Sumas"),
},
"Initial commit",
log,
)
push(
t,
tmpDir,
editorAuthorization,
problemAlias,
"refs/changes/initial",
&git.Oid{}, newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ok refs/changes/initial\n", Err: nil},
},
ts,
)
}
push(
t,
tmpDir,
userAuthorization,
problemAlias,
"refs/changes/initial",
getReference(t, problemAlias, "refs/changes/initial", ts),
&git.Oid{},
githttp.EmptyPackfile,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ng refs/changes/initial delete-unallowed\n", Err: nil},
},
ts,
)
}
func TestServerCreateReview(t *testing.T) {
tmpDir, err := ioutil.TempDir("", t.Name())
if err != nil {
t.Fatalf("Failed to create directory: %v", err)
}
if os.Getenv("PRESERVE") == "" {
defer os.RemoveAll(tmpDir)
}
log := base.StderrLog()
ts := httptest.NewServer(GitHandler(
tmpDir,
NewGitProtocol(authorize, nil, false, OverallWallTimeHardLimit, fakeInteractiveSettingsCompiler, log),
&base.NoOpMetrics{},
log,
))
defer ts.Close()
problemAlias := "sumas"
{
repo, err := InitRepository(path.Join(tmpDir, problemAlias))
if err != nil {
t.Fatalf("Failed to initialize git repository: %v", err)
}
repo.Free()
}
// Create code review
{
newOid, packContents := createCommit(
t,
tmpDir,
problemAlias,
&git.Oid{},
map[string]io.Reader{
"settings.json": strings.NewReader(gitservertest.DefaultSettingsJSON),
"cases/0.in": strings.NewReader("1 2"),
"cases/0.out": strings.NewReader("3"),
"statements/es.markdown": strings.NewReader("Sumas"),
},
"Initial commit",
log,
)
push(
t,
tmpDir,
editorAuthorization,
problemAlias,
"refs/changes/initial",
&git.Oid{}, newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ok refs/changes/initial\n", Err: nil},
},
ts,
)
}
// Try a few invalid publish paths
{
// User is not an editor, so they cannot change refs/heads/master.
push(
t,
tmpDir,
userAuthorization,
problemAlias,
"refs/heads/master",
getReference(t, problemAlias, "refs/heads/master", ts),
getReference(t, problemAlias, "refs/changes/initial", ts),
githttp.EmptyPackfile,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ng refs/heads/master forbidden\n", Err: nil},
},
ts,
)
// User is not an editor, so they cannot change refs/heads/published.
push(
t,
tmpDir,
userAuthorization,
problemAlias,
"refs/heads/published",
getReference(t, problemAlias, "refs/heads/published", ts),
getReference(t, problemAlias, "refs/changes/initial", ts),
githttp.EmptyPackfile,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ng refs/heads/published forbidden\n", Err: nil},
},
ts,
)
// User is an administrator, but cannot point refs/heads/published to
// something that's not a commit in master.
push(
t,
tmpDir,
adminAuthorization,
problemAlias,
"refs/heads/published",
getReference(t, problemAlias, "refs/heads/published", ts),
getReference(t, problemAlias, "refs/changes/initial", ts),
githttp.EmptyPackfile,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ng refs/heads/published published-must-point-to-commit-in-master\n", Err: nil},
},
ts,
)
}
// Publish initial review
{
push(
t,
tmpDir,
adminAuthorization,
problemAlias,
"refs/heads/master",
getReference(t, problemAlias, "refs/heads/master", ts),
getReference(t, problemAlias, "refs/changes/initial", ts),
githttp.EmptyPackfile,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ok refs/heads/master\n", Err: nil},
},
ts,
)
push(
t,
tmpDir,
adminAuthorization,
problemAlias,
"refs/heads/published",
getReference(t, problemAlias, "refs/heads/published", ts),
getReference(t, problemAlias, "refs/heads/master", ts),
githttp.EmptyPackfile,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ok refs/heads/published\n", Err: nil},
},
ts,
)
}
// Create new revision
{
newOid, packContents := createCommit(
t,
tmpDir,
problemAlias,
getReference(t, problemAlias, "refs/heads/master", ts),
map[string]io.Reader{
"settings.json": strings.NewReader(gitservertest.DefaultSettingsJSON),
"cases/0.in": strings.NewReader("3 2"),
"cases/0.out": strings.NewReader("1"),
"statements/es.markdown": strings.NewReader("Restas"),
},
"Initial commit",
log,
)
push(
t,
tmpDir,
editorAuthorization,
problemAlias,
"refs/changes/initial2",
&git.Oid{}, newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ok refs/changes/initial2\n", Err: nil},
},
ts,
)
}
// Send out a few invalid code reviews.
{
newOid, packContents := createCommit(
t,
tmpDir,
problemAlias,
getReference(t, problemAlias, "refs/meta/review", ts),
map[string]io.Reader{},
"Initial commit",
log,
)
push(
t,
tmpDir,
editorAuthorization,
problemAlias,
"refs/meta/review",
getReference(t, problemAlias, "refs/meta/review", ts),
newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ng refs/meta/review review-bad-layout: iteration uuid in commit message missing or malformed\n", Err: nil},
},
ts,
)
newOid, packContents = createCommit(
t,
tmpDir,
problemAlias,
getReference(t, problemAlias, "refs/meta/review", ts),
map[string]io.Reader{
"should/not/have/had/trees": strings.NewReader("\n"),
},
"Foo\n\nIteration: 00000000-0000-0000-0000-000000000000",
log,
)
push(
t,
tmpDir,
editorAuthorization,
problemAlias,
"refs/meta/review",
getReference(t, problemAlias, "refs/meta/review", ts),
newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ng refs/meta/review review-bad-layout: refs/meta/review must have a flat tree\n", Err: nil},
},
ts,
)
newOid, packContents = createCommit(
t,
tmpDir,
problemAlias,
getReference(t, problemAlias, "refs/meta/review", ts),
map[string]io.Reader{
"ledger": strings.NewReader("missing trailing newline"),
},
"Foo\n\nIteration: 00000000-0000-0000-0000-000000000000",
log,
)
push(
t,
tmpDir,
editorAuthorization,
problemAlias,
"refs/meta/review",
getReference(t, problemAlias, "refs/meta/review", ts),
newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ng refs/meta/review review-bad-layout: ledger does not end in newline\n", Err: nil},
},
ts,
)
reviewCommitHash := getReference(t, problemAlias, "refs/changes/initial2", ts).String()
newOid, packContents = createCommit(
t,
tmpDir,
problemAlias,
getReference(t, problemAlias, "refs/meta/review", ts),
map[string]io.Reader{
reviewCommitHash: strings.NewReader("{}\n"),
},
"Foo\n\nIteration: 00000000-0000-0000-0000-000000000000",
log,
)
push(
t,
tmpDir,
editorAuthorization,
problemAlias,
"refs/meta/review",
getReference(t, problemAlias, "refs/meta/review", ts),
newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ng refs/meta/review review-bad-layout: missing ledger file\n", Err: nil},
},
ts,
)
newOid, packContents = createCommit(
t,
tmpDir,
problemAlias,
getReference(t, problemAlias, "refs/meta/review", ts),
map[string]io.Reader{
"ledger": strings.NewReader("non-JSON ledger\n"),
},
"Foo\n\nIteration: 00000000-0000-0000-0000-000000000000",
log,
)
push(
t,
tmpDir,
editorAuthorization,
problemAlias,
"refs/meta/review",
getReference(t, problemAlias, "refs/meta/review", ts),
newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ng refs/meta/review json-parse-error: appended ledger contents: invalid character 'o' in literal null (expecting 'u')\n", Err: nil},
},
ts,
)
newOid, packContents = createCommit(
t,
tmpDir,
problemAlias,
getReference(t, problemAlias, "refs/meta/review", ts),
map[string]io.Reader{
"ledger": strings.NewReader("{}\n"),
},
"Foo\n\nIteration: 00000000-0000-0000-0000-000000000000",
log,
)
push(
t,
tmpDir,
editorAuthorization,
problemAlias,
"refs/meta/review",
getReference(t, problemAlias, "refs/meta/review", ts),
newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ng refs/meta/review review-bad-layout: invalid iteration uuid in ledger entry\n", Err: nil},
},
ts,
)
newOid, packContents = createCommit(
t,
tmpDir,
problemAlias,
getReference(t, problemAlias, "refs/meta/review", ts),
map[string]io.Reader{
"ledger": strings.NewReader("{\"uuid\":\"00000000-0000-0000-0000-000000000001\",\"author\":\"foo\",\"date\":0,\"Summary\":\"Good!\"}\n"),
},
"Foo\n\nIteration: 00000000-0000-0000-0000-000000000000",
log,
)
push(
t,
tmpDir,
editorAuthorization,
problemAlias,
"refs/meta/review",
getReference(t, problemAlias, "refs/meta/review", ts),
newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ng refs/meta/review review-bad-layout: invalid iteration uuid in ledger entry\n", Err: nil},
},
ts,
)
newOid, packContents = createCommit(
t,
tmpDir,
problemAlias,
getReference(t, problemAlias, "refs/meta/review", ts),
map[string]io.Reader{
"ledger": strings.NewReader("{\"uuid\":\"00000000-0000-0000-0000-000000000000\",\"author\":\"foo\",\"date\":0,\"Summary\":\"Good!\"}\n"),
reviewCommitHash: strings.NewReader("non-JSON entry\n"),
},
"Foo\n\nIteration: 00000000-0000-0000-0000-000000000000",
log,
)
push(
t,
tmpDir,
editorAuthorization,
problemAlias,
"refs/meta/review",
getReference(t, problemAlias, "refs/meta/review", ts),
newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{
Line: fmt.Sprintf(
"ng refs/meta/review review-bad-layout: malformed appended comment in %s: invalid character 'o' in literal null (expecting 'u')\n",
reviewCommitHash,
),
Err: nil,
},
},
ts,
)
newOid, packContents = createCommit(
t,
tmpDir,
problemAlias,
getReference(t, problemAlias, "refs/meta/review", ts),
map[string]io.Reader{
"ledger": strings.NewReader("{\"uuid\":\"00000000-0000-0000-0000-000000000000\",\"author\":\"foo\",\"date\":0,\"Summary\":\"Good!\"}\n"),
reviewCommitHash: strings.NewReader("{\"author\":\"bar\",\"date\":0,\"done\":false,\"filename\":\"cases/0.in\",\"iterationUuid\":\"00000000-0000-0000-0000-000000000000\",\"message\":\"Good!\",\"uuid\":\"00000000-0000-0000-0000-000000000001\"}\n"),
},
"Foo\n\nIteration: 00000000-0000-0000-0000-000000000000",
log,
)
push(
t,
tmpDir,
editorAuthorization,
problemAlias,
"refs/meta/review",
getReference(t, problemAlias, "refs/meta/review", ts),
newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: fmt.Sprintf("ng refs/meta/review review-bad-layout: invalid author in %s\n", reviewCommitHash), Err: nil},
},
ts,
)
newOid, packContents = createCommit(
t,
tmpDir,
problemAlias,
getReference(t, problemAlias, "refs/meta/review", ts),
map[string]io.Reader{
"ledger": strings.NewReader("{\"uuid\":\"00000000-0000-0000-0000-000000000000\",\"author\":\"foo\",\"date\":0,\"Summary\":\"Good!\"}\n"),
reviewCommitHash: strings.NewReader("{\"author\":\"foo\",\"date\":0,\"done\":false,\"filename\":\"cases/0.in\",\"message\":\"Good!\",\"uuid\":\"00000000-0000-0000-0000-000000000001\"}\n"),
},
"Foo\n\nIteration: 00000000-0000-0000-0000-000000000000",
log,
)
push(
t,
tmpDir,
editorAuthorization,
problemAlias,
"refs/meta/review",
getReference(t, problemAlias, "refs/meta/review", ts),
newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: fmt.Sprintf("ng refs/meta/review review-bad-layout: invalid iteration uuid in %s\n", reviewCommitHash), Err: nil},
},
ts,
)
newOid, packContents = createCommit(
t,
tmpDir,
problemAlias,
getReference(t, problemAlias, "refs/meta/review", ts),
map[string]io.Reader{
"ledger": strings.NewReader("{\"uuid\":\"00000000-0000-0000-0000-000000000000\",\"author\":\"foo\",\"date\":0,\"Summary\":\"Good!\"}\n"),
reviewCommitHash: strings.NewReader("{\"author\":\"foo\",\"date\":0,\"done\":false,\"filename\":\"cases/0.in\",\"iterationUuid\":\"00000000-0000-0000-0000-000000000000\",\"message\":\"Good!\"}\n"),
},
"Foo\n\nIteration: 00000000-0000-0000-0000-000000000000",
log,
)
push(
t,
tmpDir,
editorAuthorization,
problemAlias,
"refs/meta/review",
getReference(t, problemAlias, "refs/meta/review", ts),
newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: fmt.Sprintf("ng refs/meta/review review-bad-layout: missing or malformed comment uuid in %s\n", reviewCommitHash), Err: nil},
},
ts,
)
newOid, packContents = createCommit(
t,
tmpDir,
problemAlias,
getReference(t, problemAlias, "refs/meta/review", ts),
map[string]io.Reader{
"ledger": strings.NewReader("{\"uuid\":\"00000000-0000-0000-0000-000000000000\",\"author\":\"foo\",\"date\":0,\"Summary\":\"Good!\"}\n"),
reviewCommitHash: strings.NewReader("{\"author\":\"foo\",\"date\":0,\"done\":false,\"filename\":\"cases/0.in\",\"iterationUuid\":\"00000000-0000-0000-0000-000000000000\",\"message\":\"Good!\",\"uuid\":\"00000000-0000-0000-0000-000000000001\"}\n{\"author\":\"foo\",\"date\":0,\"done\":false,\"filename\":\"cases/0.in\",\"iterationUuid\":\"00000000-0000-0000-0000-000000000000\",\"message\":\"Good!\",\"uuid\":\"00000000-0000-0000-0000-000000000001\"}\n"),
},
"Foo\n\nIteration: 00000000-0000-0000-0000-000000000000",
log,
)
push(
t,
tmpDir,
editorAuthorization,
problemAlias,
"refs/meta/review",
getReference(t, problemAlias, "refs/meta/review", ts),
newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: fmt.Sprintf("ng refs/meta/review review-bad-layout: duplicate comment uuid in %s\n", reviewCommitHash), Err: nil},
},
ts,
)
newOid, packContents = createCommit(
t,
tmpDir,
problemAlias,
getReference(t, problemAlias, "refs/meta/review", ts),
map[string]io.Reader{
"ledger": strings.NewReader("{\"uuid\":\"00000000-0000-0000-0000-000000000000\",\"author\":\"foo\",\"date\":0,\"Summary\":\"Good!\"}\n"),
reviewCommitHash: strings.NewReader("{\"author\":\"foo\",\"date\":0,\"done\":false,\"filename\":\"missing\",\"iterationUuid\":\"00000000-0000-0000-0000-000000000000\",\"message\":\"Good!\",\"uuid\":\"00000000-0000-0000-0000-000000000001\"}\n"),
},
"Foo\n\nIteration: 00000000-0000-0000-0000-000000000000",
log,
)
push(
t,
tmpDir,
editorAuthorization,
problemAlias,
"refs/meta/review",
getReference(t, problemAlias, "refs/meta/review", ts),
newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: fmt.Sprintf("ng refs/meta/review review-bad-layout: file 'missing' not found in %s: the path 'missing' does not exist in the given tree\n", reviewCommitHash), Err: nil},
},
ts,
)
newOid, packContents = createCommit(
t,
tmpDir,
problemAlias,
getReference(t, problemAlias, "refs/meta/review", ts),
map[string]io.Reader{
"ledger": strings.NewReader("{\"uuid\":\"00000000-0000-0000-0000-000000000000\",\"author\":\"foo\",\"date\":0,\"Summary\":\"Good!\"}\n"),
reviewCommitHash: strings.NewReader("{\"author\":\"foo\",\"date\":0,\"done\":false,\"filename\":\"cases/0.in\",\"iterationUuid\":\"00000000-0000-0000-0000-000000000000\",\"message\":\"Good!\",\"uuid\":\"00000000-0000-0000-0000-000000000001\",\"parentUuid\":\"\"}\n"),
},
"Foo\n\nIteration: 00000000-0000-0000-0000-000000000000",
log,
)
push(
t,
tmpDir,
editorAuthorization,
problemAlias,
"refs/meta/review",
getReference(t, problemAlias, "refs/meta/review", ts),
newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: fmt.Sprintf("ng refs/meta/review review-bad-layout: parent uuid missing in %s\n", reviewCommitHash), Err: nil},
},
ts,
)
newOid, packContents = createCommit(
t,
tmpDir,
problemAlias,
getReference(t, problemAlias, "refs/meta/review", ts),
map[string]io.Reader{
"ledger": strings.NewReader("{\"uuid\":\"00000000-0000-0000-0000-000000000000\",\"author\":\"foo\",\"date\":0,\"Summary\":\"Good!\"}\n"),
reviewCommitHash: strings.NewReader("{\"author\":\"foo\",\"date\":0,\"done\":false,\"filename\":\"cases/0.in\",\"iterationUuid\":\"00000000-0000-0000-0000-000000000000\",\"message\":\"Good!\",\"uuid\":\"00000000-0000-0000-0000-000000000001\"}\n{\"author\":\"foo\",\"date\":0,\"done\":false,\"filename\":\"cases/0.in\",\"iterationUuid\":\"00000000-0000-0000-0000-000000000000\",\"message\":\"Good!\",\"uuid\":\"00000000-0000-0000-0000-000000000002\",\"parentUuid\":\"00000000-0000-0000-0000-000000000001\",\"range\":{\"lineStart\":0,\"lineEnd\":0,\"colStart\":0,\"colEnd\":0}}\n"),
},
"Foo\n\nIteration: 00000000-0000-0000-0000-000000000000",
log,
)
push(
t,
tmpDir,
editorAuthorization,
problemAlias,
"refs/meta/review",
getReference(t, problemAlias, "refs/meta/review", ts),
newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: fmt.Sprintf("ng refs/meta/review review-bad-layout: cannot specify both parentUuid and range in %s\n", reviewCommitHash), Err: nil},
},
ts,
)
newOid, packContents = createCommit(
t,
tmpDir,
problemAlias,
getReference(t, problemAlias, "refs/meta/review", ts),
map[string]io.Reader{
"ledger": strings.NewReader("{\"uuid\":\"00000000-0000-0000-0000-000000000000\",\"author\":\"foo\",\"date\":0,\"Summary\":\"Good!\"}\n"),
reviewCommitHash: strings.NewReader("{\"author\":\"foo\",\"date\":0,\"done\":false,\"filename\":\"cases/0.in\",\"iterationUuid\":\"00000000-0000-0000-0000-000000000000\",\"message\":\"\",\"uuid\":\"00000000-0000-0000-0000-000000000001\"}\n"),
},
"Foo\n\nIteration: 00000000-0000-0000-0000-000000000000",
log,
)
push(
t,
tmpDir,
editorAuthorization,
problemAlias,
"refs/meta/review",
getReference(t, problemAlias, "refs/meta/review", ts),
newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: fmt.Sprintf("ng refs/meta/review review-bad-layout: empty comment message in %s\n", reviewCommitHash), Err: nil},
},
ts,
)
newOid, packContents = createCommit(
t,
tmpDir,
problemAlias,
getReference(t, problemAlias, "refs/meta/review", ts),
map[string]io.Reader{
"ledger": strings.NewReader("{\"uuid\":\"00000000-0000-0000-0000-000000000000\",\"author\":\"foo\",\"date\":0,\"Summary\":\"Good!\"}\n"),
reviewCommitHash: strings.NewReader("{\"author\":\"foo\",\"date\":0,\"done\":false,\"filename\":\"cases/0.in\",\"iterationUuid\":\"00000000-0000-0000-0000-000000000000\",\"message\":\"Good!\",\"uuid\":\"00000000-0000-0000-0000-000000000001\"}\n"),
},
"Foo\n\nIteration: 00000000-0000-0000-0000-000000000000",
log,
)
push(
t,
tmpDir,
editorAuthorization,
problemAlias,
"refs/meta/review",
getReference(t, problemAlias, "refs/meta/review", ts),
newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ok refs/meta/review\n", Err: nil},
},
ts,
)
newOid, packContents = createCommit(
t,
tmpDir,
problemAlias,
getReference(t, problemAlias, "refs/meta/review", ts),
map[string]io.Reader{
"ledger": strings.NewReader("{\"uuid\":\"00000000-0000-0000-0000-000000000000\",\"author\":\"foo\",\"date\":0,\"Summary\":\"Good!\"}\n" +
"{\"uuid\":\"00000000-0000-0000-0000-000000000001\",\"author\":\"bar\",\"date\":1,\"Summary\":\"Good!\"}\n"),
},
"Foo\n\nIteration: 00000000-0000-0000-0000-000000000001",
log,
)
push(
t,
tmpDir,
editorAuthorization,
problemAlias,
"refs/meta/review",
getReference(t, problemAlias, "refs/meta/review", ts),
newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{
Line: fmt.Sprintf("ng refs/meta/review review-bad-layout: failed to find %s in review iteration\n", reviewCommitHash),
Err: nil,
},
},
ts,
)
newOid, packContents = createCommit(
t,
tmpDir,
problemAlias,
getReference(t, problemAlias, "refs/meta/review", ts),
map[string]io.Reader{
"ledger": strings.NewReader("{\"uuid\":\"00000000-0000-0000-0000-000000000000\",\"author\":\"foo\",\"date\":0,\"Summary\":\"Good!\"}\n" +
"{\"uuid\":\"00000000-0000-0000-0000-000000000001\",\"author\":\"bar\",\"date\":1,\"Summary\":\"Good!\"}\n"),
reviewCommitHash: strings.NewReader("{\"author\":\"foo\",\"date\":0,\"done\":false,\"filename\":\"cases/0.in\",\"iterationUuid\":\"00000000-0000-0000-0000-000000000000\",\"message\":\"gaslighting!\",\"uuid\":\"00000000-0000-0000-0000-000000000001\"}\n" +
"{\"author\":\"bar\",\"date\":0,\"done\":true,\"filename\":\"cases/0.in\",\"iterationUuid\":\"00000000-0000-0000-0000-000000000001\",\"message\":\"Good!\",\"uuid\":\"00000000-0000-0000-0000-000000000002\",\"parentUuid\":\"00000000-0000-0000-0000-000000000001\"}\n"),
},
"Foo\n\nIteration: 00000000-0000-0000-0000-000000000001",
log,
)
push(
t,
tmpDir,
editorAuthorization,
problemAlias,
"refs/meta/review",
getReference(t, problemAlias, "refs/meta/review", ts),
newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{
Line: fmt.Sprintf("ng refs/meta/review review-bad-layout: unexpected non-append to %s\n", reviewCommitHash),
Err: nil,
},
},
ts,
)
newOid, packContents = createCommit(
t,
tmpDir,
problemAlias,
getReference(t, problemAlias, "refs/meta/review", ts),
map[string]io.Reader{
"ledger": strings.NewReader("{\"uuid\":\"00000000-0000-0000-0000-000000000000\",\"author\":\"foo\",\"date\":0,\"Summary\":\"Good!\"}\n" +
"{\"uuid\":\"00000000-0000-0000-0000-000000000001\",\"author\":\"bar\",\"date\":1,\"Summary\":\"Good!\"}\n"),
reviewCommitHash: strings.NewReader("{\"author\":\"foo\",\"date\":0,\"done\":false,\"filename\":\"cases/0.in\",\"iterationUuid\":\"00000000-0000-0000-0000-000000000000\",\"message\":\"Good!\",\"uuid\":\"00000000-0000-0000-0000-000000000001\"}\n" +
"{\"author\":\"bar\",\"date\":0,\"done\":true,\"filename\":\"cases/0.in\",\"iterationUuid\":\"00000000-0000-0000-0000-000000000001\",\"message\":\"Good!\",\"uuid\":\"00000000-0000-0000-0000-000000000002\",\"parentUuid\":\"00000000-0000-0000-0000-000000000001\"}\n"),
},
"Foo\n\nIteration: 00000000-0000-0000-0000-000000000001",
log,
)
push(
t,
tmpDir,
editorAuthorization,
problemAlias,
"refs/meta/review",
getReference(t, problemAlias, "refs/meta/review", ts),
newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ok refs/meta/review\n", Err: nil},
},
ts,
)
}
// Try a few more invalid publish paths
{
push(
t,
tmpDir,
adminAuthorization,
problemAlias,
"refs/heads/published",
getReference(t, problemAlias, "refs/heads/published", ts),
getReference(t, problemAlias, "refs/changes/initial2", ts),
githttp.EmptyPackfile,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ng refs/heads/published published-must-point-to-commit-in-master\n", Err: nil},
},
ts,
)
}
// Publish second version
{
push(
t,
tmpDir,
adminAuthorization,
problemAlias,
"refs/heads/master",
getReference(t, problemAlias, "refs/heads/master", ts),
getReference(t, problemAlias, "refs/changes/initial2", ts),
githttp.EmptyPackfile,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ok refs/heads/master\n", Err: nil},
},
ts,
)
}
}
func TestPushGitbomb(t *testing.T) {
tmpDir, err := ioutil.TempDir("", t.Name())
if err != nil {
t.Fatalf("Failed to create directory: %v", err)
}
if os.Getenv("PRESERVE") == "" {
defer os.RemoveAll(tmpDir)
}
log := base.StderrLog()
ts := httptest.NewServer(GitHandler(
tmpDir,
NewGitProtocol(authorize, nil, false, OverallWallTimeHardLimit, fakeInteractiveSettingsCompiler, log),
&base.NoOpMetrics{},
log,
))
defer ts.Close()
problemAlias := "sumas"
{
repo, err := InitRepository(path.Join(tmpDir, problemAlias))
if err != nil {
t.Fatalf("Failed to initialize git repository: %v", err)
}
repo.Free()
}
repo, err := git.OpenRepository(path.Join(tmpDir, problemAlias))
if err != nil {
t.Fatalf("Failed to open repository: %v", err)
}
defer repo.Free()
odb, err := repo.Odb()
if err != nil {
t.Fatalf("Failed to open odb: %v", err)
}
defer odb.Free()
mempack, err := git.NewMempack(odb)
if err != nil {
t.Fatalf("Failed to create mempack: %v", err)
}
oid, err := repo.CreateBlobFromBuffer([]byte{})
if err != nil {
t.Fatalf("Failed to create blob: %v", err)
}
fileMode := git.Filemode(0100644)
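// Build a git bomb: each iteration creates a tree whose two entries both
// point at the previous level, so 24 iterations describe 2^24 leaf paths
// while the packfile itself stays tiny.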
for i := 0; i < 24; i++ {
log.Debug("Creating gitbomb", "iteration", i)
treebuilder, err := repo.TreeBuilder()
if err != nil {
t.Fatalf("Failed to create TreeBuilder: %v", err)
}
for _, filename := range []string{"0", "1"} {
if err = treebuilder.Insert(filename, oid, fileMode); err != nil {
t.Fatalf("Failed to insert into TreeBuilder: %v", err)
}
}
oid, err = treebuilder.Write()
if err != nil {
t.Fatalf("Failed to write tree: %v", err)
}
treebuilder.Free()
fileMode = 040000
}
tree, err := repo.LookupTree(oid)
if err != nil {
t.Fatalf("Failed to lookup tree: %v", err)
}
log.Debug("Tree looked up")
newCommitID, err := repo.CreateCommit(
"",
&git.Signature{
Name: "author",
Email: "[email protected]",
When: time.Unix(0, 0).In(time.UTC),
},
&git.Signature{
Name: "committer",
Email: "[email protected]",
When: time.Unix(0, 0).In(time.UTC),
},
"Initial commit",
tree,
)
if err != nil {
t.Fatalf("Failed to create commit: %v", err)
}
packContents, err := mempack.Dump(repo)
if err != nil {
t.Fatalf("Failed to create mempack: %v", err)
}
push(
t,
tmpDir,
editorAuthorization,
problemAlias,
"refs/changes/initial",
&git.Oid{}, newCommitID,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ng refs/changes/initial too-many-objects-in-packfile\n", Err: nil},
},
ts,
)
}
func TestConfig(t *testing.T) {
tmpDir, err := ioutil.TempDir("", t.Name())
if err != nil {
t.Fatalf("Failed to create directory: %v", err)
}
if os.Getenv("PRESERVE") == "" {
defer os.RemoveAll(tmpDir)
}
log := base.StderrLog()
ts := httptest.NewServer(GitHandler(
tmpDir,
NewGitProtocol(authorize, nil, false, OverallWallTimeHardLimit, fakeInteractiveSettingsCompiler, log),
&base.NoOpMetrics{},
log,
))
defer ts.Close()
problemAlias := "sumas"
{
repo, err := InitRepository(path.Join(tmpDir, problemAlias))
if err != nil {
t.Fatalf("Failed to initialize git repository: %v", err)
}
repo.Free()
}
// Normal mirror update.
oldOid := &git.Oid{}
newOid, packContents := createCommit(
t,
tmpDir,
problemAlias,
oldOid,
map[string]io.Reader{
"config.json": strings.NewReader(`{
"publishing":{
"mode":"mirror",
"repository":"https://github.com/omegaup/test.git"
}
}`),
},
"Initial commit",
log,
)
push(
t,
tmpDir,
editorAuthorization,
problemAlias,
"refs/meta/config",
oldOid, newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ng refs/meta/config restricted-ref\n", Err: nil},
},
ts,
)
push(
t,
tmpDir,
adminAuthorization,
problemAlias,
"refs/meta/config",
oldOid, newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ok refs/meta/config\n", Err: nil},
},
ts,
)
// Normal subdirectory update.
oldOid = getReference(t, problemAlias, "refs/meta/config", ts)
newOid, packContents = createCommit(
t,
tmpDir,
problemAlias,
oldOid,
map[string]io.Reader{
"config.json": strings.NewReader(`{
"publishing":{
"mode":"subdirectory",
"repository":"https://github.com/omegaup/test.git",
"target":"subdirectory"
}
}`),
},
"Initial commit",
log,
)
push(
t,
tmpDir,
adminAuthorization,
problemAlias,
"refs/meta/config",
oldOid, newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ok refs/meta/config\n", Err: nil},
},
ts,
)
// Empty tree.
oldOid = getReference(t, problemAlias, "refs/meta/config", ts)
newOid, packContents = createCommit(
t,
tmpDir,
problemAlias,
oldOid,
map[string]io.Reader{},
"Initial commit",
log,
)
push(
t,
tmpDir,
adminAuthorization,
problemAlias,
"refs/meta/config",
oldOid, newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ok refs/meta/config\n", Err: nil},
},
ts,
)
// Extra files.
oldOid = getReference(t, problemAlias, "refs/meta/config", ts)
newOid, packContents = createCommit(
t,
tmpDir,
problemAlias,
oldOid,
map[string]io.Reader{
"garbage.txt": strings.NewReader(""),
"config.json": strings.NewReader(`{
"publishing":{
"mode":"mirror",
"repository":"https://github.com/omegaup/test.git"
}
}`),
},
"Initial commit",
log,
)
push(
t,
tmpDir,
adminAuthorization,
problemAlias,
"refs/meta/config",
oldOid, newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ng refs/meta/config config-bad-layout: refs/meta/config can only contain a single config.json file\n", Err: nil},
},
ts,
)
// Wrong filename.
oldOid = getReference(t, problemAlias, "refs/meta/config", ts)
newOid, packContents = createCommit(
t,
tmpDir,
problemAlias,
oldOid,
map[string]io.Reader{
"config.txt": strings.NewReader(`{
"publishing":{
"mode":"mirror",
"repository":"https://github.com/omegaup/test.git"
}
}`),
},
"Initial commit",
log,
)
push(
t,
tmpDir,
adminAuthorization,
problemAlias,
"refs/meta/config",
oldOid, newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ng refs/meta/config config-bad-layout: refs/meta/config can only contain a single config.json file\n", Err: nil},
},
ts,
)
// Wrong format.
oldOid = getReference(t, problemAlias, "refs/meta/config", ts)
newOid, packContents = createCommit(
t,
tmpDir,
problemAlias,
oldOid,
map[string]io.Reader{
"config.json": strings.NewReader("invalid json"),
},
"Initial commit",
log,
)
push(
t,
tmpDir,
adminAuthorization,
problemAlias,
"refs/meta/config",
oldOid, newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ng refs/meta/config json-parse-error: config.json: invalid character 'i' looking for beginning of value\n", Err: nil},
},
ts,
)
// Wrong publishing mode.
oldOid = getReference(t, problemAlias, "refs/meta/config", ts)
newOid, packContents = createCommit(
t,
tmpDir,
problemAlias,
oldOid,
map[string]io.Reader{
"config.json": strings.NewReader(`{
"publishing":{
"mode":"invalid"
}
}`),
},
"Initial commit",
log,
)
push(
t,
tmpDir,
adminAuthorization,
problemAlias,
"refs/meta/config",
oldOid, newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ng refs/meta/config config-invalid-publishing-mode\n", Err: nil},
},
ts,
)
// Repository is not an absolute URL.
oldOid = getReference(t, problemAlias, "refs/meta/config", ts)
newOid, packContents = createCommit(
t,
tmpDir,
problemAlias,
oldOid,
map[string]io.Reader{
"config.json": strings.NewReader(`{
"publishing":{
"mode":"mirror",
"repository":"invalid"
}
}`),
},
"Initial commit",
log,
)
push(
t,
tmpDir,
adminAuthorization,
problemAlias,
"refs/meta/config",
oldOid, newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ng refs/meta/config config-repository-not-absolute-url\n", Err: nil},
},
ts,
)
// Missing target for subdirectory.
oldOid = getReference(t, problemAlias, "refs/meta/config", ts)
newOid, packContents = createCommit(
t,
tmpDir,
problemAlias,
oldOid,
map[string]io.Reader{
"config.json": strings.NewReader(`{
"publishing":{
"mode":"subdirectory",
"repository":"https://github.com/omegaup/test.git"
}
}`),
},
"Initial commit",
log,
)
push(
t,
tmpDir,
adminAuthorization,
problemAlias,
"refs/meta/config",
oldOid, newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ng refs/meta/config config-subdirectory-missing-target\n", Err: nil},
},
ts,
)
}
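// getProblemDistribSettings reads settings.distrib.json from the given tree
// and unmarshals it into a common.LiteralInput.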
func getProblemDistribSettings(repo *git.Repository, tree *git.Tree) (*common.LiteralInput, error) {
settingsJSONEntry, err := tree.EntryByPath("settings.distrib.json")
if err != nil {
return nil, err
}
settingsJSONBlob, err := repo.LookupBlob(settingsJSONEntry.Id)
if err != nil {
return nil, err
}
defer settingsJSONBlob.Free()
var settings common.LiteralInput
if err := json.Unmarshal(settingsJSONBlob.Contents(), &settings); err != nil {
return nil, err
}
return &settings, nil
}
func TestInteractive(t *testing.T) {
tmpDir, err := ioutil.TempDir("", t.Name())
if err != nil {
t.Fatalf("Failed to create directory: %v", err)
}
if os.Getenv("PRESERVE") == "" {
defer os.RemoveAll(tmpDir)
}
log := base.StderrLog()
ts := httptest.NewServer(GitHandler(
tmpDir,
NewGitProtocol(
authorize,
nil,
true,
OverallWallTimeHardLimit,
&FakeInteractiveSettingsCompiler{
Settings: &common.InteractiveSettings{
Interfaces: map[string]map[string]*common.InteractiveInterface{},
Templates: map[string]string{},
Main: "",
ModuleName: "",
ParentLang: "",
LibinteractiveVersion: "0.0",
},
Err: nil,
},
log,
),
&base.NoOpMetrics{},
log,
))
defer ts.Close()
problemAlias := "sumas"
repo, err := InitRepository(path.Join(tmpDir, problemAlias))
if err != nil {
t.Fatalf("Failed to initialize git repository: %v", err)
}
defer repo.Free()
{
newOid, packContents := createCommit(
t,
tmpDir,
problemAlias,
&git.Oid{},
map[string]io.Reader{
"settings.json": strings.NewReader(gitservertest.DefaultSettingsJSON),
"cases/0.in": strings.NewReader("1 2"),
"cases/0.out": strings.NewReader("3"),
"statements/es.markdown": strings.NewReader(`Sumas
# Examples
||input
Example 1
||output
Example 1
||description
This example won't be copied since there are explicit example files.
||end
`),
"interactive/sums.idl": strings.NewReader(`// sums.idl
interface Main {
};
interface sums {
int sums(int a, int b);
};`),
"interactive/Main.cpp": strings.NewReader(`// Main.cpp
#include <stdio.h>
#include "sums.h"
int main(int argc, char* argv[]) {
int a, b;
scanf("%d %d\n", &a, &b);
printf("%d\n", sums(a, b));
}`),
"interactive/Main.distrib.cpp": strings.NewReader(`// Main.cpp
#include <stdio.h>
#include "sums.h"
int main(int argc, char* argv[]) {
// Este es un ejemplo.
int a, b;
scanf("%d %d\n", &a, &b);
printf("%d\n", sums(a, b));
}`),
"interactive/examples/sample.in": strings.NewReader("0 1"),
"interactive/examples/sample.out": strings.NewReader("1"),
},
"Initial commit",
log,
)
push(
t,
tmpDir,
adminAuthorization,
problemAlias,
"refs/heads/master",
&git.Oid{}, newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ok refs/heads/master\n", Err: nil},
},
ts,
)
}
masterCommit, err := repo.LookupCommit(
getReference(t, problemAlias, "refs/heads/master", ts),
)
if err != nil {
t.Fatalf("Failed to lookup commit: %v", err)
}
defer masterCommit.Free()
masterTree, err := masterCommit.Tree()
if err != nil {
t.Fatalf("Failed to lookup tree: %v", err)
}
defer masterTree.Free()
problemSettings, err := getProblemSettings(
repo,
masterTree,
)
if err != nil {
t.Fatalf("failed to get problem settings: %v", err)
}
if problemSettings.Interactive == nil {
t.Fatalf("Failed to produce interactive settings")
}
problemDistribSettings, err := getProblemDistribSettings(
repo,
masterTree,
)
if err != nil {
t.Fatalf("failed to get problem distributable settings: %v", err)
}
if problemSettings.Limits != *problemDistribSettings.Limits {
t.Errorf("limits expected %q, got %q", problemSettings.Limits, *problemDistribSettings.Limits)
}
if problemDistribSettings.Interactive == nil {
t.Fatalf("Failed to produce interactive settings")
}
expectedExampleCases := map[string]*common.LiteralCaseSettings{
"sample": {
Input: "0 1",
ExpectedOutput: "1",
Weight: big.NewRat(1, 1),
},
}
if !reflect.DeepEqual(expectedExampleCases, problemDistribSettings.Cases) {
t.Errorf(
"Mismatched example cases. expected %v, got %v",
expectedExampleCases,
problemDistribSettings.Cases,
)
}
if "" == problemDistribSettings.Interactive.MainSource {
t.Errorf("Missing main source file")
}
}
func TestExampleCases(t *testing.T) {
tmpDir, err := ioutil.TempDir("", t.Name())
if err != nil {
t.Fatalf("Failed to create directory: %v", err)
}
if os.Getenv("PRESERVE") == "" {
defer os.RemoveAll(tmpDir)
}
log := base.StderrLog()
ts := httptest.NewServer(GitHandler(
tmpDir,
NewGitProtocol(authorize, nil, true, OverallWallTimeHardLimit, fakeInteractiveSettingsCompiler, log),
&base.NoOpMetrics{},
log,
))
defer ts.Close()
problemAlias := "sumas"
repo, err := InitRepository(path.Join(tmpDir, problemAlias))
if err != nil {
t.Fatalf("Failed to initialize git repository: %v", err)
}
defer repo.Free()
parentOid := &git.Oid{}
{
newOid, packContents := createCommit(
t,
tmpDir,
problemAlias,
parentOid,
map[string]io.Reader{
"settings.json": strings.NewReader(gitservertest.DefaultSettingsJSON),
"cases/0.in": strings.NewReader("1 2"),
"cases/0.out": strings.NewReader("3"),
"statements/es.markdown": strings.NewReader("Sumas"),
},
"Initial commit",
log,
)
push(
t,
tmpDir,
adminAuthorization,
problemAlias,
"refs/heads/master",
parentOid, newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ok refs/heads/master\n", Err: nil},
},
ts,
)
masterCommit, err := repo.LookupCommit(
getReference(t, problemAlias, "refs/heads/master", ts),
)
if err != nil {
t.Fatalf("Failed to lookup commit: %v", err)
}
defer masterCommit.Free()
parentOid = masterCommit.Id()
masterTree, err := masterCommit.Tree()
if err != nil {
t.Fatalf("Failed to lookup tree: %v", err)
}
defer masterTree.Free()
problemDistribSettings, err := getProblemDistribSettings(
repo,
masterTree,
)
if err != nil {
t.Fatalf("failed to get problem distributable settings: %v", err)
}
expectedExampleCases := map[string]*common.LiteralCaseSettings{}
if !reflect.DeepEqual(expectedExampleCases, problemDistribSettings.Cases) {
t.Errorf(
"Mismatched example cases. expected %v, got %v",
expectedExampleCases,
problemDistribSettings.Cases,
)
}
}
{
newOid, packContents := createCommit(
t,
tmpDir,
problemAlias,
parentOid,
map[string]io.Reader{
"settings.json": strings.NewReader(gitservertest.DefaultSettingsJSON),
"cases/0.in": strings.NewReader("1 2"),
"cases/0.out": strings.NewReader("3"),
"statements/es.markdown": strings.NewReader(`Sumas
# Examples
||input
1 2
||output
3
||input
2 3
||output
5
||end
`),
},
"Initial commit",
log,
)
push(
t,
tmpDir,
adminAuthorization,
problemAlias,
"refs/heads/master",
parentOid, newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ok refs/heads/master\n", Err: nil},
},
ts,
)
masterCommit, err := repo.LookupCommit(
getReference(t, problemAlias, "refs/heads/master", ts),
)
if err != nil {
t.Fatalf("Failed to lookup commit: %v", err)
}
defer masterCommit.Free()
parentOid = masterCommit.Id()
masterTree, err := masterCommit.Tree()
if err != nil {
t.Fatalf("Failed to lookup tree: %v", err)
}
defer masterTree.Free()
problemDistribSettings, err := getProblemDistribSettings(
repo,
masterTree,
)
if err != nil {
t.Fatalf("failed to get problem distributable settings: %v", err)
}
expectedExampleCases := map[string]*common.LiteralCaseSettings{
"statement_001": {
Input: "1 2",
ExpectedOutput: "3",
Weight: big.NewRat(1, 1),
},
"statement_002": {
Input: "2 3",
ExpectedOutput: "5",
Weight: big.NewRat(1, 1),
},
}
if !reflect.DeepEqual(expectedExampleCases, problemDistribSettings.Cases) {
t.Errorf(
"Mismatched example cases. expected %v, got %v",
expectedExampleCases,
problemDistribSettings.Cases,
)
}
}
{
newOid, packContents := createCommit(
t,
tmpDir,
problemAlias,
parentOid,
map[string]io.Reader{
"settings.json": strings.NewReader(gitservertest.DefaultSettingsJSON),
"examples/sample.in": strings.NewReader("1 2"),
"examples/sample.out": strings.NewReader("3"),
"cases/0.in": strings.NewReader("1 2"),
"cases/0.out": strings.NewReader("3"),
"statements/es.markdown": strings.NewReader(`Sumas
# Examples
||input
1 2
||output
3
||input
2 3
||output
5
||end
`),
},
"Initial commit",
log,
)
push(
t,
tmpDir,
adminAuthorization,
problemAlias,
"refs/heads/master",
parentOid, newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ok refs/heads/master\n", Err: nil},
},
ts,
)
masterCommit, err := repo.LookupCommit(
getReference(t, problemAlias, "refs/heads/master", ts),
)
if err != nil {
t.Fatalf("Failed to lookup commit: %v", err)
}
defer masterCommit.Free()
parentOid = masterCommit.Id()
masterTree, err := masterCommit.Tree()
if err != nil {
t.Fatalf("Failed to lookup tree: %v", err)
}
defer masterTree.Free()
problemDistribSettings, err := getProblemDistribSettings(
repo,
masterTree,
)
if err != nil {
t.Fatalf("failed to get problem distributable settings: %v", err)
}
expectedExampleCases := map[string]*common.LiteralCaseSettings{
"sample": {
Input: "1 2",
ExpectedOutput: "3",
Weight: big.NewRat(1, 1),
},
}
if !reflect.DeepEqual(expectedExampleCases, problemDistribSettings.Cases) {
t.Errorf(
"Mismatched example cases. expected %v, got %v",
expectedExampleCases,
problemDistribSettings.Cases,
)
}
}
}
func TestExtractExampleCasesFromStatement(t *testing.T) {
for _, testCase := range []struct {
statement string
expectedOutput map[string]*common.LiteralCaseSettings
}{
{
statement: `Sumas
||input
First input
||output
First output
||description
yeah...
||input
Second input
||output
Second output
||end`,
expectedOutput: map[string]*common.LiteralCaseSettings{
"statement_001": {
Input: "First input",
ExpectedOutput: "First output",
Weight: big.NewRat(1, 1),
},
"statement_002": {
Input: "Second input",
ExpectedOutput: "Second output",
Weight: big.NewRat(1, 1),
},
},
},
{
statement: `Sumas
||input
Foo
||description
why is this missing an output?
||input
Foo
||input
Another missing output.
||end`,
expectedOutput: map[string]*common.LiteralCaseSettings{},
},
{
statement: `Sumas
||input
Foo
||output
missing the end thingy`,
expectedOutput: map[string]*common.LiteralCaseSettings{},
},
} {
actualOutput := extractExampleCasesFromStatement(testCase.statement)
if !reflect.DeepEqual(testCase.expectedOutput, actualOutput) {
t.Errorf(
"Failed to extract examples from %v. expected %v, got %v",
testCase.statement,
testCase.expectedOutput,
actualOutput,
)
}
}
}
func TestTests(t *testing.T) {
tmpDir, err := ioutil.TempDir("", t.Name())
if err != nil {
t.Fatalf("Failed to create directory: %v", err)
}
if os.Getenv("PRESERVE") == "" {
defer os.RemoveAll(tmpDir)
}
log := base.StderrLog()
ts := httptest.NewServer(GitHandler(
tmpDir,
NewGitProtocol(authorize, nil, true, OverallWallTimeHardLimit, fakeInteractiveSettingsCompiler, log),
&base.NoOpMetrics{},
log,
))
defer ts.Close()
problemAlias := "sumas"
repo, err := InitRepository(path.Join(tmpDir, problemAlias))
if err != nil {
t.Fatalf("Failed to initialize git repository: %v", err)
}
defer repo.Free()
for idx, testcase := range []struct {
name string
extraContents map[string]io.Reader
status string
}{
{
"tests is not a directory",
map[string]io.Reader{
"tests": strings.NewReader(""),
},
"ng refs/heads/master tests-bad-layout: tests/ directory is not a tree\n",
},
{
"Missing tests/tests.json",
map[string]io.Reader{
"tests/foo": strings.NewReader(""),
},
"ng refs/heads/master tests-bad-layout: tests/tests.json is missing\n",
},
{
"Corrupt settings.json",
map[string]io.Reader{
"tests/tests.json": strings.NewReader(""),
},
"ng refs/heads/master json-parse-error: tests/tests.json: EOF\n",
},
{
"Unknown fields",
map[string]io.Reader{
"tests/tests.json": strings.NewReader(`{
"foo": "bar"
}`),
},
"ng refs/heads/master json-parse-error: tests/tests.json: json: unknown field \"foo\"\n",
},
{
"Missing validator",
map[string]io.Reader{
"tests/tests.json": strings.NewReader(`{
"solutions": [
{
"filename": "foo.py"
}
]
}`),
},
"ng refs/heads/master tests-bad-layout: tests/foo.py is missing: the path 'foo.py' does not exist in the given tree\n",
},
{
"Relative paths",
map[string]io.Reader{
"tests/tests.json": strings.NewReader(`{
"solutions": [
{
"filename": "../solutions/foo.py"
}
]
}`),
},
"ng refs/heads/master tests-bad-layout: tests/../solutions/foo.py is missing: the path '..' does not exist in the given tree\n",
},
{
"Missing score_range and verdict",
map[string]io.Reader{
"tests/tests.json": strings.NewReader(`{
"solutions": [
{
"filename": "foo.py"
}
]
}`),
"tests/foo.py": strings.NewReader("print 1"),
},
"ng refs/heads/master tests-bad-layout: score_range or validator for foo.py in tests/tests.json should be set\n",
},
{
"Missing score_range",
map[string]io.Reader{
"tests/tests.json": strings.NewReader(`{
"solutions": [
{
"filename": "foo.py",
"score_range": [1]
}
]
}`),
"tests/foo.py": strings.NewReader("print 1"),
},
"ng refs/heads/master json-parse-error: tests/tests.json: score_range should be an array with two numbers\n",
},
{
"Bad score_range",
map[string]io.Reader{
"tests/tests.json": strings.NewReader(`{
"solutions": [
{
"filename": "foo.py",
"score_range": [-1, 10]
}
]
}`),
"tests/foo.py": strings.NewReader("print 1"),
},
"ng refs/heads/master json-parse-error: tests/tests.json: values for score_range should be in the interval [0, 1]\n",
},
{
"Bad verdict",
map[string]io.Reader{
"tests/tests.json": strings.NewReader(`{
"solutions": [
{
"filename": "foo.py",
"score_range": [0, 1],
"verdict": "COOL VERDICT, BRO."
}
]
}`),
"tests/foo.py": strings.NewReader("print 1"),
},
"ng refs/heads/master tests-bad-layout: verdict for foo.py in tests/tests.json is not valid\n",
},
{
"Bad validator",
map[string]io.Reader{
"tests/tests.json": strings.NewReader(`{
"solutions": [
{
"filename": "solutions/foo.py",
"verdict": "AC"
}
],
"inputs": {
"filename": "test-validator.py"
}
}`),
"tests/solutions/foo.py": strings.NewReader("print 1"),
},
"ng refs/heads/master tests-bad-layout: tests/test-validator.py is missing: the path 'test-validator.py' does not exist in the given tree\n",
},
{
"Valid",
map[string]io.Reader{
"tests/tests.json": strings.NewReader(`{
"solutions": [
{
"filename": "solutions/foo.py",
"score_range": [1, 1],
"verdict": "AC"
}
]
}`),
"tests/solutions/foo.py": strings.NewReader("print 1"),
},
"ok refs/heads/master\n",
},
} {
t.Run(fmt.Sprintf("%d %s", idx, testcase.name), func(t *testing.T) {
contents := map[string]io.Reader{
"settings.json": strings.NewReader(gitservertest.DefaultSettingsJSON),
"cases/0.in": strings.NewReader("1 2"),
"cases/0.out": strings.NewReader("3"),
"statements/es.markdown": strings.NewReader("Sumas"),
}
for name, r := range testcase.extraContents {
contents[name] = r
}
newOid, packContents := createCommit(
t,
tmpDir,
problemAlias,
&git.Oid{},
contents,
"Initial commit",
log,
)
push(
t,
tmpDir,
adminAuthorization,
problemAlias,
"refs/heads/master",
&git.Oid{}, newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: testcase.status, Err: nil},
},
ts,
)
})
}
}
|
[
"\"PRESERVE\"",
"\"PRESERVE\"",
"\"PRESERVE\"",
"\"PRESERVE\"",
"\"PRESERVE\"",
"\"PRESERVE\"",
"\"PRESERVE\"",
"\"PRESERVE\""
] |
[] |
[
"PRESERVE"
] |
[]
|
["PRESERVE"]
|
go
| 1 | 0 | |
rtrss/config.py
|
import os
import logging
import importlib
# All configuration defaults are set in this module
TRACKER_HOST = 'rutracker.org'
# Timezone for the tracker timestamps
TZNAME = 'Europe/Moscow'
ANNOUNCE_URLS = [
'http://bt.{host}/ann'.format(host=TRACKER_HOST),
'http://bt2.{host}/ann'.format(host=TRACKER_HOST),
'http://bt3.{host}/ann'.format(host=TRACKER_HOST),
'http://bt4.{host}/ann'.format(host=TRACKER_HOST)
]
LOGLEVEL = logging.INFO
LOG_FORMAT_LOGENTRIES = '%(levelname)s %(name)s %(message)s'
LOG_FORMAT_BRIEF = '%(asctime)s %(levelname)s %(name)s %(message)s'
ADMIN_LOGIN = os.environ.get('ADMIN_LOGIN', 'admin')
ADMIN_PASSWORD = os.environ.get('ADMIN_PASSWORD', 'admin')
ADMIN_EMAIL = os.environ.get('ADMIN_EMAIL', 'admin@localhost')
IP = '0.0.0.0'
PORT = 8080
APP_ENVIRONMENT = os.environ.get('RTRSS_ENVIRONMENT', 'production')
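# Load the environment-specific settings module (e.g. rtrss.config_production)
# and let its UPPERCASE names override the defaults defined above.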
_mod = importlib.import_module('rtrss.config_{}'.format(APP_ENVIRONMENT))
_envconf = {k: v for k, v in _mod.__dict__.items() if k == k.upper()}
globals().update(_envconf)
|
[] |
[] |
[
"ADMIN_PASSWORD",
"ADMIN_EMAIL",
"ADMIN_LOGIN",
"RTRSS_ENVIRONMENT"
] |
[]
|
["ADMIN_PASSWORD", "ADMIN_EMAIL", "ADMIN_LOGIN", "RTRSS_ENVIRONMENT"]
|
python
| 4 | 0 | |
run/pubsub/e2e_test.py
|
# Copyright 2020 Google, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This tests the Pub/Sub to Cloud Run integration
import datetime
import os
import subprocess
import time
import uuid
from google.api_core.exceptions import NotFound
from google.cloud import logging_v2
from google.cloud import pubsub_v1
import pytest
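# Unique suffix so repeated or concurrent test runs do not collide on
# Cloud Run service, Pub/Sub topic, or container image names.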
SUFFIX = uuid.uuid4().hex[0:6]
PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"]
CLOUD_RUN_SERVICE = f"pubsub-test-{SUFFIX}"
TOPIC = f"pubsub-test_{SUFFIX}"
IMAGE_NAME = f"gcr.io/{PROJECT}/pubsub-test-{SUFFIX}"
@pytest.fixture
def container_image():
# Build container image for Cloud Run deployment
subprocess.run(
[
"gcloud",
"builds",
"submit",
"--tag",
IMAGE_NAME,
"--project",
PROJECT,
"--quiet",
],
check=True,
)
yield IMAGE_NAME
# Delete container image
subprocess.run(
[
"gcloud",
"container",
"images",
"delete",
IMAGE_NAME,
"--quiet",
"--project",
PROJECT,
],
check=True,
)
@pytest.fixture
def deployed_service(container_image):
# Deploy image to Cloud Run
subprocess.run(
[
"gcloud",
"run",
"deploy",
CLOUD_RUN_SERVICE,
"--image",
container_image,
"--region=us-central1",
"--project",
PROJECT,
"--platform=managed",
"--no-allow-unauthenticated",
],
check=True,
)
yield CLOUD_RUN_SERVICE
subprocess.run(
[
"gcloud",
"run",
"services",
"delete",
CLOUD_RUN_SERVICE,
"--platform=managed",
"--region=us-central1",
"--quiet",
"--project",
PROJECT,
],
check=True,
)
@pytest.fixture
def service_url(deployed_service):
# Get the URL for the cloud run service
service_url = subprocess.run(
[
"gcloud",
"run",
"--project",
PROJECT,
"--platform=managed",
"--region=us-central1",
"services",
"describe",
CLOUD_RUN_SERVICE,
"--format=value(status.url)",
],
stdout=subprocess.PIPE,
check=True,
).stdout.strip()
yield service_url.decode()
@pytest.fixture()
def pubsub_topic():
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(PROJECT, TOPIC)
publisher.create_topic(request={"name": topic_path})
yield TOPIC
try:
publisher.delete_topic(request={"topic": topic_path})
except NotFound:
print("Topic not found, it was either never created or was already deleted.")
@pytest.fixture(autouse=True)
def pubsub_subscription(pubsub_topic, service_url):
# Create pubsub push subscription to Cloud Run Service
# Attach service account with Cloud Run Invoker role
# See tutorial for details on setting up service-account:
# https://cloud.google.com/run/docs/tutorials/pubsub
publisher = pubsub_v1.PublisherClient()
subscriber = pubsub_v1.SubscriberClient()
subscription_id = f"{pubsub_topic}_sub"
topic_path = publisher.topic_path(PROJECT, pubsub_topic)
subscription_path = subscriber.subscription_path(PROJECT, subscription_id)
push_config = pubsub_v1.types.PushConfig(
push_endpoint=service_url,
oidc_token=pubsub_v1.types.PushConfig.OidcToken(
service_account_email=f"cloud-run-invoker@{PROJECT}.iam.gserviceaccount.com"
),
)
# wrapping in 'with' block automatically calls close on gRPC channel
with subscriber:
subscriber.create_subscription(
request={
"name": subscription_path,
"topic": topic_path,
"push_config": push_config,
}
)
yield
subscriber = pubsub_v1.SubscriberClient()
# delete subscription
with subscriber:
try:
subscriber.delete_subscription(request={"subscription": subscription_path})
except NotFound:
print(
"Unable to delete - subscription either never created or already deleted."
)
def test_end_to_end(pubsub_topic):
# Post the message "Runner" to the topic
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(PROJECT, pubsub_topic)
message = "Runner"
data = message.encode("utf-8")
# When you publish a message, the client returns a future.
future = publisher.publish(topic_path, data)
future.result()
# Check the logs for "Hello Runner"
time.sleep(20) # Slight delay writing to stackdriver
client = logging_v2.LoggingServiceV2Client()
resource_names = [f"projects/{PROJECT}"]
    # We add a timestamp filter to make the query faster.
now = datetime.datetime.now(datetime.timezone.utc)
filter_date = now - datetime.timedelta(minutes=1)
filters = (
f"timestamp>=\"{filter_date.isoformat('T')}\" "
"resource.type=cloud_run_revision "
f"AND resource.labels.service_name={CLOUD_RUN_SERVICE} "
)
    # Retry a maximum of 10 times to find results in stackdriver
found = False
for x in range(10):
iterator = client.list_log_entries(resource_names, filter_=filters)
for entry in iterator:
if entry.text_payload == "Hello Runner!":
found = True
break
# When message found, exit loop
if found is True:
break
time.sleep(5) # Slight delay before retry
assert found
|
[] |
[] |
[
"GOOGLE_CLOUD_PROJECT"
] |
[]
|
["GOOGLE_CLOUD_PROJECT"]
|
python
| 1 | 0 | |
t2t_bert/test/test_oqmrc_final.py
|
import sys,os
sys.path.append("..")
from model_io import model_io
import numpy as np
import tensorflow as tf
from example import bert_classifier
from bunch import Bunch
from example import feature_writer, write_to_tfrecords, classifier_processor
from data_generator import tokenization
from data_generator import tf_data_utils
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
	"eval_data_file", None,
	"Path to the evaluation data file.")
flags.DEFINE_string(
	"output_file", None,
	"Path to the output file.")
flags.DEFINE_string(
	"config_file", None,
	"Path to the BERT config json file.")
flags.DEFINE_string(
	"init_checkpoint", None,
	"Path to the initial checkpoint to warm-start from.")
flags.DEFINE_string(
	"result_file", None,
	"Path to the file the predictions are written to.")
flags.DEFINE_string(
	"vocab_file", None,
	"Path to the vocabulary file.")
flags.DEFINE_string(
	"label_id", None,
	"Path to the label id file.")
flags.DEFINE_integer(
	"max_length", 128,
	"Maximum input sequence length.")
flags.DEFINE_string(
	"train_file", None,
	"Path to the training tfrecord file.")
flags.DEFINE_string(
	"dev_file", None,
	"Path to the dev tfrecord file.")
flags.DEFINE_string(
	"model_output", None,
	"Directory where model checkpoints are saved.")
flags.DEFINE_string(
	"gpu_id", None,
	"GPU id(s) exported to CUDA_VISIBLE_DEVICES.")
flags.DEFINE_integer(
	"epoch", 5,
	"Number of training epochs.")
flags.DEFINE_integer(
	"num_classes", 3,
	"Number of classes (answer choices).")
flags.DEFINE_integer(
	"train_size", 249847,
	"Number of training examples.")
flags.DEFINE_integer(
	"batch_size", 16,
	"Batch size.")
graph = tf.Graph()
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
with graph.as_default():
import json
# config = json.load(open("/data/xuht/bert/chinese_L-12_H-768_A-12/bert_config.json", "r"))
# init_checkpoint = "/data/xuht/bert/chinese_L-12_H-768_A-12/bert_model.ckpt"
config = json.load(open(FLAGS.config_file))
init_checkpoint = FLAGS.init_checkpoint
# init_checkpoint = "/data/xuht/ai_challenge_cqmrc/bert/concat/model/oqmrc.ckpt"
config = Bunch(config)
config.use_one_hot_embeddings = True
config.scope = "bert"
config.dropout_prob = 0.1
config.label_type = "single_label"
os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu_id
sess = tf.Session()
num_train_steps = int(
FLAGS.train_size / FLAGS.batch_size * FLAGS.epoch)
num_warmup_steps = int(num_train_steps * 0.1)
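	# steps in one pass over the training data; used below to save a checkpoint once per epoch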
num_storage_steps = int(FLAGS.train_size / FLAGS.batch_size)
opt_config = Bunch({"init_lr":1e-5,
"num_train_steps":num_train_steps,
"num_warmup_steps":num_warmup_steps})
model_io_config = Bunch({"fix_lm":False})
model_io_fn = model_io.ModelIO(model_io_config)
num_choice = FLAGS.num_classes
max_seq_length = FLAGS.max_length
model_train_fn = bert_classifier.multichoice_model_fn_builder(config, num_choice, init_checkpoint,
reuse=None,
load_pretrained=True,
model_io_fn=model_io_fn,
model_io_config=model_io_config,
opt_config=opt_config)
model_eval_fn = bert_classifier.multichoice_model_fn_builder(config, num_choice, init_checkpoint,
reuse=True,
load_pretrained=True,
model_io_fn=model_io_fn,
model_io_config=model_io_config,
opt_config=opt_config)
def metric_fn(features, logits, loss):
print(logits.get_shape(), "===logits shape===")
pred_label = tf.argmax(logits, axis=-1, output_type=tf.int32)
prob = tf.nn.softmax(logits)
		correct = tf.equal(
tf.cast(pred_label, tf.int32),
tf.cast(features["label_ids"], tf.int32)
)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
return {"accuracy":accuracy, "loss":loss, "pred_label":pred_label, "label_ids":features["label_ids"]}
name_to_features = {
"input_ids":
tf.FixedLenFeature([max_seq_length*num_choice], tf.int64),
"input_mask":
tf.FixedLenFeature([max_seq_length*num_choice], tf.int64),
"segment_ids":
tf.FixedLenFeature([max_seq_length*num_choice], tf.int64),
"label_ids":
tf.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example.
"""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
for name in ["input_ids", "input_mask", "segment_ids"]:
example[name] = tf.reshape(example[name], [-1, max_seq_length])
return example
params = Bunch({})
params.epoch = FLAGS.epoch
params.batch_size = FLAGS.batch_size
train_features = tf_data_utils.train_input_fn(FLAGS.train_file,
_decode_record, name_to_features, params)
eval_features = tf_data_utils.eval_input_fn(FLAGS.dev_file,
_decode_record, name_to_features, params)
[train_op, train_loss, train_per_example_loss, train_logits] = model_train_fn(train_features, [], tf.estimator.ModeKeys.TRAIN)
[_, eval_loss, eval_per_example_loss, eval_logits] = model_eval_fn(eval_features, [], tf.estimator.ModeKeys.EVAL)
result = metric_fn(eval_features, eval_logits, eval_loss)
model_io_fn.set_saver()
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init_op)
def eval_fn(result):
i = 0
total_accuracy = 0
label, label_id = [], []
while True:
try:
eval_result = sess.run(result)
total_accuracy += eval_result["accuracy"]
label_id.extend(eval_result["label_ids"])
label.extend(eval_result["pred_label"])
i += 1
except tf.errors.OutOfRangeError:
print("End of dataset")
break
f1 = f1_score(label_id, label, average="macro")
accuracy = accuracy_score(label_id, label)
		print("test accuracy {} {}, f1 {}".format(total_accuracy/i,
accuracy, f1))
return total_accuracy/ i, f1
def train_fn(op, loss):
i = 0
cnt = 0
total_loss = 0.0
while True:
try:
[_, train_loss] = sess.run([op, loss])
total_loss += train_loss
i += 1
cnt += 1
if np.mod(i, num_storage_steps) == 0:
print(total_loss/cnt)
model_io_fn.save_model(sess, FLAGS.model_output+"/oqmrc_{}.ckpt".format(int(i/num_storage_steps)))
total_loss = 0
cnt = 0
except tf.errors.OutOfRangeError:
break
print("===========begin to train============")
train_fn(train_op, train_loss)
print("===========begin to eval============")
eval_fn(result)
model_io_fn.save_model(sess, FLAGS.model_output+"/oqmrc.ckpt")
if __name__ == "__main__":
flags.mark_flag_as_required("eval_data_file")
flags.mark_flag_as_required("output_file")
flags.mark_flag_as_required("config_file")
flags.mark_flag_as_required("init_checkpoint")
flags.mark_flag_as_required("result_file")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("train_file")
flags.mark_flag_as_required("dev_file")
flags.mark_flag_as_required("max_length")
flags.mark_flag_as_required("model_output")
flags.mark_flag_as_required("gpu_id")
flags.mark_flag_as_required("epoch")
flags.mark_flag_as_required("num_classes")
tf.app.run()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
src/main/java/com/twilio/Twilio.java
|
package com.twilio;
import com.twilio.exception.ApiException;
import com.twilio.exception.AuthenticationException;
import com.twilio.exception.CertificateValidationException;
import com.twilio.http.HttpMethod;
import com.twilio.http.NetworkHttpClient;
import com.twilio.http.Request;
import com.twilio.http.Response;
import com.twilio.http.TwilioRestClient;
import java.util.Objects;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.io.File;
/**
* Singleton class to initialize Twilio environment.
*/
public class Twilio {
public static final String VERSION = "8.25.1";
public static final String JAVA_VERSION = System.getProperty("java.version");
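    // Credentials, region and edge default to environment variables and can be
    // overridden at runtime through init() and the setters below.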
private static String username = System.getenv("TWILIO_ACCOUNT_SID");
private static String password = System.getenv("TWILIO_AUTH_TOKEN");
private static String accountSid; // username used if this is null
private static String region = System.getenv("TWILIO_REGION");
private static String edge = System.getenv("TWILIO_EDGE");
private static volatile TwilioRestClient restClient;
private static volatile ExecutorService executorService;
private Twilio() {
}
/*
* Ensures that the ExecutorService is shutdown when the JVM exits.
*/
static {
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
if (executorService != null) {
executorService.shutdownNow();
}
}
});
}
/**
* Initialize the Twilio environment.
*
* @param username account to use
* @param password auth token for the account
*/
public static synchronized void init(final String username, final String password) {
Twilio.setUsername(username);
Twilio.setPassword(password);
}
/**
* Initialize the Twilio environment.
*
* @param username account to use
* @param password auth token for the account
* @param accountSid account sid to use
*/
public static synchronized void init(final String username, final String password, final String accountSid) {
Twilio.setUsername(username);
Twilio.setPassword(password);
Twilio.setAccountSid(accountSid);
}
/**
* Set the username.
*
* @param username account to use
* @throws AuthenticationException if username is null
*/
public static synchronized void setUsername(final String username) {
if (username == null) {
throw new AuthenticationException("Username can not be null");
}
if (!username.equals(Twilio.username)) {
Twilio.invalidate();
}
Twilio.username = username;
}
/**
* Set the auth token.
*
* @param password auth token to use
* @throws AuthenticationException if password is null
*/
public static synchronized void setPassword(final String password) {
if (password == null) {
throw new AuthenticationException("Password can not be null");
}
if (!password.equals(Twilio.password)) {
Twilio.invalidate();
}
Twilio.password = password;
}
/**
* Set the account sid.
*
* @param accountSid account sid to use
* @throws AuthenticationException if account sid is null
*/
public static synchronized void setAccountSid(final String accountSid) {
if (accountSid == null) {
throw new AuthenticationException("AccountSid can not be null");
}
if (!accountSid.equals(Twilio.accountSid)) {
Twilio.invalidate();
}
Twilio.accountSid = accountSid;
}
/**
* Set the region.
*
* @param region region to make request
*/
public static synchronized void setRegion(final String region) {
if (!Objects.equals(region, Twilio.region)) {
Twilio.invalidate();
}
Twilio.region = region;
}
/**
* Set the edge.
*
* @param edge edge to make request
*/
public static synchronized void setEdge(final String edge) {
if (!Objects.equals(edge, Twilio.edge)) {
Twilio.invalidate();
}
Twilio.edge = edge;
}
/**
* Returns (and initializes if not initialized) the Twilio Rest Client.
*
* @return the Twilio Rest Client
* @throws AuthenticationException if initialization required and either accountSid or authToken is null
*/
public static TwilioRestClient getRestClient() {
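        // Lazily build the shared rest client using double-checked locking.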
if (Twilio.restClient == null) {
synchronized (Twilio.class) {
if (Twilio.restClient == null) {
Twilio.restClient = buildRestClient();
}
}
}
return Twilio.restClient;
}
private static TwilioRestClient buildRestClient() {
if (Twilio.username == null || Twilio.password == null) {
throw new AuthenticationException(
"TwilioRestClient was used before AccountSid and AuthToken were set, please call Twilio.init()"
);
}
TwilioRestClient.Builder builder = new TwilioRestClient.Builder(Twilio.username, Twilio.password);
if (Twilio.accountSid != null) {
builder.accountSid(Twilio.accountSid);
}
builder.region(Twilio.region);
builder.edge(Twilio.edge);
return builder.build();
}
/**
* Use a custom rest client.
*
* @param restClient rest client to use
*/
public static void setRestClient(final TwilioRestClient restClient) {
synchronized (Twilio.class) {
Twilio.restClient = restClient;
}
}
/**
* Returns the Twilio executor service.
*
* @return the Twilio executor service
*/
public static ExecutorService getExecutorService() {
if (Twilio.executorService == null) {
synchronized (Twilio.class) {
if (Twilio.executorService == null) {
Twilio.executorService = Executors.newCachedThreadPool();
}
}
}
return Twilio.executorService;
}
/**
* Use a custom executor service.
*
* @param executorService executor service to use
*/
public static void setExecutorService(final ExecutorService executorService) {
synchronized (Twilio.class) {
Twilio.executorService = executorService;
}
}
/**
* Validate that we can connect to the new SSL certificate posted on api.twilio.com.
*
* @throws CertificateValidationException if the connection fails
*/
public static void validateSslCertificate() {
final NetworkHttpClient client = new NetworkHttpClient();
final Request request = new Request(HttpMethod.GET, "https://api.twilio.com:8443");
try {
final Response response = client.makeRequest(request);
if (!TwilioRestClient.SUCCESS.test(response.getStatusCode())) {
throw new CertificateValidationException(
"Unexpected response from certificate endpoint", request, response
);
}
} catch (final ApiException e) {
throw new CertificateValidationException("Could not get response from certificate endpoint", request);
}
}
/**
* Invalidates the volatile state held in the Twilio singleton.
*/
private static void invalidate() {
Twilio.restClient = null;
}
/**
* Attempts to gracefully shutdown the ExecutorService if it is present.
*/
public static synchronized void destroy() {
if (executorService != null) {
executorService.shutdown();
}
}
}
|
[
"\"TWILIO_ACCOUNT_SID\"",
"\"TWILIO_AUTH_TOKEN\"",
"\"TWILIO_REGION\"",
"\"TWILIO_EDGE\""
] |
[] |
[
"TWILIO_EDGE",
"TWILIO_REGION",
"TWILIO_AUTH_TOKEN",
"TWILIO_ACCOUNT_SID"
] |
[]
|
["TWILIO_EDGE", "TWILIO_REGION", "TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID"]
|
java
| 4 | 0 | |
nottreal/utils/dir.py
|
from distutils.dir_util import copy_tree
import os
import platform
import subprocess
import sys
class DirUtils:
@staticmethod
def cp(source_dir, target_dir):
"""
Copy the contents of one directory to another
Arguments:
source_dir {str} -- Source directory to copy from
target_dir {str} -- Target directory to copy top
Returns:
{bool}
"""
return copy_tree(source_dir, target_dir)
@staticmethod
def is_empty_or_create(directory):
if os.path.isdir(directory):
contents = [f for f in os.listdir(directory)]
return len(contents) == 0
else:
os.mkdir(directory)
return True
return False
@staticmethod
def pwd():
"""
Get the present working directory
Returns:
{str}
"""
if getattr(sys, 'frozen', False):
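            # Frozen bundle (e.g. PyInstaller): resources are unpacked to sys._MEIPASS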
if platform.system() == 'Darwin':
return os.path.join(sys._MEIPASS, '..', 'Resources')
else:
return sys._MEIPASS
else:
return os.getcwd()
    @staticmethod
    def open_in_os(path):
"""
Opens in the OS directory explorer/finder
Arguments:
path {str} -- Path to open
"""
if not os.path.isdir(path):
return False
if platform.system() == 'Windows':
explorer = os.path.join(os.getenv('WINDIR'), 'explorer.exe')
subprocess.run([
explorer,
path])
elif platform.system() == 'Darwin':
subprocess.call(['open', '-a', 'Finder', path])
elif platform.system() == 'Linux':
subprocess.Popen(['xdg-open', path])
    @staticmethod
    def reveal_file_in_os(path):
"""
Opens in the OS directory explorer/finder to reveal a file.
This only works in macOS at the moment.
Arguments:
path {str} -- Path to open
"""
if not os.path.isfile(path):
return False
if platform.system() == 'Darwin':
subprocess.run(['open', '-R', path])
|
[] |
[] |
[
"WINDIR"
] |
[]
|
["WINDIR"]
|
python
| 1 | 0 | |
x-pack/filebeat/input/gcppubsub/pubsub_test.go
|
// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
// or more contributor license agreements. Licensed under the Elastic License;
// you may not use this file except in compliance with the Elastic License.
package gcppubsub
import (
"context"
"io/ioutil"
"net/http"
"os"
"strconv"
"sync"
"testing"
"time"
"cloud.google.com/go/pubsub"
"github.com/stretchr/testify/assert"
"golang.org/x/sync/errgroup"
"google.golang.org/api/iterator"
"github.com/elastic/beats/v7/filebeat/channel"
"github.com/elastic/beats/v7/filebeat/input"
"github.com/elastic/beats/v7/libbeat/beat"
"github.com/elastic/beats/v7/libbeat/common"
"github.com/elastic/beats/v7/libbeat/common/atomic"
"github.com/elastic/beats/v7/libbeat/logp"
"github.com/elastic/beats/v7/libbeat/tests/compose"
"github.com/elastic/beats/v7/libbeat/tests/resources"
)
const (
emulatorProjectID = "test-project-id"
emulatorTopic = "test-topic-foo"
emulatorSubscription = "test-subscription-bar"
)
var once sync.Once
func testSetup(t *testing.T) (*pubsub.Client, context.CancelFunc) {
t.Helper()
var host string
if isInDockerIntegTestEnv() {
		// We're running inside our integration test environment, so
		// make sure that the googlepubsub container is running.
host = compose.EnsureUp(t, "googlepubsub").Host()
os.Setenv("PUBSUB_EMULATOR_HOST", host)
} else {
host = os.Getenv("PUBSUB_EMULATOR_HOST")
if host == "" {
t.Skip("PUBSUB_EMULATOR_HOST is not set in environment. You can start " +
"the emulator with \"docker-compose up\" from the _meta directory. " +
"The default address is PUBSUB_EMULATOR_HOST=localhost:8432")
}
}
once.Do(func() {
logp.TestingSetup()
// Disable HTTP keep-alives to ensure no extra goroutines hang around.
httpClient := http.Client{Transport: &http.Transport{DisableKeepAlives: true}}
// Sanity check the emulator.
resp, err := httpClient.Get("http://" + host)
if err != nil {
t.Fatalf("pubsub emulator at %s is not healthy: %v", host, err)
}
defer resp.Body.Close()
_, err = ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatal("failed to read response", err)
}
if resp.StatusCode != http.StatusOK {
t.Fatalf("pubsub emulator is not healthy, got status code %d", resp.StatusCode)
}
})
ctx, cancel := context.WithCancel(context.Background())
client, err := pubsub.NewClient(ctx, emulatorProjectID)
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
resetPubSub(t, client)
return client, cancel
}
func resetPubSub(t *testing.T, client *pubsub.Client) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Clear topics.
topics := client.Topics(ctx)
for {
topic, err := topics.Next()
if err == iterator.Done {
break
}
if err != nil {
t.Fatal(err)
}
if err = topic.Delete(ctx); err != nil {
t.Fatalf("failed to delete topic %v: %v", topic.ID(), err)
}
}
// Clear subscriptions.
subs := client.Subscriptions(ctx)
for {
sub, err := subs.Next()
if err == iterator.Done {
break
}
if err != nil {
t.Fatal(err)
}
if err = sub.Delete(ctx); err != nil {
t.Fatalf("failed to delete subscription %v: %v", sub.ID(), err)
}
}
}
func createTopic(t *testing.T, client *pubsub.Client) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
topic := client.Topic(emulatorTopic)
exists, err := topic.Exists(ctx)
if err != nil {
t.Fatalf("failed to check if topic exists: %v", err)
}
if !exists {
if topic, err = client.CreateTopic(ctx, emulatorTopic); err != nil {
t.Fatalf("failed to create the topic: %v", err)
}
t.Log("Topic created:", topic.ID())
}
}
func publishMessages(t *testing.T, client *pubsub.Client, numMsgs int) []string {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
topic := client.Topic(emulatorTopic)
defer topic.Stop()
messageIDs := make([]string, numMsgs)
for i := 0; i < numMsgs; i++ {
result := topic.Publish(ctx, &pubsub.Message{
Data: []byte(time.Now().UTC().Format(time.RFC3339Nano) + ": hello world " + strconv.Itoa(i)),
})
// Wait for message to publish and get assigned ID.
id, err := result.Get(ctx)
if err != nil {
t.Fatal(err)
}
messageIDs[i] = id
}
t.Logf("Published %d messages to topic %v. ID range: [%v, %v]", len(messageIDs), topic.ID(), messageIDs[0], messageIDs[len(messageIDs)-1])
return messageIDs
}
func createSubscription(t *testing.T, client *pubsub.Client) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
sub := client.Subscription(emulatorSubscription)
exists, err := sub.Exists(ctx)
if err != nil {
t.Fatalf("failed to check if sub exists: %v", err)
}
if exists {
return
}
sub, err = client.CreateSubscription(ctx, emulatorSubscription, pubsub.SubscriptionConfig{
Topic: client.Topic(emulatorTopic),
})
if err != nil {
t.Fatalf("failed to create subscription: %v", err)
}
t.Log("New subscription created:", sub.ID())
}
func ifNotDone(ctx context.Context, f func()) func() {
return func() {
select {
case <-ctx.Done():
return
default:
}
f()
}
}
func defaultTestConfig() *common.Config {
return common.MustNewConfigFrom(map[string]interface{}{
"project_id": emulatorProjectID,
"topic": emulatorTopic,
"subscription": map[string]interface{}{
"name": emulatorSubscription,
"create": true,
},
"credentials_file": "testdata/fake.json",
})
}
func isInDockerIntegTestEnv() bool {
return os.Getenv("BEATS_INSIDE_INTEGRATION_TEST_ENV") != ""
}
func runTest(t *testing.T, cfg *common.Config, run func(client *pubsub.Client, input *pubsubInput, out *stubOutleter, t *testing.T)) {
runTestWithACKer(t, cfg, ackEvent, run)
}
func runTestWithACKer(t *testing.T, cfg *common.Config, onEvent eventHandler, run func(client *pubsub.Client, input *pubsubInput, out *stubOutleter, t *testing.T)) {
if !isInDockerIntegTestEnv() {
// Don't test goroutines when using our compose.EnsureUp.
defer resources.NewGoroutinesChecker().Check(t)
}
// Create pubsub client for setting up and communicating to emulator.
client, clientCancel := testSetup(t)
defer clientCancel()
defer client.Close()
// Simulate input.Context from Filebeat input runner.
inputCtx := newInputContext()
defer close(inputCtx.Done)
// Stub outlet for receiving events generated by the input.
eventOutlet := newStubOutlet(onEvent)
defer eventOutlet.Close()
connector := channel.ConnectorFunc(func(_ *common.Config, cliCfg beat.ClientConfig) (channel.Outleter, error) {
eventOutlet.setClientConfig(cliCfg)
return eventOutlet, nil
})
in, err := NewInput(cfg, connector, inputCtx)
if err != nil {
t.Fatal(err)
}
pubsubInput := in.(*pubsubInput)
defer pubsubInput.Stop()
run(client, pubsubInput, eventOutlet, t)
}
func newInputContext() input.Context {
return input.Context{
Done: make(chan struct{}),
}
}
type eventHandler func(beat.Event, beat.ClientConfig) bool
type stubOutleter struct {
sync.Mutex
cond *sync.Cond
done bool
Events []beat.Event
clientCfg beat.ClientConfig
eventHandler eventHandler
}
func newStubOutlet(onEvent eventHandler) *stubOutleter {
o := &stubOutleter{
eventHandler: onEvent,
}
o.cond = sync.NewCond(o)
return o
}
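// ackEvent acknowledges the event through the client's ACK handler and
// reports whether it was acked.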
func ackEvent(ev beat.Event, cfg beat.ClientConfig) bool {
if cfg.ACKHandler == nil {
return false
}
cfg.ACKHandler.AddEvent(ev, true)
cfg.ACKHandler.ACKEvents(1)
return true
}
func (o *stubOutleter) setClientConfig(cfg beat.ClientConfig) {
o.Lock()
defer o.Unlock()
o.clientCfg = cfg
}
func (o *stubOutleter) waitForEvents(numEvents int) ([]beat.Event, bool) {
o.Lock()
defer o.Unlock()
for len(o.Events) < numEvents && !o.done {
o.cond.Wait()
}
size := numEvents
if size >= len(o.Events) {
size = len(o.Events)
}
out := make([]beat.Event, size)
copy(out, o.Events)
return out, len(out) == numEvents
}
func (o *stubOutleter) Close() error {
o.Lock()
defer o.Unlock()
o.done = true
return nil
}
func (o *stubOutleter) Done() <-chan struct{} { return nil }
func (o *stubOutleter) OnEvent(event beat.Event) bool {
o.Lock()
defer o.Unlock()
acked := o.eventHandler(event, o.clientCfg)
if acked {
o.Events = append(o.Events, event)
o.cond.Broadcast()
}
return !o.done
}
// --- Test Cases
func TestTopicDoesNotExist(t *testing.T) {
cfg := defaultTestConfig()
runTest(t, cfg, func(client *pubsub.Client, input *pubsubInput, out *stubOutleter, t *testing.T) {
err := input.run()
if assert.Error(t, err) {
assert.Contains(t, err.Error(), "failed to subscribe to pub/sub topic")
}
})
}
func TestSubscriptionDoesNotExistError(t *testing.T) {
cfg := defaultTestConfig()
cfg.SetBool("subscription.create", -1, false)
runTest(t, cfg, func(client *pubsub.Client, input *pubsubInput, out *stubOutleter, t *testing.T) {
createTopic(t, client)
err := input.run()
if assert.Error(t, err) {
assert.Contains(t, err.Error(), "no subscription exists and 'subscription.create' is not enabled")
}
})
}
func TestSubscriptionExists(t *testing.T) {
cfg := defaultTestConfig()
runTest(t, cfg, func(client *pubsub.Client, input *pubsubInput, out *stubOutleter, t *testing.T) {
createTopic(t, client)
createSubscription(t, client)
publishMessages(t, client, 5)
var group errgroup.Group
group.Go(input.run)
time.AfterFunc(10*time.Second, func() { out.Close() })
events, ok := out.waitForEvents(5)
if !ok {
t.Fatalf("Expected 5 events, but got %d.", len(events))
}
input.Stop()
if err := group.Wait(); err != nil {
t.Fatal(err)
}
})
}
func TestSubscriptionCreate(t *testing.T) {
cfg := defaultTestConfig()
runTest(t, cfg, func(client *pubsub.Client, input *pubsubInput, out *stubOutleter, t *testing.T) {
createTopic(t, client)
group, ctx := errgroup.WithContext(context.Background())
group.Go(input.run)
time.AfterFunc(1*time.Second, ifNotDone(ctx, func() { publishMessages(t, client, 5) }))
time.AfterFunc(10*time.Second, func() { out.Close() })
events, ok := out.waitForEvents(5)
if !ok {
t.Fatalf("Expected 5 events, but got %d.", len(events))
}
input.Stop()
if err := group.Wait(); err != nil {
t.Fatal(err)
}
})
}
func TestRunStop(t *testing.T) {
cfg := defaultTestConfig()
runTest(t, cfg, func(client *pubsub.Client, input *pubsubInput, out *stubOutleter, t *testing.T) {
input.Run()
input.Stop()
input.Run()
input.Stop()
})
}
func TestEndToEndACK(t *testing.T) {
cfg := defaultTestConfig()
var count atomic.Int
seen := make(map[string]struct{})
// ACK every other message
halfAcker := func(ev beat.Event, clientConfig beat.ClientConfig) bool {
msg := ev.Private.(*pubsub.Message)
seen[msg.ID] = struct{}{}
if count.Inc()&1 != 0 {
// Nack will result in the Message being redelivered more quickly than if it were allowed to expire.
msg.Nack()
return false
}
return ackEvent(ev, clientConfig)
}
runTestWithACKer(t, cfg, halfAcker, func(client *pubsub.Client, input *pubsubInput, out *stubOutleter, t *testing.T) {
createTopic(t, client)
createSubscription(t, client)
group, _ := errgroup.WithContext(context.Background())
group.Go(input.run)
const numMsgs = 10
publishMessages(t, client, numMsgs)
events, ok := out.waitForEvents(numMsgs)
if !ok {
t.Fatalf("Expected %d events, but got %d.", 1, len(events))
}
// Assert that all messages were eventually received
assert.Len(t, events, len(seen))
got := make(map[string]struct{})
for _, ev := range events {
msg := ev.Private.(*pubsub.Message)
got[msg.ID] = struct{}{}
}
for id := range seen {
_, exists := got[id]
assert.True(t, exists)
}
input.Stop()
out.Close()
if err := group.Wait(); err != nil {
t.Fatal(err)
}
})
}
|
[
"\"PUBSUB_EMULATOR_HOST\"",
"\"BEATS_INSIDE_INTEGRATION_TEST_ENV\""
] |
[] |
[
"PUBSUB_EMULATOR_HOST",
"BEATS_INSIDE_INTEGRATION_TEST_ENV"
] |
[]
|
["PUBSUB_EMULATOR_HOST", "BEATS_INSIDE_INTEGRATION_TEST_ENV"]
|
go
| 2 | 0 | |
cmd/list/lambda.go
|
package main
import (
"context"
"encoding/json"
"fmt"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/nzai/qr/utils"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sqs"
"github.com/nzai/qr/constants"
"github.com/nzai/qr/exchanges"
"github.com/nzai/qr/messages"
"github.com/nzai/qr/quotes"
"go.uber.org/zap"
)
func main() {
logger, _ := zap.NewDevelopment()
defer logger.Sync()
undo := zap.ReplaceGlobals(logger)
defer undo()
config := new(Config)
err := config.GetFromEnvironmentVariable()
if err != nil {
zap.L().Fatal("get environment variables failed", zap.Error(err))
}
zap.L().Info("get environment variables success")
creds := credentials.NewStaticCredentialsFromCreds(credentials.Value{AccessKeyID: config.AccessKeyID, SecretAccessKey: config.SecretAccessKey})
awsConfig := aws.NewConfig().WithCredentials(creds).WithRegion(config.Region).WithMaxRetries(config.MaxRetry)
awsSession, err := session.NewSession(awsConfig)
if err != nil {
zap.L().Fatal("new aws session failed", zap.Error(err))
}
var exs []exchanges.Exchange
for _, exchangeCode := range config.ExchangeCodes {
exchange, found := exchanges.Get(exchangeCode)
if !found {
zap.L().Fatal("exchange not found", zap.Error(err), zap.String("exchange code", exchangeCode))
}
exs = append(exs, exchange)
}
zap.L().Info("get exchanges success", zap.Strings("exchange codes", config.ExchangeCodes))
list := NewLister(config, exs, sqs.New(awsSession))
lambda.Start(list.Handler)
}
// Config define lambda config
type Config struct {
ExchangeCodes []string
AccessKeyID string
SecretAccessKey string
Region string
MaxRetry int
QueueURL string
}
// GetFromEnvironmentVariable read config from environment variables
func (c *Config) GetFromEnvironmentVariable() error {
exchangeCodes := strings.TrimSpace(os.Getenv("ExchangeCodes"))
if exchangeCodes == "" {
return fmt.Errorf("exchange code invalid")
}
accessKeyID := strings.TrimSpace(os.Getenv("AccessKeyID"))
if accessKeyID == "" {
return fmt.Errorf("AccessKeyID invalid")
}
secretAccessKey := strings.TrimSpace(os.Getenv("SecretAccessKey"))
if secretAccessKey == "" {
return fmt.Errorf("SecretAccessKey invalid")
}
region := strings.TrimSpace(os.Getenv("Region"))
if region == "" {
return fmt.Errorf("Region invalid")
}
maxRetry, err := strconv.Atoi(strings.TrimSpace(os.Getenv("MaxRetry")))
if err != nil {
maxRetry = constants.RetryCount
}
queueURL := strings.TrimSpace(os.Getenv("QueueUrl"))
if queueURL == "" {
return fmt.Errorf("QueueUrl invalid")
}
c.ExchangeCodes = strings.Split(exchangeCodes, ",")
c.AccessKeyID = accessKeyID
c.SecretAccessKey = secretAccessKey
c.Region = region
c.MaxRetry = maxRetry
c.QueueURL = queueURL
return nil
}
// Lister define list service
type Lister struct {
config *Config
exchanges []exchanges.Exchange
sqsClient *sqs.SQS
}
// NewLister create list service
func NewLister(config *Config, exchanges []exchanges.Exchange, sqsClient *sqs.SQS) *Lister {
return &Lister{config, exchanges, sqsClient}
}
// Handler process lambda event
func (s Lister) Handler(ctx context.Context, event events.CloudWatchEvent) {
wg := new(sync.WaitGroup)
wg.Add(len(s.exchanges))
for _, exchange := range s.exchanges {
go s.listExchangeCompanies(exchange, wg)
}
wg.Wait()
}
func (s Lister) listExchangeCompanies(exchange exchanges.Exchange, wg *sync.WaitGroup) {
defer wg.Done()
start := time.Now()
yesterday := utils.YesterdayZero(start.In(exchange.Location()))
	// Get the listed companies from the exchange
companies, err := exchange.Companies()
if err != nil {
zap.L().Error("get exchange companies failed", zap.Error(err), zap.String("exchange", exchange.Code()), zap.Time("date", yesterday))
return
}
count := len(companies)
zap.L().Info("list companies success",
zap.String("exchange", exchange.Code()),
zap.Time("date", yesterday),
zap.Int("companies", count),
zap.Duration("elapsed", time.Now().Sub(start)))
	// Send SQS messages
failedCount, err := s.send2sqs(exchange, companies, yesterday)
if err != nil {
zap.L().Error("send exchange daily company message failed",
zap.Error(err),
zap.String("exchange", exchange.Code()),
zap.Time("date", yesterday),
zap.Int("companies", count))
return
}
zap.L().Info("send exchange daily company message finished",
zap.String("exchange", exchange.Code()),
zap.Time("date", yesterday),
zap.Int("companies", count),
zap.Int("success", count-failedCount),
zap.Int("failed", failedCount),
zap.Duration("elapsed", time.Now().Sub(start)))
}
func (s Lister) send2sqs(exchange exchanges.Exchange, companies map[string]*quotes.Company, date time.Time) (int, error) {
input := &sqs.SendMessageBatchInput{
QueueUrl: aws.String(s.config.QueueURL),
Entries: make([]*sqs.SendMessageBatchRequestEntry, 0, constants.AwsSqsMaxBatchSize),
}
var failedCount int
index := -1
for _, company := range companies {
index++
body, err := json.Marshal(&messages.CompanyDaily{
Exchange: exchange.Code(),
Company: company,
Date: date,
})
if err != nil {
zap.L().Error("marshal company daily message failed",
zap.Error(err),
zap.String("exchange", exchange.Code()),
zap.Any("company", company),
zap.Time("date", date))
return 0, err
}
input.Entries = append(input.Entries, &sqs.SendMessageBatchRequestEntry{
Id: aws.String(fmt.Sprintf("%s-%d", exchange.Code(), index)),
MessageBody: aws.String(string(body)),
})
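		// Keep filling the batch until it reaches the SQS batch-size limit or
		// this is the last company; then flush it below.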
if len(input.Entries) != constants.AwsSqsMaxBatchSize && index < len(companies)-1 {
continue
}
output, err := s.sqsClient.SendMessageBatch(input)
if err != nil {
zap.L().Error("batch send company daily messages failed",
zap.Error(err),
zap.String("exchange", exchange.Code()),
zap.Any("company", company))
return 0, err
}
for _, failed := range output.Failed {
failedCount++
zap.L().Error("send company daily message failed",
zap.String("error", failed.GoString()),
zap.String("exchange", exchange.Code()),
zap.Any("company", company))
}
// clear all
input.Entries = input.Entries[:0]
}
return failedCount, nil
}
|
[
"\"ExchangeCodes\"",
"\"AccessKeyID\"",
"\"SecretAccessKey\"",
"\"Region\"",
"\"MaxRetry\"",
"\"QueueUrl\""
] |
[] |
[
"MaxRetry",
"AccessKeyID",
"ExchangeCodes",
"Region",
"SecretAccessKey",
"QueueUrl"
] |
[]
|
["MaxRetry", "AccessKeyID", "ExchangeCodes", "Region", "SecretAccessKey", "QueueUrl"]
|
go
| 6 | 0 | |
lib/simple_config.py
|
import ast
import json
import threading
import os
from copy import deepcopy
from util import user_dir, print_error, print_msg, print_stderr, PrintError
from bitcoin import MAX_FEE_RATE, FEE_TARGETS
SYSTEM_CONFIG_PATH = "/etc/electrum-vtc.conf"
config = None
def get_config():
global config
return config
def set_config(c):
global config
config = c
class SimpleConfig(PrintError):
"""
The SimpleConfig class is responsible for handling operations involving
configuration files.
There are 3 different sources of possible configuration values:
1. Command line options.
2. User configuration (in the user's config directory)
3. System configuration (in /etc/)
They are taken in order (1. overrides config options set in 2., that
override config set in 3.)
"""
def __init__(self, options={}, read_system_config_function=None,
read_user_config_function=None, read_user_dir_function=None):
# This lock needs to be acquired for updating and reading the config in
# a thread-safe way.
self.lock = threading.RLock()
self.fee_estimates = {}
# The following two functions are there for dependency injection when
# testing.
if read_system_config_function is None:
read_system_config_function = read_system_config
if read_user_config_function is None:
read_user_config_function = read_user_config
if read_user_dir_function is None:
self.user_dir = user_dir
else:
self.user_dir = read_user_dir_function
# The command line options
self.cmdline_options = deepcopy(options)
# Portable wallets don't use a system config
if self.cmdline_options.get('portable', False):
self.system_config = {}
else:
self.system_config = read_system_config_function()
# Set self.path and read the user config
self.user_config = {} # for self.get in electrum_path()
self.path = self.electrum_path()
self.user_config = read_user_config_function(self.path)
# Upgrade obsolete keys
self.fixup_keys({'auto_cycle': 'auto_connect'})
# Make a singleton instance of 'self'
set_config(self)
def electrum_path(self):
# Read electrum_path from command line / system configuration
# Otherwise use the user's default data directory.
path = self.get('electrum_path')
if path is None:
path = self.user_dir()
if self.get('testnet'):
path = os.path.join(path, 'testnet')
elif self.get('nolnet'):
path = os.path.join(path, 'nolnet')
# Make directory if it does not yet exist.
if not os.path.exists(path):
if os.path.islink(path):
raise BaseException('Dangling link: ' + path)
os.mkdir(path)
self.print_error("electrum directory", path)
return path
def fixup_config_keys(self, config, keypairs):
updated = False
for old_key, new_key in keypairs.iteritems():
if old_key in config:
if not new_key in config:
config[new_key] = config[old_key]
del config[old_key]
updated = True
return updated
def fixup_keys(self, keypairs):
'''Migrate old key names to new ones'''
self.fixup_config_keys(self.cmdline_options, keypairs)
self.fixup_config_keys(self.system_config, keypairs)
if self.fixup_config_keys(self.user_config, keypairs):
self.save_user_config()
def set_key(self, key, value, save = True):
if not self.is_modifiable(key):
print_stderr("Warning: not changing config key '%s' set on the command line" % key)
return
with self.lock:
self.user_config[key] = value
if save:
self.save_user_config()
return
def get(self, key, default=None):
with self.lock:
out = self.cmdline_options.get(key)
if out is None:
out = self.user_config.get(key)
if out is None:
out = self.system_config.get(key, default)
return out
def is_modifiable(self, key):
return not key in self.cmdline_options
def save_user_config(self):
if not self.path:
return
path = os.path.join(self.path, "config")
s = json.dumps(self.user_config, indent=4, sort_keys=True)
f = open(path, "w")
f.write(s)
f.close()
if 'ANDROID_DATA' not in os.environ:
import stat
os.chmod(path, stat.S_IREAD | stat.S_IWRITE)
def get_wallet_path(self):
"""Set the path of the wallet."""
# command line -w option
if self.get('wallet_path'):
return os.path.join(self.get('cwd'), self.get('wallet_path'))
# path in config file
path = self.get('default_wallet_path')
if path and os.path.exists(path):
return path
# default path
dirpath = os.path.join(self.path, "wallets")
if not os.path.exists(dirpath):
if os.path.islink(dirpath):
raise BaseException('Dangling link: ' + dirpath)
os.mkdir(dirpath)
new_path = os.path.join(self.path, "wallets", "default_wallet")
# default path in pre 1.9 versions
old_path = os.path.join(self.path, "electrum-vtc.dat")
if os.path.exists(old_path) and not os.path.exists(new_path):
os.rename(old_path, new_path)
return new_path
def remove_from_recently_open(self, filename):
recent = self.get('recently_open', [])
if filename in recent:
recent.remove(filename)
self.set_key('recently_open', recent)
def set_session_timeout(self, seconds):
self.print_error("session timeout -> %d seconds" % seconds)
self.set_key('session_timeout', seconds)
def get_session_timeout(self):
return self.get('session_timeout', 300)
def open_last_wallet(self):
if self.get('wallet_path') is None:
last_wallet = self.get('gui_last_wallet')
if last_wallet is not None and os.path.exists(last_wallet):
self.cmdline_options['default_wallet_path'] = last_wallet
def save_last_wallet(self, wallet):
if self.get('wallet_path') is None:
path = wallet.storage.path
self.set_key('gui_last_wallet', path)
def max_fee_rate(self):
f = self.get('max_fee_rate', MAX_FEE_RATE)
if f==0:
f = MAX_FEE_RATE
return f
def dynfee(self, i):
if i < 4:
j = FEE_TARGETS[i]
fee = self.fee_estimates.get(j)
else:
assert i == 4
fee = self.fee_estimates.get(2)
if fee is not None:
fee += fee/2
if fee is not None:
fee = min(5*MAX_FEE_RATE, fee)
return fee
def reverse_dynfee(self, fee_per_kb):
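        # Map a static fee rate back to the dynamic-fee target whose estimate is closest.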
import operator
l = self.fee_estimates.items() + [(1, self.dynfee(4))]
dist = map(lambda x: (x[0], abs(x[1] - fee_per_kb)), l)
min_target, min_value = min(dist, key=operator.itemgetter(1))
if fee_per_kb < self.fee_estimates.get(25)/2:
min_target = -1
return min_target
def has_fee_estimates(self):
return len(self.fee_estimates)==4
def is_dynfee(self):
return self.get('dynamic_fees', False)
def fee_per_kb(self):
dyn = self.is_dynfee()
if dyn:
fee_rate = self.dynfee(self.get('fee_level', 2))
else:
fee_rate = self.get('fee_per_kb', self.max_fee_rate()/10)
return fee_rate
def get_video_device(self):
device = self.get("video_device", "default")
if device == 'default':
device = ''
return device
def read_system_config(path=SYSTEM_CONFIG_PATH):
"""Parse and return the system config settings in /etc/electrum-vtc.conf."""
result = {}
if os.path.exists(path):
try:
import ConfigParser
except ImportError:
print "cannot parse electrum-vtc.conf. please install ConfigParser"
return
p = ConfigParser.ConfigParser()
try:
p.read(path)
for k, v in p.items('client'):
result[k] = v
except (ConfigParser.NoSectionError, ConfigParser.MissingSectionHeaderError):
pass
return result
def read_user_config(path):
"""Parse and store the user config settings in electrum-vtc.conf into user_config[]."""
if not path:
return {}
config_path = os.path.join(path, "config")
if not os.path.exists(config_path):
return {}
try:
with open(config_path, "r") as f:
data = f.read()
result = json.loads(data)
except:
print_error("Warning: Cannot read config file.", config_path)
return {}
if not type(result) is dict:
return {}
return result
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
etcdserver/api/rafthttp/peer.go
|
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rafthttp
import (
"context"
"sync"
"time"
"github.com/zhiyunliu/etcd/etcdserver/api/snap"
stats "github.com/zhiyunliu/etcd/etcdserver/api/v2stats"
"github.com/zhiyunliu/etcd/pkg/types"
"github.com/zhiyunliu/etcd/raft"
"github.com/zhiyunliu/etcd/raft/raftpb"
"go.uber.org/zap"
"golang.org/x/time/rate"
)
const (
// ConnReadTimeout and ConnWriteTimeout are the i/o timeout set on each connection rafthttp pkg creates.
// A 5 seconds timeout is good enough for recycling bad connections. Or we have to wait for
// tcp keepalive failing to detect a bad connection, which is at minutes level.
// For long term streaming connections, rafthttp pkg sends application level linkHeartbeatMessage
// to keep the connection alive.
// For short term pipeline connections, the connection MUST be killed to avoid it being
// put back to http pkg connection pool.
ConnReadTimeout = 5 * time.Second
ConnWriteTimeout = 5 * time.Second
recvBufSize = 4096
// maxPendingProposals holds the proposals during one leader election process.
// Generally one leader election takes at most 1 sec. It should have
// 0-2 election conflicts, and each one takes 0.5 sec.
// We assume the number of concurrent proposers is smaller than 4096.
// One client blocks on its proposal for at least 1 sec, so 4096 is enough
// to hold all proposals.
maxPendingProposals = 4096
streamAppV2 = "streamMsgAppV2"
streamMsg = "streamMsg"
pipelineMsg = "pipeline"
sendSnap = "sendMsgSnap"
)
type Peer interface {
// send sends the message to the remote peer. The function is non-blocking
// and has no promise that the message will be received by the remote.
// When it fails to send message out, it will report the status to underlying
// raft.
send(m raftpb.Message)
// sendSnap sends the merged snapshot message to the remote peer. Its behavior
// is similar to send.
sendSnap(m snap.Message)
// update updates the urls of remote peer.
update(urls types.URLs)
// attachOutgoingConn attaches the outgoing connection to the peer for
// stream usage. After the call, the ownership of the outgoing
// connection hands over to the peer. The peer will close the connection
// when it is no longer used.
attachOutgoingConn(conn *outgoingConn)
// activeSince returns the time that the connection with the
// peer becomes active.
activeSince() time.Time
// stop performs any necessary finalization and terminates the peer
// elegantly.
stop()
}
// peer is the representative of a remote raft node. Local raft node sends
// messages to the remote through peer.
// Each peer has two underlying mechanisms to send out a message: stream and
// pipeline.
// A stream is a receiver initialized long-polling connection, which
// is always open to transfer messages. Besides general stream, peer also has
// an optimized stream for sending msgApp since msgApp accounts for a large part
// of all messages. Only raft leader uses the optimized stream to send msgApp
// to the remote follower node.
// A pipeline is a series of http clients that send http requests to the remote.
// It is only used when the stream has not been established.
type peer struct {
lg *zap.Logger
localID types.ID
// id of the remote raft peer node
id types.ID
r Raft
status *peerStatus
picker *urlPicker
msgAppV2Writer *streamWriter
writer *streamWriter
pipeline *pipeline
snapSender *snapshotSender // snapshot sender to send v3 snapshot messages
msgAppV2Reader *streamReader
msgAppReader *streamReader
recvc chan raftpb.Message
propc chan raftpb.Message
mu sync.Mutex
paused bool
cancel context.CancelFunc // cancel pending works in go routine created by peer.
stopc chan struct{}
}
func startPeer(t *Transport, urls types.URLs, peerID types.ID, fs *stats.FollowerStats) *peer {
if t.Logger != nil {
t.Logger.Info("starting remote peer", zap.String("remote-peer-id", peerID.String()))
}
defer func() {
if t.Logger != nil {
t.Logger.Info("started remote peer", zap.String("remote-peer-id", peerID.String()))
}
}()
status := newPeerStatus(t.Logger, t.ID, peerID)
picker := newURLPicker(urls)
errorc := t.ErrorC
r := t.Raft
pipeline := &pipeline{
peerID: peerID,
tr: t,
picker: picker,
status: status,
followerStats: fs,
raft: r,
errorc: errorc,
}
pipeline.start()
p := &peer{
lg: t.Logger,
localID: t.ID,
id: peerID,
r: r,
status: status,
picker: picker,
msgAppV2Writer: startStreamWriter(t.Logger, t.ID, peerID, status, fs, r),
writer: startStreamWriter(t.Logger, t.ID, peerID, status, fs, r),
pipeline: pipeline,
snapSender: newSnapshotSender(t, picker, peerID, status),
recvc: make(chan raftpb.Message, recvBufSize),
propc: make(chan raftpb.Message, maxPendingProposals),
stopc: make(chan struct{}),
}
ctx, cancel := context.WithCancel(context.Background())
p.cancel = cancel
go func() {
for {
select {
case mm := <-p.recvc:
if err := r.Process(ctx, mm); err != nil {
if t.Logger != nil {
t.Logger.Warn("failed to process Raft message", zap.Error(err))
}
}
case <-p.stopc:
return
}
}
}()
// r.Process might block for processing proposal when there is no leader.
// Thus propc must be put into a separate routine with recvc to avoid blocking
// processing other raft messages.
go func() {
for {
select {
case mm := <-p.propc:
if err := r.Process(ctx, mm); err != nil {
if t.Logger != nil {
t.Logger.Warn("failed to process Raft message", zap.Error(err))
}
}
case <-p.stopc:
return
}
}
}()
p.msgAppV2Reader = &streamReader{
lg: t.Logger,
peerID: peerID,
typ: streamTypeMsgAppV2,
tr: t,
picker: picker,
status: status,
recvc: p.recvc,
propc: p.propc,
rl: rate.NewLimiter(t.DialRetryFrequency, 1),
}
p.msgAppReader = &streamReader{
lg: t.Logger,
peerID: peerID,
typ: streamTypeMessage,
tr: t,
picker: picker,
status: status,
recvc: p.recvc,
propc: p.propc,
rl: rate.NewLimiter(t.DialRetryFrequency, 1),
}
p.msgAppV2Reader.start()
p.msgAppReader.start()
return p
}
func (p *peer) send(m raftpb.Message) {
p.mu.Lock()
paused := p.paused
p.mu.Unlock()
if paused {
return
}
writec, name := p.pick(m)
select {
case writec <- m:
default:
p.r.ReportUnreachable(m.To)
if isMsgSnap(m) {
p.r.ReportSnapshot(m.To, raft.SnapshotFailure)
}
if p.status.isActive() {
if p.lg != nil {
p.lg.Warn(
"dropped internal Raft message since sending buffer is full (overloaded network)",
zap.String("message-type", m.Type.String()),
zap.String("local-member-id", p.localID.String()),
zap.String("from", types.ID(m.From).String()),
zap.String("remote-peer-id", p.id.String()),
zap.String("remote-peer-name", name),
zap.Bool("remote-peer-active", p.status.isActive()),
)
}
} else {
if p.lg != nil {
p.lg.Warn(
"dropped internal Raft message since sending buffer is full (overloaded network)",
zap.String("message-type", m.Type.String()),
zap.String("local-member-id", p.localID.String()),
zap.String("from", types.ID(m.From).String()),
zap.String("remote-peer-id", p.id.String()),
zap.String("remote-peer-name", name),
zap.Bool("remote-peer-active", p.status.isActive()),
)
}
}
sentFailures.WithLabelValues(types.ID(m.To).String()).Inc()
}
}
func (p *peer) sendSnap(m snap.Message) {
go p.snapSender.send(m)
}
func (p *peer) update(urls types.URLs) {
p.picker.update(urls)
}
func (p *peer) attachOutgoingConn(conn *outgoingConn) {
var ok bool
switch conn.t {
case streamTypeMsgAppV2:
ok = p.msgAppV2Writer.attach(conn)
case streamTypeMessage:
ok = p.writer.attach(conn)
default:
if p.lg != nil {
p.lg.Panic("unknown stream type", zap.String("type", conn.t.String()))
}
}
if !ok {
conn.Close()
}
}
func (p *peer) activeSince() time.Time { return p.status.activeSince() }
// Pause pauses the peer. The peer will simply drops all incoming
// messages without returning an error.
func (p *peer) Pause() {
p.mu.Lock()
defer p.mu.Unlock()
p.paused = true
p.msgAppReader.pause()
p.msgAppV2Reader.pause()
}
// Resume resumes a paused peer.
func (p *peer) Resume() {
p.mu.Lock()
defer p.mu.Unlock()
p.paused = false
p.msgAppReader.resume()
p.msgAppV2Reader.resume()
}
func (p *peer) stop() {
if p.lg != nil {
p.lg.Info("stopping remote peer", zap.String("remote-peer-id", p.id.String()))
}
defer func() {
if p.lg != nil {
p.lg.Info("stopped remote peer", zap.String("remote-peer-id", p.id.String()))
}
}()
close(p.stopc)
p.cancel()
p.msgAppV2Writer.stop()
p.writer.stop()
p.pipeline.stop()
p.snapSender.stop()
p.msgAppV2Reader.stop()
p.msgAppReader.stop()
}
// pick picks a chan for sending the given message. The picked chan and the picked chan
// string name are returned.
func (p *peer) pick(m raftpb.Message) (writec chan<- raftpb.Message, picked string) {
var ok bool
// Considering MsgSnap may have a big size, e.g., 1G, and will block
// stream for a long time, only use one of the N pipelines to send MsgSnap.
if isMsgSnap(m) {
return p.pipeline.msgc, pipelineMsg
} else if writec, ok = p.msgAppV2Writer.writec(); ok && isMsgApp(m) {
return writec, streamAppV2
} else if writec, ok = p.writer.writec(); ok {
return writec, streamMsg
}
return p.pipeline.msgc, pipelineMsg
}
func isMsgApp(m raftpb.Message) bool { return m.Type == raftpb.MsgApp }
func isMsgSnap(m raftpb.Message) bool { return m.Type == raftpb.MsgSnap }
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
nipype/utils/config.py
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
'''
Created on 20 Apr 2010
logging options : INFO, DEBUG
hash_method : content, timestamp
@author: Chris Filo Gorgolewski
'''
from __future__ import (print_function, division, unicode_literals,
absolute_import)
import os
import sys
import errno
import atexit
from warnings import warn
from distutils.version import LooseVersion
import configparser
import numpy as np
from builtins import bytes, str, object, open
from simplejson import load, dump
from future import standard_library
from .misc import str2bool
from ..external import portalocker
standard_library.install_aliases()
CONFIG_DEPRECATIONS = {
'profile_runtime': ('monitoring.enabled', '1.0'),
'filemanip_level': ('logging.utils_level', '1.0'),
}
NUMPY_MMAP = LooseVersion(np.__version__) >= LooseVersion('1.12.0')
DEFAULT_CONFIG_TPL = """\
[logging]
workflow_level = INFO
utils_level = INFO
interface_level = INFO
log_to_file = false
log_directory = {log_dir}
log_size = 16384000
log_rotate = 4
[execution]
create_report = true
crashdump_dir = {crashdump_dir}
hash_method = timestamp
job_finished_timeout = 5
keep_inputs = false
local_hash_check = true
matplotlib_backend = Agg
plugin = Linear
remove_node_directories = false
remove_unnecessary_outputs = true
try_hard_link_datasink = true
single_thread_matlab = true
crashfile_format = pklz
stop_on_first_crash = false
stop_on_first_rerun = false
use_relative_paths = false
stop_on_unknown_version = false
write_provenance = false
parameterize_dirs = true
poll_sleep_duration = 2
xvfb_max_wait = 10
[monitoring]
enabled = false
sample_frequency = 1
summary_append = true
[check]
interval = 1209600
""".format
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
class NipypeConfig(object):
"""Base nipype config class"""
def __init__(self, *args, **kwargs):
self._config = configparser.ConfigParser()
self._cwd = None
config_dir = os.path.expanduser('~/.nipype')
self.data_file = os.path.join(config_dir, 'nipype.json')
self.set_default_config()
self._display = None
self._resource_monitor = None
if os.path.exists(config_dir):
self._config.read(
[os.path.join(config_dir, 'nipype.cfg'), 'nipype.cfg'])
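        # Migrate deprecated options to their new section/name when the new one is unset.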
for option in CONFIG_DEPRECATIONS:
for section in ['execution', 'logging', 'monitoring']:
if self.has_option(section, option):
new_section, new_option = CONFIG_DEPRECATIONS[option][
0].split('.')
if not self.has_option(new_section, new_option):
# Warn implicit in get
self.set(new_section, new_option,
self.get(section, option))
@property
def cwd(self):
"""Cache current working directory ASAP"""
# Run getcwd only once, so that multiprocessing does not fail
# after the working directory has changed to the wrong path
if self._cwd is None:
try:
self._cwd = os.getcwd()
except OSError:
warn('Trying to run Nipype from a nonexistent directory "{}".'.
format(os.getenv('PWD', 'unknown')), RuntimeWarning)
raise
return self._cwd
def set_default_config(self):
"""Read default settings template and set into config object"""
default_cfg = DEFAULT_CONFIG_TPL(
log_dir=os.path.expanduser(
'~'), # Get $HOME in a platform-agnostic way
crashdump_dir=self.cwd # Read cached cwd
)
try:
self._config.read_string(default_cfg) # Python >= 3.2
except AttributeError:
from io import StringIO
self._config.readfp(StringIO(default_cfg))
def enable_debug_mode(self):
"""Enables debug configuration"""
from .. import logging
self._config.set('execution', 'stop_on_first_crash', 'true')
self._config.set('execution', 'remove_unnecessary_outputs', 'false')
self._config.set('execution', 'keep_inputs', 'true')
self._config.set('logging', 'workflow_level', 'DEBUG')
self._config.set('logging', 'interface_level', 'DEBUG')
self._config.set('logging', 'utils_level', 'DEBUG')
logging.update_logging(self._config)
def set_log_dir(self, log_dir):
"""Sets logging directory
This should be the first thing that is done before any nipype class
with logging is imported.
"""
self._config.set('logging', 'log_directory', log_dir)
def get(self, section, option, default=None):
"""Get an option"""
if option in CONFIG_DEPRECATIONS:
msg = ('Config option "%s" has been deprecated as of nipype %s. '
'Please use "%s" instead.') % (
option, CONFIG_DEPRECATIONS[option][1],
CONFIG_DEPRECATIONS[option][0])
warn(msg)
section, option = CONFIG_DEPRECATIONS[option][0].split('.')
if self._config.has_option(section, option):
return self._config.get(section, option)
return default
def set(self, section, option, value):
"""Set new value on option"""
if isinstance(value, bool):
value = str(value)
if option in CONFIG_DEPRECATIONS:
msg = ('Config option "%s" has been deprecated as of nipype %s. '
'Please use "%s" instead.') % (
option, CONFIG_DEPRECATIONS[option][1],
CONFIG_DEPRECATIONS[option][0])
warn(msg)
section, option = CONFIG_DEPRECATIONS[option][0].split('.')
return self._config.set(section, option, value)
def getboolean(self, section, option):
"""Get a boolean option from section"""
return self._config.getboolean(section, option)
def has_option(self, section, option):
"""Check if option exists in section"""
return self._config.has_option(section, option)
@property
def _sections(self):
return self._config._sections
def get_data(self, key):
"""Read options file"""
if not os.path.exists(self.data_file):
return None
with open(self.data_file, 'rt') as file:
portalocker.lock(file, portalocker.LOCK_EX)
datadict = load(file)
if key in datadict:
return datadict[key]
return None
def save_data(self, key, value):
"""Store config flie"""
datadict = {}
if os.path.exists(self.data_file):
with open(self.data_file, 'rt') as file:
portalocker.lock(file, portalocker.LOCK_EX)
datadict = load(file)
else:
dirname = os.path.dirname(self.data_file)
if not os.path.exists(dirname):
mkdir_p(dirname)
with open(self.data_file, 'wt') as file:
portalocker.lock(file, portalocker.LOCK_EX)
datadict[key] = value
dump(datadict, file)
def update_config(self, config_dict):
"""Extend internal dictionary with config_dict"""
for section in ['execution', 'logging', 'check']:
if section in config_dict:
for key, val in list(config_dict[section].items()):
if not key.startswith('__'):
self._config.set(section, key, str(val))
def update_matplotlib(self):
"""Set backend on matplotlib from options"""
import matplotlib
matplotlib.use(self.get('execution', 'matplotlib_backend'))
def enable_provenance(self):
"""Sets provenance storing on"""
self._config.set('execution', 'write_provenance', 'true')
self._config.set('execution', 'hash_method', 'content')
@property
def resource_monitor(self):
"""Check if resource_monitor is available"""
if self._resource_monitor is not None:
return self._resource_monitor
# Cache config from nipype config
self.resource_monitor = str2bool(
self._config.get('monitoring', 'enabled')) or False
return self._resource_monitor
@resource_monitor.setter
def resource_monitor(self, value):
# Accept string true/false values
if isinstance(value, (str, bytes)):
value = str2bool(value.lower())
if value is False:
self._resource_monitor = False
elif value is True:
if not self._resource_monitor:
# Before setting self._resource_monitor check psutil
# availability
self._resource_monitor = False
try:
import psutil
self._resource_monitor = LooseVersion(
psutil.__version__) >= LooseVersion('5.0')
except ImportError:
pass
finally:
if not self._resource_monitor:
warn('Could not enable the resource monitor: '
'psutil>=5.0 could not be imported.')
self._config.set('monitoring', 'enabled',
('%s' % self._resource_monitor).lower())
def enable_resource_monitor(self):
"""Sets the resource monitor on"""
self.resource_monitor = True
def disable_resource_monitor(self):
"""Sets the resource monitor off"""
self.resource_monitor = False
def get_display(self):
"""Returns the first display available"""
# Check if an Xorg server is listening
# import subprocess as sp
# if not hasattr(sp, 'DEVNULL'):
# setattr(sp, 'DEVNULL', os.devnull)
# x_listening = bool(sp.call('ps au | grep -v grep | grep -i xorg',
# shell=True, stdout=sp.DEVNULL))
if self._display is not None:
return ':%d' % self._display.new_display
sysdisplay = None
if self._config.has_option('execution', 'display_variable'):
sysdisplay = self._config.get('execution', 'display_variable')
sysdisplay = sysdisplay or os.getenv('DISPLAY')
if sysdisplay:
from collections import namedtuple
def _mock():
pass
# Store a fake Xvfb object. Format - <host>:<display>[.<screen>]
ndisp = sysdisplay.split(':')[-1].split('.')[0]
Xvfb = namedtuple('Xvfb', ['new_display', 'stop'])
self._display = Xvfb(int(ndisp), _mock)
return self.get_display()
else:
if 'darwin' in sys.platform:
raise RuntimeError(
'Xvfb requires root permissions to run in OSX. Please '
'make sure that an X server is listening and set the '
'appropriate config on either $DISPLAY or nipype\'s '
'"display_variable" config. Valid X servers include '
'VNC, XQuartz, or manually started Xvfb.')
# If $DISPLAY is empty, it confuses Xvfb so unset
if sysdisplay == '':
del os.environ['DISPLAY']
try:
from xvfbwrapper import Xvfb
except ImportError:
raise RuntimeError(
'A display server was required, but $DISPLAY is not '
'defined and Xvfb could not be imported.')
self._display = Xvfb(nolisten='tcp')
self._display.start()
# Older versions of xvfbwrapper used vdisplay_num
if not hasattr(self._display, 'new_display'):
setattr(self._display, 'new_display',
self._display.vdisplay_num)
return self.get_display()
def stop_display(self):
"""Closes the display if started"""
if self._display is not None:
from .. import logging
self._display.stop()
logging.getLogger('nipype.interface').debug(
'Closing display (if virtual)')
@atexit.register
def free_display():
"""Stop virtual display (if it is up)"""
from .. import config
config.stop_display()
| [] | [] | ["PWD", "DISPLAY"] | [] | ["PWD", "DISPLAY"] | python | 2 | 0 |
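The config class above remaps deprecated option names through CONFIG_DEPRECATIONS on both get and set, warning the caller and redirecting to the new section and option. A small Go sketch of the same remapping idea, with names of my own choosing (it is not part of nipype):

package example

import (
	"fmt"
	"strings"
)

// deprecations maps an old option name to its replacement in "section.option"
// form, mirroring the CONFIG_DEPRECATIONS table in the module above.
var deprecations = map[string]string{
	"profile_runtime": "monitoring.enabled",
	"filemanip_level": "logging.utils_level",
}

// resolve returns the section and option that should actually be read or
// written, warning the caller when a deprecated name was used.
func resolve(section, option string) (string, string) {
	if repl, ok := deprecations[option]; ok {
		fmt.Printf("warning: option %q is deprecated, use %q instead\n", option, repl)
		parts := strings.SplitN(repl, ".", 2)
		return parts[0], parts[1]
	}
	return section, option
}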
commons/config/config_test.go
|
/*
@Time : 2020/11/5 11:47 AM
@Author : ShadowWalker
@Email : [email protected]
@File : config_test
@Software: GoLand
@Description: unit tests for the config component
*/
package config
import (
"github.com/offcn-jl/gaea-back-end/commons/database/structs"
"github.com/offcn-jl/gaea-back-end/commons/utt"
. "github.com/smartystreets/goconvey/convey"
"os"
"testing"
)
// TestInit verifies that the Init function completes configuration initialization
func TestInit(t *testing.T) {
Convey("Test initializing the configuration", t, func() {
Convey("Test that a PANIC [ runtime error: invalid memory address or nil pointer dereference ] is thrown when the ORM is not initialized", func() {
So(func() { Init(utt.ORM) }, ShouldPanic)
})
Convey("Test that a PANIC [ runtime error: invalid memory address or nil pointer dereference ] is thrown when the DSN is misconfigured", func() {
rightDSN := os.Getenv("UNIT_TEST_MYSQL_DSN_GAEA")
os.Setenv("UNIT_TEST_MYSQL_DSN_GAEA", "INVALID_DSN")
utt.CreatORM()
So(func() { Init(utt.ORM) }, ShouldPanic)
os.Setenv("UNIT_TEST_MYSQL_DSN_GAEA", rightDSN)
})
Convey("Test that the error [ record not found ] is returned when no record exists", func() {
// Create the ORM with the correct DSN
utt.CreatORM()
// Initialize the ORM
utt.InitORM()
So(Init(utt.ORM), ShouldBeError, "record not found")
})
Convey("Test that when a record exists the return value is empty and the config is loaded", func() {
// Create a record
utt.ORM.Create(&structs.SystemConfig{DisableDebug: true})
So(Init(utt.ORM), ShouldBeEmpty)
So(currentConfig.DisableDebug, ShouldBeTrue)
})
})
// Restore the configuration state
currentConfig = structs.SystemConfig{}
// Reset the database when the test run ends
utt.CloseORM()
}
// TestGet verifies that the Get function returns the configuration as expected
func TestGet(t *testing.T) {
Convey("Test that the Get function returns the configuration as expected", t, func() {
Convey("Test that the default configuration is returned when the config has not been initialized ( or initialization failed )", func() {
So(Get().DisableDebug, ShouldBeFalse)
})
Convey("Test that after initialization Get returns the last configuration stored in the database", func() {
// Create the ORM with the correct DSN
utt.CreatORM()
// Initialize the ORM
utt.InitORM()
// Create a record
utt.ORM.Create(&structs.SystemConfig{DisableDebug: true})
// Initialize the configuration
So(Init(utt.ORM), ShouldBeEmpty)
// Get the configuration
So(Get().DisableDebug, ShouldBeTrue)
// Reset the database when the test run ends
utt.CloseORM()
})
})
}
// TestUpdate verifies that the Update function modifies the configuration
func TestUpdate(t *testing.T) {
Convey("Test that the Update function modifies the configuration", t, func() {
// Create the ORM with the correct DSN
utt.CreatORM()
// Initialize the ORM
utt.InitORM()
// Create a record
utt.ORM.Create(&structs.SystemConfig{DisableDebug: true})
// Initialize the configuration
So(Init(utt.ORM), ShouldBeEmpty)
// Get the configuration
So(Get().DisableDebug, ShouldBeTrue)
// Update the configuration
Update(utt.ORM, structs.SystemConfig{DisableDebug: false})
// Get the configuration
So(Get().DisableDebug, ShouldBeFalse)
// Reset the database when the test run ends
utt.CloseORM()
})
}
| ["\"UNIT_TEST_MYSQL_DSN_GAEA\""] | [] | ["UNIT_TEST_MYSQL_DSN_GAEA"] | [] | ["UNIT_TEST_MYSQL_DSN_GAEA"] | go | 1 | 0 |
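The DSN test above saves UNIT_TEST_MYSQL_DSN_GAEA, overrides it with an invalid value, and restores it afterwards. A small helper for that save/override/restore pattern (the helper and its name are mine, not part of the project) could look like:

package example

import "os"

// withEnv sets an environment variable for the duration of fn and then
// restores (or removes) the previous value — the same save/override/restore
// pattern the DSN test above performs by hand.
func withEnv(key, value string, fn func()) {
	old, had := os.LookupEnv(key)
	os.Setenv(key, value)
	defer func() {
		if had {
			os.Setenv(key, old)
		} else {
			os.Unsetenv(key)
		}
	}()
	fn()
}

A call such as withEnv("UNIT_TEST_MYSQL_DSN_GAEA", "INVALID_DSN", func() { /* run the panicking Init here */ }) would then wrap the override without risking a leaked value.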
library/cache/memcache/memcache_test.go
|
package memcache
import (
"encoding/json"
"fmt"
"os"
"testing"
"time"
"go-common/library/container/pool"
xtime "go-common/library/time"
)
var testMemcacheAddr = "127.0.0.1:11211"
var testConfig = &Config{
Config: &pool.Config{
Active: 10,
Idle: 10,
IdleTimeout: xtime.Duration(time.Second),
WaitTimeout: xtime.Duration(time.Second),
Wait: false,
},
Proto: "tcp",
DialTimeout: xtime.Duration(time.Second),
ReadTimeout: xtime.Duration(time.Second),
WriteTimeout: xtime.Duration(time.Second),
}
func init() {
if addr := os.Getenv("TEST_MEMCACHE_ADDR"); addr != "" {
testMemcacheAddr = addr
}
testConfig.Addr = testMemcacheAddr
}
func TestMain(m *testing.M) {
testClient = New(testConfig)
m.Run()
testClient.Close()
os.Exit(0)
}
func ExampleConn_set() {
var (
err error
value []byte
conn Conn
expire int32 = 100
p = struct {
Name string
Age int64
}{"golang", 10}
)
cnop := DialConnectTimeout(time.Duration(time.Second))
rdop := DialReadTimeout(time.Duration(time.Second))
wrop := DialWriteTimeout(time.Duration(time.Second))
if value, err = json.Marshal(p); err != nil {
fmt.Println(err)
return
}
if conn, err = Dial("tcp", testMemcacheAddr, cnop, rdop, wrop); err != nil {
fmt.Println(err)
return
}
// FlagRAW test
itemRaw := &Item{
Key: "test_raw",
Value: value,
Expiration: expire,
}
if err = conn.Set(itemRaw); err != nil {
fmt.Println(err)
return
}
// FlagGzip
itemGZip := &Item{
Key: "test_gzip",
Value: value,
Flags: FlagGzip,
Expiration: expire,
}
if err = conn.Set(itemGZip); err != nil {
fmt.Println(err)
return
}
// FlagGOB
itemGOB := &Item{
Key: "test_gob",
Object: p,
Flags: FlagGOB,
Expiration: expire,
}
if err = conn.Set(itemGOB); err != nil {
fmt.Println(err)
return
}
// FlagJSON
itemJSON := &Item{
Key: "test_json",
Object: p,
Flags: FlagJSON,
Expiration: expire,
}
if err = conn.Set(itemJSON); err != nil {
fmt.Println(err)
return
}
// FlagJSON | FlagGzip
itemJSONGzip := &Item{
Key: "test_jsonGzip",
Object: p,
Flags: FlagJSON | FlagGzip,
Expiration: expire,
}
if err = conn.Set(itemJSONGzip); err != nil {
fmt.Println(err)
return
}
// Output:
}
func ExampleConn_get() {
var (
err error
item2 *Item
conn Conn
p struct {
Name string
Age int64
}
)
cnop := DialConnectTimeout(time.Duration(time.Second))
rdop := DialReadTimeout(time.Duration(time.Second))
wrop := DialWriteTimeout(time.Duration(time.Second))
if conn, err = Dial("tcp", testMemcacheAddr, cnop, rdop, wrop); err != nil {
fmt.Println(err)
return
}
if item2, err = conn.Get("test_raw"); err != nil {
fmt.Println(err)
} else {
if err = conn.Scan(item2, &p); err != nil {
fmt.Printf("FlagRAW conn.Scan error(%v)\n", err)
return
}
}
// FlagGZip
if item2, err = conn.Get("test_gzip"); err != nil {
fmt.Println(err)
} else {
if err = conn.Scan(item2, &p); err != nil {
fmt.Printf("FlagGZip conn.Scan error(%v)\n", err)
return
}
}
// FlagGOB
if item2, err = conn.Get("test_gob"); err != nil {
fmt.Println(err)
} else {
if err = conn.Scan(item2, &p); err != nil {
fmt.Printf("FlagGOB conn.Scan error(%v)\n", err)
return
}
}
// FlagJSON
if item2, err = conn.Get("test_json"); err != nil {
fmt.Println(err)
} else {
if err = conn.Scan(item2, &p); err != nil {
fmt.Printf("FlagJSON conn.Scan error(%v)\n", err)
return
}
}
// Output:
}
func ExampleConn_getMulti() {
var (
err error
conn Conn
res map[string]*Item
keys = []string{"test_raw", "test_gzip"}
p struct {
Name string
Age int64
}
)
cnop := DialConnectTimeout(time.Duration(time.Second))
rdop := DialReadTimeout(time.Duration(time.Second))
wrop := DialWriteTimeout(time.Duration(time.Second))
if conn, err = Dial("tcp", testMemcacheAddr, cnop, rdop, wrop); err != nil {
fmt.Println(err)
return
}
if res, err = conn.GetMulti(keys); err != nil {
fmt.Printf("conn.GetMulti(%v) error(%v)", keys, err)
return
}
for _, v := range res {
if err = conn.Scan(v, &p); err != nil {
fmt.Printf("conn.Scan error(%v)\n", err)
return
}
fmt.Println(p)
}
// Output:
//{golang 10}
//{golang 10}
}
| ["\"TEST_MEMCACHE_ADDR\""] | [] | ["TEST_MEMCACHE_ADDR"] | [] | ["TEST_MEMCACHE_ADDR"] | go | 1 | 0 |
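The Set examples above combine item flags bitwise, e.g. FlagJSON | FlagGzip, so a reader of an item has to test each bit to know how the value was encoded. A sketch of that flag inspection with stand-in constants (the real memcache package defines its own values):

package example

// Stand-in flag constants; the real package defines its own values.
const (
	flagGzip uint32 = 1 << iota
	flagJSON
	flagGOB
)

// describeFlags shows how a combined value such as FlagJSON|FlagGzip is
// interpreted: each bit is tested independently, so an item can be both
// JSON-encoded and gzip-compressed at the same time.
func describeFlags(flags uint32) []string {
	var out []string
	if flags&flagJSON != 0 {
		out = append(out, "json")
	}
	if flags&flagGOB != 0 {
		out = append(out, "gob")
	}
	if flags&flagGzip != 0 {
		out = append(out, "gzip")
	}
	return out
}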
app/__init__.py
|
import os
from flask import Blueprint, Flask, jsonify
from .apis import api
from .extensions import cors, db, jwt, mail, migrate, redis
def create_app():
app = Flask(__name__)
# set config
app_settings = os.getenv('APP_SETTINGS') or 'app.config.DevelopmentConfig'
app.config.from_object(app_settings)
# set up extensions
cors.init_app(app, resources={r'/api/*': {'origins': '*'}})
db.init_app(app)
jwt.init_app(app)
migrate.init_app(app, db)
mail.init_app(app)
redis.init_app(app)
api.init_app(app)
return app
| [] | [] | ["APP_SETTINGS"] | [] | ["APP_SETTINGS"] | python | 1 | 0 |
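create_app above selects its configuration class from APP_SETTINGS and falls back to the development config when the variable is unset or empty. The same env-or-default selection as a minimal Go sketch (the function name and default value are mine):

package example

import "os"

// settingsFromEnv mirrors the APP_SETTINGS lookup above: use the value from
// the environment when it is set and non-empty, otherwise fall back to the
// development configuration.
func settingsFromEnv() string {
	if v := os.Getenv("APP_SETTINGS"); v != "" {
		return v
	}
	return "app.config.DevelopmentConfig"
}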
chain/vm/runtime.go
|
package vm
import (
"bytes"
"context"
"encoding/binary"
"fmt"
"os"
gruntime "runtime"
"time"
"github.com/ipfs/go-cid"
ipldcbor "github.com/ipfs/go-ipld-cbor"
"go.opencensus.io/trace"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/cbor"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/go-state-types/network"
rtt "github.com/filecoin-project/go-state-types/rt"
rt0 "github.com/filecoin-project/specs-actors/actors/runtime"
rt2 "github.com/filecoin-project/specs-actors/v2/actors/runtime"
rt3 "github.com/filecoin-project/specs-actors/v3/actors/runtime"
rt4 "github.com/filecoin-project/specs-actors/v4/actors/runtime"
rt5 "github.com/filecoin-project/specs-actors/v5/actors/runtime"
rt6 "github.com/filecoin-project/specs-actors/v6/actors/runtime"
rt7 "github.com/filecoin-project/specs-actors/v7/actors/runtime"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/aerrors"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/types"
)
type Message struct {
msg types.Message
}
func (m *Message) Caller() address.Address {
if m.msg.From.Protocol() != address.ID {
panic("runtime message has a non-ID caller")
}
return m.msg.From
}
func (m *Message) Receiver() address.Address {
if m.msg.To != address.Undef && m.msg.To.Protocol() != address.ID {
panic("runtime message has a non-ID receiver")
}
return m.msg.To
}
func (m *Message) ValueReceived() abi.TokenAmount {
return m.msg.Value
}
// EnableDetailedTracing, if true, outputs gas tracing in execution traces.
var EnableDetailedTracing = os.Getenv("LOTUS_VM_ENABLE_TRACING") == "1"
type Runtime struct {
rt7.Message
rt7.Syscalls
ctx context.Context
vm *LegacyVM
state *state.StateTree
height abi.ChainEpoch
cst ipldcbor.IpldStore
pricelist Pricelist
gasAvailable int64
gasUsed int64
// address that started invoke chain
origin address.Address
originNonce uint64
executionTrace types.ExecutionTrace
depth uint64
numActorsCreated uint64
allowInternal bool
callerValidated bool
lastGasChargeTime time.Time
lastGasCharge *types.GasTrace
}
func (rt *Runtime) BaseFee() abi.TokenAmount {
return rt.vm.baseFee
}
func (rt *Runtime) NetworkVersion() network.Version {
return rt.vm.networkVersion
}
func (rt *Runtime) TotalFilCircSupply() abi.TokenAmount {
cs, err := rt.vm.GetCircSupply(rt.ctx)
if err != nil {
rt.Abortf(exitcode.ErrIllegalState, "failed to get total circ supply: %s", err)
}
return cs
}
func (rt *Runtime) ResolveAddress(addr address.Address) (ret address.Address, ok bool) {
r, err := rt.state.LookupID(addr)
if err != nil {
if xerrors.Is(err, types.ErrActorNotFound) {
return address.Undef, false
}
panic(aerrors.Fatalf("failed to resolve address %s: %s", addr, err))
}
return r, true
}
type notFoundErr interface {
IsNotFound() bool
}
func (rt *Runtime) StoreGet(c cid.Cid, o cbor.Unmarshaler) bool {
if err := rt.cst.Get(context.TODO(), c, o); err != nil {
var nfe notFoundErr
if xerrors.As(err, &nfe) && nfe.IsNotFound() {
if xerrors.As(err, new(ipldcbor.SerializationError)) {
panic(aerrors.Newf(exitcode.ErrSerialization, "failed to unmarshal cbor object %s", err))
}
return false
}
panic(aerrors.Fatalf("failed to get cbor object %s: %s", c, err))
}
return true
}
func (rt *Runtime) StorePut(x cbor.Marshaler) cid.Cid {
c, err := rt.cst.Put(context.TODO(), x)
if err != nil {
if xerrors.As(err, new(ipldcbor.SerializationError)) {
panic(aerrors.Newf(exitcode.ErrSerialization, "failed to marshal cbor object %s", err))
}
panic(aerrors.Fatalf("failed to put cbor object: %s", err))
}
return c
}
var _ rt0.Runtime = (*Runtime)(nil)
var _ rt5.Runtime = (*Runtime)(nil)
var _ rt2.Runtime = (*Runtime)(nil)
var _ rt3.Runtime = (*Runtime)(nil)
var _ rt4.Runtime = (*Runtime)(nil)
var _ rt5.Runtime = (*Runtime)(nil)
var _ rt6.Runtime = (*Runtime)(nil)
var _ rt7.Runtime = (*Runtime)(nil)
func (rt *Runtime) shimCall(f func() interface{}) (rval []byte, aerr aerrors.ActorError) {
defer func() {
if r := recover(); r != nil {
if ar, ok := r.(aerrors.ActorError); ok {
log.Warnf("LegacyVM.Call failure in call from: %s to %s: %+v", rt.Caller(), rt.Receiver(), ar)
aerr = ar
return
}
//log.Desugar().WithOptions(zap.AddStacktrace(zapcore.ErrorLevel)).
//Sugar().Errorf("spec actors failure: %s", r)
log.Errorf("spec actors failure: %s", r)
if rt.NetworkVersion() <= network.Version3 {
aerr = aerrors.Newf(1, "spec actors failure: %s", r)
} else {
aerr = aerrors.Newf(exitcode.SysErrReserved1, "spec actors failure: %s", r)
}
}
}()
ret := f()
if !rt.callerValidated {
rt.Abortf(exitcode.SysErrorIllegalActor, "Caller MUST be validated during method execution")
}
switch ret := ret.(type) {
case []byte:
return ret, nil
case *abi.EmptyValue:
return nil, nil
case cbor.Marshaler:
buf := new(bytes.Buffer)
if err := ret.MarshalCBOR(buf); err != nil {
return nil, aerrors.Absorb(err, 2, "failed to marshal response to cbor")
}
return buf.Bytes(), nil
case nil:
return nil, nil
default:
return nil, aerrors.New(3, "could not determine type for response from call")
}
}
func (rt *Runtime) ValidateImmediateCallerAcceptAny() {
rt.abortIfAlreadyValidated()
return
}
func (rt *Runtime) CurrentBalance() abi.TokenAmount {
b, err := rt.GetBalance(rt.Receiver())
if err != nil {
rt.Abortf(err.RetCode(), "get current balance: %v", err)
}
return b
}
func (rt *Runtime) GetActorCodeCID(addr address.Address) (ret cid.Cid, ok bool) {
act, err := rt.state.GetActor(addr)
if err != nil {
if xerrors.Is(err, types.ErrActorNotFound) {
return cid.Undef, false
}
panic(aerrors.Fatalf("failed to get actor: %s", err))
}
return act.Code, true
}
func (rt *Runtime) GetRandomnessFromTickets(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness {
res, err := rt.vm.rand.GetChainRandomness(rt.ctx, personalization, randEpoch, entropy)
if err != nil {
panic(aerrors.Fatalf("could not get ticket randomness: %s", err))
}
return res
}
func (rt *Runtime) GetRandomnessFromBeacon(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness {
res, err := rt.vm.rand.GetBeaconRandomness(rt.ctx, personalization, randEpoch, entropy)
if err != nil {
panic(aerrors.Fatalf("could not get beacon randomness: %s", err))
}
return res
}
func (rt *Runtime) NewActorAddress() address.Address {
var b bytes.Buffer
oa, _ := ResolveToKeyAddr(rt.vm.cstate, rt.vm.cst, rt.origin)
if err := oa.MarshalCBOR(&b); err != nil { // todo: spec says cbor; why not just bytes?
panic(aerrors.Fatalf("writing caller address into a buffer: %v", err))
}
if err := binary.Write(&b, binary.BigEndian, rt.originNonce); err != nil {
panic(aerrors.Fatalf("writing nonce address into a buffer: %v", err))
}
if err := binary.Write(&b, binary.BigEndian, rt.numActorsCreated); err != nil { // TODO: expose on vm
panic(aerrors.Fatalf("writing callSeqNum address into a buffer: %v", err))
}
addr, err := address.NewActorAddress(b.Bytes())
if err != nil {
panic(aerrors.Fatalf("create actor address: %v", err))
}
rt.incrementNumActorsCreated()
return addr
}
func (rt *Runtime) CreateActor(codeID cid.Cid, addr address.Address) {
if addr == address.Undef && rt.NetworkVersion() >= network.Version7 {
rt.Abortf(exitcode.SysErrorIllegalArgument, "CreateActor with Undef address")
}
act, aerr := rt.vm.areg.Create(codeID, rt)
if aerr != nil {
rt.Abortf(aerr.RetCode(), aerr.Error())
}
_, err := rt.state.GetActor(addr)
if err == nil {
rt.Abortf(exitcode.SysErrorIllegalArgument, "Actor address already exists")
}
rt.chargeGas(rt.Pricelist().OnCreateActor())
err = rt.state.SetActor(addr, act)
if err != nil {
panic(aerrors.Fatalf("creating actor entry: %v", err))
}
_ = rt.chargeGasSafe(gasOnActorExec)
}
// DeleteActor deletes the executing actor from the state tree, transferring
// any balance to beneficiary.
// Aborts if the beneficiary does not exist or is the calling actor.
// May only be called by the actor itself.
func (rt *Runtime) DeleteActor(beneficiary address.Address) {
rt.chargeGas(rt.Pricelist().OnDeleteActor())
act, err := rt.state.GetActor(rt.Receiver())
if err != nil {
if xerrors.Is(err, types.ErrActorNotFound) {
rt.Abortf(exitcode.SysErrorIllegalActor, "failed to load actor in delete actor: %s", err)
}
panic(aerrors.Fatalf("failed to get actor: %s", err))
}
if !act.Balance.IsZero() {
// TODO: Should be safe to drop the version-check,
// since only the paych actor called this pre-version 7, but let's leave it for now
if rt.NetworkVersion() >= network.Version7 {
beneficiaryId, found := rt.ResolveAddress(beneficiary)
if !found {
rt.Abortf(exitcode.SysErrorIllegalArgument, "beneficiary doesn't exist")
}
if beneficiaryId == rt.Receiver() {
rt.Abortf(exitcode.SysErrorIllegalArgument, "benefactor cannot be beneficiary")
}
}
// Transfer the executing actor's balance to the beneficiary
if err := rt.vm.transfer(rt.Receiver(), beneficiary, act.Balance, rt.NetworkVersion()); err != nil {
panic(aerrors.Fatalf("failed to transfer balance to beneficiary actor: %s", err))
}
}
// Delete the executing actor
if err := rt.state.DeleteActor(rt.Receiver()); err != nil {
panic(aerrors.Fatalf("failed to delete actor: %s", err))
}
_ = rt.chargeGasSafe(gasOnActorExec)
}
func (rt *Runtime) StartSpan(name string) func() {
panic("implement me")
}
func (rt *Runtime) ValidateImmediateCallerIs(as ...address.Address) {
rt.abortIfAlreadyValidated()
imm := rt.Caller()
for _, a := range as {
if imm == a {
return
}
}
rt.Abortf(exitcode.SysErrForbidden, "caller %s is not one of %s", rt.Caller(), as)
}
func (rt *Runtime) Context() context.Context {
return rt.ctx
}
func (rt *Runtime) Abortf(code exitcode.ExitCode, msg string, args ...interface{}) {
log.Warnf("Abortf: " + fmt.Sprintf(msg, args...))
panic(aerrors.NewfSkip(2, code, msg, args...))
}
func (rt *Runtime) AbortStateMsg(msg string) {
panic(aerrors.NewfSkip(3, 101, msg))
}
func (rt *Runtime) ValidateImmediateCallerType(ts ...cid.Cid) {
rt.abortIfAlreadyValidated()
callerCid, ok := rt.GetActorCodeCID(rt.Caller())
if !ok {
panic(aerrors.Fatalf("failed to lookup code cid for caller"))
}
for _, t := range ts {
if t == callerCid {
return
}
// this is really only for genesis in tests; nv16 will be running on FVM anyway.
if nv := rt.NetworkVersion(); nv >= network.Version16 {
av, err := actors.VersionForNetwork(nv)
if err != nil {
panic(aerrors.Fatalf("failed to get actors version for network version %d", nv))
}
name := actors.CanonicalName(builtin.ActorNameByCode(t))
ac, ok := actors.GetActorCodeID(av, name)
if ok && ac == callerCid {
return
}
}
}
rt.Abortf(exitcode.SysErrForbidden, "caller cid type %q was not one of %v", callerCid, ts)
}
func (rt *Runtime) CurrEpoch() abi.ChainEpoch {
return rt.height
}
func (rt *Runtime) Send(to address.Address, method abi.MethodNum, m cbor.Marshaler, value abi.TokenAmount, out cbor.Er) exitcode.ExitCode {
if !rt.allowInternal {
rt.Abortf(exitcode.SysErrorIllegalActor, "runtime.Send() is currently disallowed")
}
var params []byte
if m != nil {
buf := new(bytes.Buffer)
if err := m.MarshalCBOR(buf); err != nil {
rt.Abortf(exitcode.ErrSerialization, "failed to marshal input parameters: %s", err)
}
params = buf.Bytes()
}
ret, err := rt.internalSend(rt.Receiver(), to, method, value, params)
if err != nil {
if err.IsFatal() {
panic(err)
}
log.Warnf("vmctx send failed: from: %s to: %s, method: %d: err: %s", rt.Receiver(), to, method, err)
return err.RetCode()
}
_ = rt.chargeGasSafe(gasOnActorExec)
if err := out.UnmarshalCBOR(bytes.NewReader(ret)); err != nil {
rt.Abortf(exitcode.ErrSerialization, "failed to unmarshal return value: %s", err)
}
return 0
}
func (rt *Runtime) internalSend(from, to address.Address, method abi.MethodNum, value types.BigInt, params []byte) ([]byte, aerrors.ActorError) {
start := build.Clock.Now()
ctx, span := trace.StartSpan(rt.ctx, "vmc.Send")
defer span.End()
if span.IsRecordingEvents() {
span.AddAttributes(
trace.StringAttribute("to", to.String()),
trace.Int64Attribute("method", int64(method)),
trace.StringAttribute("value", value.String()),
)
}
msg := &types.Message{
From: from,
To: to,
Method: method,
Value: value,
Params: params,
GasLimit: rt.gasAvailable,
}
st := rt.state
if err := st.Snapshot(ctx); err != nil {
return nil, aerrors.Fatalf("snapshot failed: %s", err)
}
defer st.ClearSnapshot()
ret, errSend, subrt := rt.vm.send(ctx, msg, rt, nil, start)
if errSend != nil {
if errRevert := st.Revert(); errRevert != nil {
return nil, aerrors.Escalate(errRevert, "failed to revert state tree after failed subcall")
}
}
if subrt != nil {
rt.numActorsCreated = subrt.numActorsCreated
rt.executionTrace.Subcalls = append(rt.executionTrace.Subcalls, subrt.executionTrace)
}
return ret, errSend
}
func (rt *Runtime) StateCreate(obj cbor.Marshaler) {
c := rt.StorePut(obj)
err := rt.stateCommit(EmptyObjectCid, c)
if err != nil {
panic(fmt.Errorf("failed to commit state after creating object: %w", err))
}
}
func (rt *Runtime) StateReadonly(obj cbor.Unmarshaler) {
act, err := rt.state.GetActor(rt.Receiver())
if err != nil {
rt.Abortf(exitcode.SysErrorIllegalArgument, "failed to get actor for Readonly state: %s", err)
}
rt.StoreGet(act.Head, obj)
}
func (rt *Runtime) StateTransaction(obj cbor.Er, f func()) {
if obj == nil {
rt.Abortf(exitcode.SysErrorIllegalActor, "Must not pass nil to Transaction()")
}
act, err := rt.state.GetActor(rt.Receiver())
if err != nil {
rt.Abortf(exitcode.SysErrorIllegalActor, "failed to get actor for Transaction: %s", err)
}
baseState := act.Head
rt.StoreGet(baseState, obj)
rt.allowInternal = false
f()
rt.allowInternal = true
c := rt.StorePut(obj)
err = rt.stateCommit(baseState, c)
if err != nil {
panic(fmt.Errorf("failed to commit state after transaction: %w", err))
}
}
func (rt *Runtime) GetBalance(a address.Address) (types.BigInt, aerrors.ActorError) {
act, err := rt.state.GetActor(a)
switch err {
default:
return types.EmptyInt, aerrors.Escalate(err, "failed to look up actor balance")
case types.ErrActorNotFound:
return types.NewInt(0), nil
case nil:
return act.Balance, nil
}
}
func (rt *Runtime) stateCommit(oldh, newh cid.Cid) aerrors.ActorError {
// TODO: we can make this more efficient in the future...
act, err := rt.state.GetActor(rt.Receiver())
if err != nil {
return aerrors.Escalate(err, "failed to get actor to commit state")
}
if act.Head != oldh {
return aerrors.Fatal("failed to update, inconsistent base reference")
}
act.Head = newh
if err := rt.state.SetActor(rt.Receiver(), act); err != nil {
return aerrors.Fatalf("failed to set actor in commit state: %s", err)
}
return nil
}
func (rt *Runtime) finilizeGasTracing() {
if EnableDetailedTracing {
if rt.lastGasCharge != nil {
rt.lastGasCharge.TimeTaken = time.Since(rt.lastGasChargeTime)
}
}
}
// ChargeGas is spec actors function
func (rt *Runtime) ChargeGas(name string, compute int64, virtual int64) {
err := rt.chargeGasInternal(newGasCharge(name, compute, 0).WithVirtual(virtual, 0), 1)
if err != nil {
panic(err)
}
}
func (rt *Runtime) chargeGas(gas GasCharge) {
err := rt.chargeGasInternal(gas, 1)
if err != nil {
panic(err)
}
}
func (rt *Runtime) chargeGasFunc(skip int) func(GasCharge) {
return func(gas GasCharge) {
err := rt.chargeGasInternal(gas, 1+skip)
if err != nil {
panic(err)
}
}
}
func (rt *Runtime) chargeGasInternal(gas GasCharge, skip int) aerrors.ActorError {
toUse := gas.Total()
if EnableDetailedTracing {
var callers [10]uintptr
cout := gruntime.Callers(2+skip, callers[:])
now := build.Clock.Now()
if rt.lastGasCharge != nil {
rt.lastGasCharge.TimeTaken = now.Sub(rt.lastGasChargeTime)
}
gasTrace := types.GasTrace{
Name: gas.Name,
Extra: gas.Extra,
TotalGas: toUse,
ComputeGas: gas.ComputeGas,
StorageGas: gas.StorageGas,
VirtualComputeGas: gas.VirtualCompute,
VirtualStorageGas: gas.VirtualStorage,
Callers: callers[:cout],
}
if gasTrace.VirtualStorageGas == 0 {
gasTrace.VirtualStorageGas = gasTrace.StorageGas
}
if gasTrace.VirtualComputeGas == 0 {
gasTrace.VirtualComputeGas = gasTrace.ComputeGas
}
gasTrace.TotalVirtualGas = gasTrace.VirtualComputeGas + gasTrace.VirtualStorageGas
rt.executionTrace.GasCharges = append(rt.executionTrace.GasCharges, &gasTrace)
rt.lastGasChargeTime = now
rt.lastGasCharge = &gasTrace
}
// overflow safe
if rt.gasUsed > rt.gasAvailable-toUse {
gasUsed := rt.gasUsed
rt.gasUsed = rt.gasAvailable
return aerrors.Newf(exitcode.SysErrOutOfGas, "not enough gas: used=%d, available=%d, use=%d",
gasUsed, rt.gasAvailable, toUse)
}
rt.gasUsed += toUse
return nil
}
func (rt *Runtime) chargeGasSafe(gas GasCharge) aerrors.ActorError {
return rt.chargeGasInternal(gas, 1)
}
func (rt *Runtime) Pricelist() Pricelist {
return rt.pricelist
}
func (rt *Runtime) incrementNumActorsCreated() {
rt.numActorsCreated++
}
func (rt *Runtime) abortIfAlreadyValidated() {
if rt.callerValidated {
rt.Abortf(exitcode.SysErrorIllegalActor, "Method must validate caller identity exactly once")
}
rt.callerValidated = true
}
func (rt *Runtime) Log(level rtt.LogLevel, msg string, args ...interface{}) {
switch level {
case rtt.DEBUG:
actorLog.Debugf(msg, args...)
case rtt.INFO:
actorLog.Infof(msg, args...)
case rtt.WARN:
actorLog.Warnf(msg, args...)
case rtt.ERROR:
actorLog.Errorf(msg, args...)
}
}
| ["\"LOTUS_VM_ENABLE_TRACING\""] | [] | ["LOTUS_VM_ENABLE_TRACING"] | [] | ["LOTUS_VM_ENABLE_TRACING"] | go | 1 | 0 |
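chargeGasInternal above checks the budget with rt.gasUsed > rt.gasAvailable-toUse rather than adding first, which keeps the comparison free of int64 overflow when the charge is large. The same check in isolation, as a sketch with hypothetical names:

package example

import "errors"

// charge applies an overflow-safe budget check: instead of computing
// used+toUse first (which could overflow int64), it compares used against
// available-toUse, the same trick the gas-charging code above uses.
// On failure it reports the budget as exhausted, mirroring how the runtime
// pins gasUsed to gasAvailable.
func charge(used, available, toUse int64) (int64, error) {
	if used > available-toUse {
		return available, errors.New("not enough gas")
	}
	return used + toUse, nil
}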
airflow/providers/amazon/aws/example_dags/example_s3_bucket.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from airflow.models.dag import DAG
from airflow.operators.python import PythonOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.amazon.aws.operators.s3_bucket import S3CreateBucketOperator, S3DeleteBucketOperator
from airflow.utils.dates import days_ago
BUCKET_NAME = os.environ.get('BUCKET_NAME', 'test-airflow-12345')
def upload_keys():
"""This is a python callback to add keys into the s3 bucket"""
# add keys to bucket
s3_hook = S3Hook()
for i in range(0, 3):
s3_hook.load_string(
string_data="input",
key=f"path/data{i}",
bucket_name=BUCKET_NAME,
)
with DAG(
dag_id='s3_bucket_dag',
schedule_interval=None,
start_date=days_ago(2),
max_active_runs=1,
tags=['example'],
) as dag:
create_bucket = S3CreateBucketOperator(
task_id='s3_bucket_dag_create',
bucket_name=BUCKET_NAME,
region_name='us-east-1',
)
add_keys_to_bucket = PythonOperator(
task_id="s3_bucket_dag_add_keys_to_bucket",
python_callable=upload_keys
)
delete_bucket = S3DeleteBucketOperator(
task_id='s3_bucket_dag_delete',
bucket_name=BUCKET_NAME,
force_delete=True,
)
create_bucket >> add_keys_to_bucket >> delete_bucket
| [] | [] | ["BUCKET_NAME"] | [] | ["BUCKET_NAME"] | python | 1 | 0 |
variable/cmd.go
|
package variable
import (
"fmt"
"os"
"os/exec"
"runtime"
"strings"
"github.com/lmika/shellwords"
)
// CmdVar is a variable that evaluates shell command expressions
type CmdVar struct {
}
// NewCmdVar creates a new command variable
func NewCmdVar() *CmdVar {
return &CmdVar{}
}
// Eval evaluates the command expression and returns its trimmed output
func (v *CmdVar) Eval(expr string, debug bool) (val string, err error) {
nameAndArgs := strings.TrimSuffix(strings.TrimPrefix(strings.TrimSpace(expr), "$("), ")")
if nameAndArgs == "" {
return "", nil
}
output, err := v.exec(nameAndArgs, debug)
if err != nil {
return "", err
}
return strings.TrimSpace(string(output)), nil
}
func (v *CmdVar) exec(nameAndArgs string, debug bool) (output []byte, err error) {
if runtime.GOOS != "windows" {
if sh := os.Getenv("SHELL"); sh != "" {
return v.execByShell(sh, nameAndArgs, debug)
}
}
return v.execByNative(nameAndArgs, debug)
}
func (v *CmdVar) execByNative(nameAndArgs string, debug bool) (output []byte, err error) {
if debug {
fmt.Println("==>", nameAndArgs)
}
fields := shellwords.Split(nameAndArgs)
var cmd *exec.Cmd
if len(fields) == 1 {
cmd = exec.Command(fields[0])
} else if len(fields) > 1 {
cmd = exec.Command(fields[0], fields[1:]...)
} else {
panic("unreachable")
}
return cmd.Output()
}
func (v *CmdVar) execByShell(sh, cmds string, debug bool) (output []byte, err error) {
if debug {
fmt.Println("==>", fmt.Sprintf("%s -c %q", sh, cmds))
}
return exec.Command(sh, "-c", cmds).Output()
}
// Match reports whether the expression can be evaluated by this variable
func (v *CmdVar) Match(expr string) (matched bool) {
return strings.HasPrefix(expr, "$(") && strings.HasSuffix(expr, ")")
}
| ["\"SHELL\""] | [] | ["SHELL"] | [] | ["SHELL"] | go | 1 | 0 |
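CmdVar.Eval strips the $( ... ) wrapper, runs the inner command (via $SHELL -c on non-Windows hosts when available, otherwise by splitting the words natively), and returns the trimmed output. A hypothetical usage sketch; the import path shown is a placeholder, not the project's real module path:

package main

import (
	"fmt"

	variable "example.com/yourmodule/variable" // placeholder import path for the package above
)

func main() {
	v := variable.NewCmdVar()
	expr := "$(date +%Y)"
	// Match confirms the expression has the $( ... ) shape before evaluating it.
	if v.Match(expr) {
		out, err := v.Eval(expr, false)
		if err != nil {
			fmt.Println("eval failed:", err)
			return
		}
		fmt.Println("year:", out)
	}
}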
tests/framework/utils/k8s_helper.go
|
/*
Copyright 2016 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"bytes"
"encoding/json"
"fmt"
"html/template"
"os"
"path"
"strconv"
"strings"
"testing"
"time"
"github.com/coreos/pkg/capnslog"
rookclient "github.com/rook/rook/pkg/client/clientset/versioned"
"github.com/rook/rook/pkg/clusterd"
"github.com/rook/rook/pkg/util/exec"
"github.com/stretchr/testify/require"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
storagev1util "k8s.io/kubernetes/pkg/apis/storage/v1/util"
)
// K8sHelper is a helper for common kubectl commands
type K8sHelper struct {
executor *exec.CommandExecutor
Clientset *kubernetes.Clientset
RookClientset *rookclient.Clientset
RunningInCluster bool
T func() *testing.T
}
const (
// RetryLoop params for tests.
RetryLoop = 60
// RetryInterval param for test - wait time while in RetryLoop
RetryInterval = 5
// TestMountPath is the path inside a test pod where storage is mounted
TestMountPath = "/tmp/testrook"
// hostnameTestPrefix is a prefix added to the node hostname
hostnameTestPrefix = "test-prefix-this-is-a-very-long-hostname-"
)
// CreateK8sHelper creates an instance of K8sHelper
func CreateK8sHelper(t func() *testing.T) (*K8sHelper, error) {
executor := &exec.CommandExecutor{}
config, err := getKubeConfig(executor)
if err != nil {
return nil, fmt.Errorf("failed to get kube client. %+v", err)
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("failed to get clientset. %+v", err)
}
rookClientset, err := rookclient.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("failed to get rook clientset. %+v", err)
}
h := &K8sHelper{executor: executor, Clientset: clientset, RookClientset: rookClientset, T: t}
if strings.Index(config.Host, "//10.") != -1 {
h.RunningInCluster = true
}
return h, err
}
var k8slogger = capnslog.NewPackageLogger("github.com/rook/rook", "utils")
// GetK8sServerVersion returns k8s server version under test
func (k8sh *K8sHelper) GetK8sServerVersion() string {
versionInfo, err := k8sh.Clientset.ServerVersion()
require.Nil(k8sh.T(), err)
return versionInfo.GitVersion
}
func (k8sh *K8sHelper) VersionAtLeast(minVersion string) bool {
v := version.MustParseSemantic(k8sh.GetK8sServerVersion())
return v.AtLeast(version.MustParseSemantic(minVersion))
}
func (k8sh *K8sHelper) VersionMinorMatches(minVersion string) (string, bool) {
kubeVersion := k8sh.GetK8sServerVersion()
v := version.MustParseSemantic(kubeVersion)
requestedVersion := version.MustParseSemantic(minVersion)
return kubeVersion, v.Major() == requestedVersion.Major() && v.Minor() == requestedVersion.Minor()
}
func (k8sh *K8sHelper) MakeContext() *clusterd.Context {
return &clusterd.Context{Clientset: k8sh.Clientset, RookClientset: k8sh.RookClientset, Executor: k8sh.executor}
}
func (k8sh *K8sHelper) GetDockerImage(image string) error {
dockercmd := os.Getenv("DOCKERCMD")
if dockercmd == "" {
dockercmd = "docker"
}
return k8sh.executor.ExecuteCommand(false, "", dockercmd, "pull", image)
}
// SetDeploymentVersion sets the container version on the deployment. It is assumed to be the rook/ceph image.
func (k8sh *K8sHelper) SetDeploymentVersion(namespace, deploymentName, containerName, version string) error {
_, err := k8sh.Kubectl("-n", namespace, "set", "image", "deploy/"+deploymentName, containerName+"=rook/ceph:"+version)
return err
}
// Kubectl is wrapper for executing kubectl commands
func (k8sh *K8sHelper) Kubectl(args ...string) (string, error) {
result, err := k8sh.executor.ExecuteCommandWithTimeout(false, 15*time.Second, "kubectl", "kubectl", args...)
if err != nil {
k8slogger.Errorf("Failed to execute: kubectl %+v : %+v. %s", args, err, result)
if args[0] == "delete" {
// allow the tests to continue if we were deleting a resource that timed out
return result, nil
}
return result, fmt.Errorf("Failed to run: kubectl %v : %v", args, err)
}
return result, nil
}
// KubectlWithStdin is wrapper for executing kubectl commands in stdin
func (k8sh *K8sHelper) KubectlWithStdin(stdin string, args ...string) (string, error) {
cmdStruct := CommandArgs{Command: "kubectl", PipeToStdIn: stdin, CmdArgs: args}
cmdOut := ExecuteCommand(cmdStruct)
if cmdOut.ExitCode != 0 {
k8slogger.Errorf("Failed to execute stdin: kubectl %v : %v", args, cmdOut.Err.Error())
if strings.Index(cmdOut.Err.Error(), "(NotFound)") != -1 || strings.Index(cmdOut.StdErr, "(NotFound)") != -1 {
return cmdOut.StdErr, errors.NewNotFound(schema.GroupResource{}, "")
}
return cmdOut.StdErr, fmt.Errorf("Failed to run stdin: kubectl %v : %v", args, cmdOut.StdErr)
}
if cmdOut.StdOut == "" {
return cmdOut.StdErr, nil
}
return cmdOut.StdOut, nil
}
func getKubeConfig(executor exec.Executor) (*rest.Config, error) {
context, err := executor.ExecuteCommandWithOutput(false, "", "kubectl", "config", "view", "-o", "json")
if err != nil {
k8slogger.Errorf("Errors Encountered while executing kubectl command : %v", err)
}
// Parse the kubectl context to get the settings for client connections
var kc kubectlContext
if err := json.Unmarshal([]byte(context), &kc); err != nil {
return nil, fmt.Errorf("failed to unmarshal kubectl config: %+v", err)
}
// find the current context
var currentContext kContext
found := false
for _, c := range kc.Contexts {
if kc.Current == c.Name {
currentContext = c
found = true
}
}
if !found {
return nil, fmt.Errorf("failed to find current context %s in %+v", kc.Current, kc.Contexts)
}
// find the current cluster
var currentCluster kclusterContext
found = false
for _, c := range kc.Clusters {
if currentContext.Cluster.Cluster == c.Name {
currentCluster = c
found = true
}
}
if !found {
return nil, fmt.Errorf("failed to find cluster %s in %+v", kc.Current, kc.Clusters)
}
config := &rest.Config{Host: currentCluster.Cluster.Server}
if currentContext.Cluster.User == "" {
config.Insecure = true
} else {
config.Insecure = false
// find the current user
var currentUser kuserContext
found = false
for _, u := range kc.Users {
if currentContext.Cluster.User == u.Name {
currentUser = u
found = true
}
}
if !found {
return nil, fmt.Errorf("failed to find kube user %s in %+v", kc.Current, kc.Users)
}
config.TLSClientConfig = rest.TLSClientConfig{
CAFile: currentCluster.Cluster.CertAuthority,
KeyFile: currentUser.Cluster.ClientKey,
CertFile: currentUser.Cluster.ClientCert,
}
// Set Insecure to true if cert information is missing
if currentUser.Cluster.ClientCert == "" {
config.Insecure = true
}
}
logger.Infof("Loaded kubectl context %s at %s. secure=%t",
currentCluster.Name, config.Host, !config.Insecure)
return config, nil
}
type kubectlContext struct {
Contexts []kContext `json:"contexts"`
Users []kuserContext `json:"users"`
Clusters []kclusterContext `json:"clusters"`
Current string `json:"current-context"`
}
type kContext struct {
Name string `json:"name"`
Cluster struct {
Cluster string `json:"cluster"`
User string `json:"user"`
} `json:"context"`
}
type kclusterContext struct {
Name string `json:"name"`
Cluster struct {
Server string `json:"server"`
Insecure bool `json:"insecure-skip-tls-verify"`
CertAuthority string `json:"certificate-authority"`
} `json:"cluster"`
}
type kuserContext struct {
Name string `json:"name"`
Cluster struct {
ClientCert string `json:"client-certificate"`
ClientKey string `json:"client-key"`
} `json:"user"`
}
func (k8sh *K8sHelper) Exec(namespace, podName, command string, commandArgs []string) (string, error) {
return k8sh.ExecWithRetry(1, namespace, podName, command, commandArgs)
}
// ExecWithRetry will attempt to run a command "retries" times, waiting 3s between each call. Upon success, returns the output.
func (k8sh *K8sHelper) ExecWithRetry(retries int, namespace, podName, command string, commandArgs []string) (string, error) {
var err error
for i := 0; i < retries; i++ {
args := []string{"exec", "-n", namespace, podName, "--", command}
args = append(args, commandArgs...)
var result string
result, err = k8sh.Kubectl(args...)
if err == nil {
return result, nil
}
if i < retries-1 {
time.Sleep(3 * time.Second)
}
}
return "", fmt.Errorf("kubectl exec command %s failed on pod %s in namespace %s. %+v", command, podName, namespace, err)
}
// ResourceOperationFromTemplate performs a kubectl action from a pod template after rendering it with the given config
func (k8sh *K8sHelper) ResourceOperationFromTemplate(action string, podDefinition string, config map[string]string) (string, error) {
t := template.New("testTemplate")
t, err := t.Parse(podDefinition)
if err != nil {
return err.Error(), err
}
var tpl bytes.Buffer
if err := t.Execute(&tpl, config); err != nil {
return err.Error(), err
}
podDef := tpl.String()
args := []string{action, "-f", "-"}
result, err := k8sh.KubectlWithStdin(podDef, args...)
if err == nil {
return result, nil
}
logger.Errorf("Failed to execute kubectl %v %v -- %v", args, podDef, err)
return "", fmt.Errorf("Could not %s resource in args : %v %v-- %v", action, args, podDef, err)
}
// ResourceOperation performs a kubectl action on a pod definition
func (k8sh *K8sHelper) ResourceOperation(action string, manifest string) error {
args := []string{action, "-f", "-"}
logger.Infof("kubectl %s manifest:\n%s", action, manifest)
_, err := k8sh.KubectlWithStdin(manifest, args...)
if err == nil {
return nil
}
logger.Errorf("Failed to execute kubectl %v -- %v", args, err)
return fmt.Errorf("Could Not create resource in args : %v -- %v", args, err)
}
// DeletePod performs a kubectl delete pod on the given pod
func (k8sh *K8sHelper) DeletePod(namespace, name string) error {
args := append([]string{"--grace-period=0", "pod"}, name)
if namespace != "" {
args = append(args, []string{"-n", namespace}...)
}
return k8sh.DeleteResourceAndWait(true, args...)
}
// DeletePods performs a kubectl delete pod on the given pods
func (k8sh *K8sHelper) DeletePods(pods ...string) (msg string, err error) {
for _, pod := range pods {
if perr := k8sh.DeletePod("", pod); perr != nil {
err = perr
}
}
return
}
// DeleteResource performs a kubectl delete on the given args
func (k8sh *K8sHelper) DeleteResource(args ...string) error {
return k8sh.DeleteResourceAndWait(true, args...)
}
// WaitForCustomResourceDeletion waits for the CRD deletion
func (k8sh *K8sHelper) WaitForCustomResourceDeletion(namespace string, checkerFunc func() error) error {
// wait for the operator to finalize and delete the CRD
for i := 0; i < 10; i++ {
err := checkerFunc()
if err == nil {
logger.Infof("custom resource %s still exists", namespace)
time.Sleep(2 * time.Second)
continue
}
if errors.IsNotFound(err) {
logger.Infof("custom resource %s deleted", namespace)
return nil
}
return err
}
logger.Errorf("gave up deleting custom resource %s", namespace)
return nil
}
// DeleteResourceAndWait performs a kubectl delete on the given args.
// If wait is false, a flag will be passed to indicate the delete should return immediately
func (k8sh *K8sHelper) DeleteResourceAndWait(wait bool, args ...string) error {
if !wait {
// new flag in k8s 1.11
v := version.MustParseSemantic(k8sh.GetK8sServerVersion())
if v.AtLeast(version.MustParseSemantic("1.11.0")) {
args = append(args, "--wait=false")
}
}
args = append([]string{"delete"}, args...)
_, err := k8sh.Kubectl(args...)
if err == nil {
return nil
}
return fmt.Errorf("Could Not delete resource in k8s -- %v", err)
}
// GetResource performs a kubectl get on the given args
func (k8sh *K8sHelper) GetResource(args ...string) (string, error) {
args = append([]string{"get"}, args...)
result, err := k8sh.Kubectl(args...)
if err == nil {
return result, nil
}
return "", fmt.Errorf("Could Not get resource in k8s -- %v", err)
}
func (k8sh *K8sHelper) CreateNamespace(namespace string) error {
ns := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
_, err := k8sh.Clientset.CoreV1().Namespaces().Create(ns)
if err != nil && !errors.IsAlreadyExists(err) {
return fmt.Errorf("failed to create namespace %s. %+v", namespace, err)
}
return nil
}
func (k8sh *K8sHelper) CountPodsWithLabel(label string, namespace string) (int, error) {
options := metav1.ListOptions{LabelSelector: label}
pods, err := k8sh.Clientset.CoreV1().Pods(namespace).List(options)
if err != nil {
if errors.IsNotFound(err) {
return 0, nil
}
return 0, err
}
return len(pods.Items), nil
}
// WaitForPodCount waits until the desired number of pods with the label are started
func (k8sh *K8sHelper) WaitForPodCount(label, namespace string, count int) error {
options := metav1.ListOptions{LabelSelector: label}
inc := 0
for inc < RetryLoop {
pods, err := k8sh.Clientset.CoreV1().Pods(namespace).List(options)
if err != nil {
return fmt.Errorf("failed to find pod with label %s. %+v", label, err)
}
if len(pods.Items) >= count {
logger.Infof("found %d pods with label %s", count, label)
return nil
}
inc++
time.Sleep(RetryInterval * time.Second)
logger.Infof("waiting for %d pods (found %d) with label %s in namespace %s", count, len(pods.Items), label, namespace)
}
return fmt.Errorf("Giving up waiting for pods with label %s in namespace %s", label, namespace)
}
// IsPodWithLabelPresent returns true if at least one Pod with the label is present.
func (k8sh *K8sHelper) IsPodWithLabelPresent(label string, namespace string) bool {
count, err := k8sh.CountPodsWithLabel(label, namespace)
if err != nil {
return false
}
return count > 0
}
// WaitForLabeledPodsToRun calls WaitForLabeledPodsToRunWithRetries with the default number of retries
func (k8sh *K8sHelper) WaitForLabeledPodsToRun(label, namespace string) error {
return k8sh.WaitForLabeledPodsToRunWithRetries(label, namespace, RetryLoop)
}
// WaitForLabeledPodsToRunWithRetries waits until all pods with the given label are running, retrying up to the given number of times, and returns an error if they never do
func (k8sh *K8sHelper) WaitForLabeledPodsToRunWithRetries(label string, namespace string, retries int) error {
options := metav1.ListOptions{LabelSelector: label}
var lastPod v1.Pod
for i := 0; i < retries; i++ {
pods, err := k8sh.Clientset.CoreV1().Pods(namespace).List(options)
lastStatus := ""
running := 0
if err == nil && len(pods.Items) > 0 {
for _, pod := range pods.Items {
if pod.Status.Phase == "Running" {
running++
}
lastPod = pod
lastStatus = string(pod.Status.Phase)
}
if running == len(pods.Items) {
logger.Infof("All %d pod(s) with label %s are running", len(pods.Items), label)
return nil
}
}
logger.Infof("waiting for pod(s) with label %s in namespace %s to be running. status=%s, running=%d/%d, err=%+v",
label, namespace, lastStatus, running, len(pods.Items), err)
time.Sleep(RetryInterval * time.Second)
}
if len(lastPod.Name) == 0 {
logger.Infof("no pod was found with label %s", label)
} else {
k8sh.PrintPodDescribe(namespace, lastPod.Name)
}
return fmt.Errorf("Giving up waiting for pod with label %s in namespace %s to be running", label, namespace)
}
// WaitUntilPodWithLabelDeleted returns true if the pods with the given label are deleted within the retry window, else returns false
func (k8sh *K8sHelper) WaitUntilPodWithLabelDeleted(label string, namespace string) bool {
options := metav1.ListOptions{LabelSelector: label}
for i := 0; i < RetryLoop; i++ {
pods, err := k8sh.Clientset.CoreV1().Pods(namespace).List(options)
if errors.IsNotFound(err) {
logger.Infof("error Found err %v", err)
return true
}
if len(pods.Items) == 0 {
logger.Infof("no (more) pods with label %s in namespace %s to be deleted", label, namespace)
return true
}
time.Sleep(RetryInterval * time.Second)
logger.Infof("waiting for pod with label %s in namespace %s to be deleted", label, namespace)
}
logger.Infof("Giving up waiting for pod with label %s in namespace %s to be deleted", label, namespace)
return false
}
// PrintPodStatus log out the status phase of a pod
func (k8sh *K8sHelper) PrintPodStatus(namespace string) {
pods, err := k8sh.Clientset.CoreV1().Pods(namespace).List(metav1.ListOptions{})
if err != nil {
logger.Errorf("failed to get pod status in namespace %s. %+v", namespace, err)
return
}
for _, pod := range pods.Items {
logger.Infof("%s (%s) pod status: %+v", pod.Name, namespace, pod.Status)
}
}
func (k8sh *K8sHelper) GetPodDescribeFromNamespace(namespace, testName, platformName string) {
logger.Infof("Gathering pod describe for all pods in namespace %s", namespace)
pods, err := k8sh.Clientset.CoreV1().Pods(namespace).List(metav1.ListOptions{})
if err != nil {
logger.Errorf("failed to list pods in namespace %s. %+v", namespace, err)
return
}
file, err := k8sh.createTestLogFile(platformName, "podDescribe", namespace, testName, "")
if err != nil {
return
}
defer file.Close()
for _, p := range pods.Items {
k8sh.appendPodDescribe(file, namespace, p.Name)
}
}
func (k8sh *K8sHelper) appendPodDescribe(file *os.File, namespace, name string) {
description := k8sh.getPodDescribe(namespace, name)
if description == "" {
return
}
writeHeader(file, fmt.Sprintf("Pod: %s\n", name))
file.WriteString(description)
file.WriteString("\n")
}
func (k8sh *K8sHelper) PrintPodDescribe(namespace string, args ...string) {
description := k8sh.getPodDescribe(namespace, args...)
if description == "" {
return
}
logger.Infof("POD Description:\n%s", description)
}
func (k8sh *K8sHelper) getPodDescribe(namespace string, args ...string) string {
args = append([]string{"describe", "pod", "-n", namespace}, args...)
description, err := k8sh.Kubectl(args...)
if err != nil {
logger.Errorf("failed to describe pod. %v %+v", args, err)
return ""
}
return description
}
func (k8sh *K8sHelper) PrintEventsForNamespace(namespace string) {
events, err := k8sh.Clientset.CoreV1().Events(namespace).List(metav1.ListOptions{})
if err != nil {
logger.Warningf("failed to get events in namespace %s. %+v", namespace, err)
return
}
logger.Infof("DUMPING events in namespace %s", namespace)
for _, event := range events.Items {
logger.Infof("%+v", event)
}
logger.Infof("DONE DUMPING events in namespace %s", namespace)
}
// IsPodRunning returns true if a Pod is in Running status or reaches Running status within the retry window, else returns false
func (k8sh *K8sHelper) IsPodRunning(name string, namespace string) bool {
getOpts := metav1.GetOptions{}
inc := 0
for inc < RetryLoop {
pod, err := k8sh.Clientset.CoreV1().Pods(namespace).Get(name, getOpts)
if err == nil {
if pod.Status.Phase == "Running" {
return true
}
}
inc++
time.Sleep(RetryInterval * time.Second)
logger.Infof("waiting for pod %s in namespace %s to be running", name, namespace)
}
pod, _ := k8sh.Clientset.CoreV1().Pods(namespace).Get(name, getOpts)
k8sh.PrintPodDescribe(namespace, pod.Name)
logger.Infof("Giving up waiting for pod %s in namespace %s to be running", name, namespace)
return false
}
// IsPodTerminated is a wrapper around IsPodTerminatedWithOpts()
func (k8sh *K8sHelper) IsPodTerminated(name string, namespace string) bool {
return k8sh.IsPodTerminatedWithOpts(name, namespace, metav1.GetOptions{})
}
// IsPodTerminatedWithOpts returns true if a Pod is terminated or reaches Terminated status
// within the retry window, else returns false
func (k8sh *K8sHelper) IsPodTerminatedWithOpts(name string, namespace string, getOpts metav1.GetOptions) bool {
inc := 0
for inc < RetryLoop {
pod, err := k8sh.Clientset.CoreV1().Pods(namespace).Get(name, getOpts)
if err != nil {
k8slogger.Infof("Pod %s in namespace %s terminated ", name, namespace)
return true
}
k8slogger.Infof("waiting for Pod %s in namespace %s to terminate, status : %+v", name, namespace, pod.Status)
time.Sleep(RetryInterval * time.Second)
inc++
}
k8slogger.Infof("Pod %s in namespace %s did not terminate", name, namespace)
return false
}
// IsServiceUp returns true if a service is up or comes up within 150s, else returns false
func (k8sh *K8sHelper) IsServiceUp(name string, namespace string) bool {
getOpts := metav1.GetOptions{}
inc := 0
for inc < RetryLoop {
_, err := k8sh.Clientset.CoreV1().Services(namespace).Get(name, getOpts)
if err == nil {
k8slogger.Infof("Service: %s in namespace: %s is up", name, namespace)
return true
}
k8slogger.Infof("waiting for Service %s in namespace %s ", name, namespace)
time.Sleep(RetryInterval * time.Second)
inc++
}
k8slogger.Infof("Giving up waiting for service: %s in namespace %s ", name, namespace)
return false
}
// GetService returns output from "kubectl get svc $NAME" command
func (k8sh *K8sHelper) GetService(servicename string, namespace string) (*v1.Service, error) {
getOpts := metav1.GetOptions{}
result, err := k8sh.Clientset.CoreV1().Services(namespace).Get(servicename, getOpts)
if err != nil {
return nil, fmt.Errorf("Cannot find service %s in namespace %s, err-- %v", servicename, namespace, err)
}
return result, nil
}
// IsCRDPresent returns true if custom resource definition is present
func (k8sh *K8sHelper) IsCRDPresent(crdName string) bool {
cmdArgs := []string{"get", "crd", crdName}
inc := 0
for inc < RetryLoop {
_, err := k8sh.Kubectl(cmdArgs...)
if err == nil {
k8slogger.Infof("Found the CRD resource: " + crdName)
return true
}
time.Sleep(RetryInterval * time.Second)
inc++
}
return false
}
// WriteToPod writes a file to the given Pod
func (k8sh *K8sHelper) WriteToPod(namespace, podName, filename, message string) error {
return k8sh.WriteToPodRetry(namespace, podName, filename, message, 1)
}
// WriteToPodRetry writes a file to the Pod, retrying up to the given number of times
func (k8sh *K8sHelper) WriteToPodRetry(namespace, podName, filename, message string, retries int) error {
logger.Infof("Writing file %s to pod %s", filename, podName)
var err error
for i := 0; i < retries; i++ {
if i > 0 {
logger.Infof("retrying write in 5s...")
time.Sleep(5 * time.Second)
}
err = k8sh.writeToPod(namespace, podName, filename, message)
if err == nil {
logger.Infof("write file %s in pod %s was successful", filename, podName)
return nil
}
}
return fmt.Errorf("failed to write file %s to pod %s. %+v", filename, podName, err)
}
func (k8sh *K8sHelper) ReadFromPod(namespace, podName, filename, expectedMessage string) error {
return k8sh.ReadFromPodRetry(namespace, podName, filename, expectedMessage, 1)
}
func (k8sh *K8sHelper) ReadFromPodRetry(namespace, podName, filename, expectedMessage string, retries int) error {
logger.Infof("Reading file %s from pod %s", filename, podName)
var err error
for i := 0; i < retries; i++ {
if i > 0 {
logger.Infof("retrying read in 5s...")
time.Sleep(5 * time.Second)
}
var data string
data, err = k8sh.readFromPod(namespace, podName, filename)
if err == nil {
logger.Infof("read file %s from pod %s was successful after %d attempt(s)", filename, podName, (i + 1))
if !strings.Contains(data, expectedMessage) {
return fmt.Errorf(`file %s in pod %s returned message "%s" instead of "%s"`, filename, podName, data, expectedMessage)
}
return nil
}
}
return fmt.Errorf("failed to read file %s from pod %s. %+v", filename, podName, err)
}
func (k8sh *K8sHelper) writeToPod(namespace, name, filename, message string) error {
wt := "echo \"" + message + "\">" + path.Join(TestMountPath, filename)
args := []string{"exec", name}
if namespace != "" {
args = append(args, "-n", namespace)
}
args = append(args, "--", "sh", "-c", wt)
_, err := k8sh.Kubectl(args...)
if err != nil {
return fmt.Errorf("failed to write file %s to pod %s. %+v", filename, name, err)
}
return nil
}
func (k8sh *K8sHelper) readFromPod(namespace, name, filename string) (string, error) {
rd := path.Join(TestMountPath, filename)
args := []string{"exec", name}
if namespace != "" {
args = append(args, "-n", namespace)
}
args = append(args, "--", "cat", rd)
result, err := k8sh.Kubectl(args...)
if err != nil {
return "", fmt.Errorf("failed to read file %s from pod %s. %+v", filename, name, err)
}
return result, nil
}
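// exampleWriteReadRoundTrip is an editor's illustrative sketch, not part of the
// original helper: WriteToPodRetry and ReadFromPodRetry are intended to be
// paired to verify that data written through a pod's mounted volume can be read
// back. The namespace, pod and file names are hypothetical.
func exampleWriteReadRoundTrip(k8sh *K8sHelper) error {
	if err := k8sh.WriteToPodRetry("demo-ns", "demo-pod", "hello.txt", "hello world", 3); err != nil {
		return err
	}
	// ReadFromPodRetry fails if the file content does not contain the expected message.
	return k8sh.ReadFromPodRetry("demo-ns", "demo-pod", "hello.txt", "hello world", 3)
}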
// GetVolumeResourceName gets the Volume object name from the PVC
func (k8sh *K8sHelper) GetVolumeResourceName(namespace, pvcName string) (string, error) {
getOpts := metav1.GetOptions{}
pvc, err := k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).Get(pvcName, getOpts)
if err != nil {
return "", err
}
return pvc.Spec.VolumeName, nil
}
// IsVolumeResourcePresent returns true if Volume resource is present
func (k8sh *K8sHelper) IsVolumeResourcePresent(namespace, volumeName string) bool {
err := k8sh.waitForVolume(namespace, volumeName, true)
if err != nil {
k8slogger.Error(err.Error())
return false
}
return true
}
// IsVolumeResourceAbsent returns true if the Volume resource is deleted/absent within 90s else returns false
func (k8sh *K8sHelper) IsVolumeResourceAbsent(namespace, volumeName string) bool {
err := k8sh.waitForVolume(namespace, volumeName, false)
if err != nil {
k8slogger.Error(err.Error())
return false
}
return true
}
func (k8sh *K8sHelper) waitForVolume(namespace, volumeName string, exist bool) error {
action := "exist"
if !exist {
action = "not " + action
}
inc := 0
for inc < RetryLoop {
isExist, err := k8sh.isVolumeExist(namespace, volumeName)
if err != nil {
return fmt.Errorf("Errors encountered while getting Volume %s/%s: %v", namespace, volumeName, err)
}
if isExist == exist {
return nil
}
k8slogger.Infof("waiting for Volume %s in namespace %s to %s", volumeName, namespace, action)
time.Sleep(RetryInterval * time.Second)
inc++
}
k8sh.printVolumes(namespace, volumeName)
k8sh.PrintPVs(false /*detailed*/)
k8sh.PrintPVCs(namespace, false /*detailed*/)
return fmt.Errorf("timeout for Volume %s in namespace %s wait to %s", volumeName, namespace, action)
}
func (k8sh *K8sHelper) PrintPVs(detailed bool) {
pvs, err := k8sh.Clientset.CoreV1().PersistentVolumes().List(metav1.ListOptions{})
if err != nil {
logger.Errorf("failed to list pvs. %+v", err)
return
}
if detailed {
logger.Infof("Found %d PVs", len(pvs.Items))
for _, pv := range pvs.Items {
logger.Infof("PV %s: %+v", pv.Name, pv)
}
} else {
var names []string
for _, pv := range pvs.Items {
names = append(names, pv.Name)
}
logger.Infof("Found PVs: %v", names)
}
}
func (k8sh *K8sHelper) PrintPVCs(namespace string, detailed bool) {
pvcs, err := k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).List(metav1.ListOptions{})
if err != nil {
logger.Errorf("failed to list pvcs. %+v", err)
return
}
if detailed {
logger.Infof("Found %d PVCs", len(pvcs.Items))
for _, pvc := range pvcs.Items {
logger.Infof("PVC %s: %+v", pvc.Name, pvc)
}
} else {
var names []string
for _, pvc := range pvcs.Items {
names = append(names, pvc.Name)
}
logger.Infof("Found PVCs: %v", names)
}
}
func (k8sh *K8sHelper) PrintStorageClasses(detailed bool) {
scs, err := k8sh.Clientset.StorageV1().StorageClasses().List(metav1.ListOptions{})
if err != nil {
logger.Errorf("failed to list StorageClasses: %+v", err)
return
}
if detailed {
logger.Infof("Found %d StorageClasses", len(scs.Items))
for _, sc := range scs.Items {
logger.Infof("StorageClass %s: %+v", sc.Name, sc)
}
} else {
var names []string
for _, sc := range scs.Items {
names = append(names, sc.Name)
}
logger.Infof("Found StorageClasses: %v", names)
}
}
func (k8sh *K8sHelper) printVolumes(namespace, desiredVolume string) {
volumes, err := k8sh.RookClientset.RookV1alpha2().Volumes(namespace).List(metav1.ListOptions{})
if err != nil {
logger.Infof("failed to list volumes in ns %s. %+v", namespace, err)
}
var names []string
for _, volume := range volumes.Items {
names = append(names, volume.Name)
}
logger.Infof("looking for volume %s in namespace %s. Found volumes: %v", desiredVolume, namespace, names)
}
func (k8sh *K8sHelper) isVolumeExist(namespace, name string) (bool, error) {
_, err := k8sh.RookClientset.RookV1alpha2().Volumes(namespace).Get(name, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return false, nil
}
return false, err
}
return true, nil
}
func (k8sh *K8sHelper) GetPodNamesForApp(appName, namespace string) ([]string, error) {
args := []string{"get", "pod", "-n", namespace, "-l", fmt.Sprintf("app=%s", appName),
"-o", "jsonpath={.items[*].metadata.name}"}
result, err := k8sh.Kubectl(args...)
if err != nil {
return nil, fmt.Errorf("failed to get pod names for app %s: %+v. output: %s", appName, err, result)
}
podNames := strings.Split(result, " ")
return podNames, nil
}
// GetPodDetails returns details about a pod
func (k8sh *K8sHelper) GetPodDetails(podNamePattern string, namespace string) (string, error) {
args := []string{"get", "pods", "-l", "app=" + podNamePattern, "-o", "wide", "--no-headers=true", "-o", "name"}
if namespace != "" {
args = append(args, []string{"-n", namespace}...)
}
result, err := k8sh.Kubectl(args...)
if err != nil || strings.Contains(result, "No resources found") {
return "", fmt.Errorf("Cannot find pod in with name like %s in namespace : %s -- %v", podNamePattern, namespace, err)
}
return strings.TrimSpace(result), nil
}
// GetPodEvents returns events about a pod
func (k8sh *K8sHelper) GetPodEvents(podNamePattern string, namespace string) (*v1.EventList, error) {
uri := fmt.Sprintf("api/v1/namespaces/%s/events?fieldSelector=involvedObject.name=%s,involvedObject.namespace=%s", namespace, podNamePattern, namespace)
result, err := k8sh.Clientset.CoreV1().RESTClient().Get().RequestURI(uri).DoRaw()
if err != nil {
logger.Errorf("Cannot get events for pod %v in namespace %v, err: %v", podNamePattern, namespace, err)
return nil, fmt.Errorf("Cannot get events for pod %s in namespace %s, err: %v", podNamePattern, namespace, err)
}
events := v1.EventList{}
err = json.Unmarshal(result, &events)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal eventlist response: %v", err)
}
return &events, nil
}
// IsPodInError returns true if a Pod is in error status with the given reason and contains the given message
func (k8sh *K8sHelper) IsPodInError(podNamePattern, namespace, reason, containingMessage string) bool {
inc := 0
for inc < RetryLoop {
events, err := k8sh.GetPodEvents(podNamePattern, namespace)
if err != nil {
k8slogger.Errorf("Cannot get Pod events for %s in namespace %s: %+v ", podNamePattern, namespace, err)
return false
}
for _, e := range events.Items {
if e.Reason == reason && strings.Contains(e.Message, containingMessage) {
return true
}
}
k8slogger.Infof("waiting for Pod %s in namespace %s to error with reason %s and containing the message: %s", podNamePattern, namespace, reason, containingMessage)
time.Sleep(RetryInterval * time.Second)
inc++
}
k8slogger.Infof("Pod %s in namespace %s did not error with reason %s", podNamePattern, namespace, reason)
return false
}
// GetPodHostID returns HostIP address of a pod
func (k8sh *K8sHelper) GetPodHostID(podNamePattern string, namespace string) (string, error) {
listOpts := metav1.ListOptions{LabelSelector: "app=" + podNamePattern}
podList, err := k8sh.Clientset.CoreV1().Pods(namespace).List(listOpts)
if err != nil {
logger.Errorf("Cannot get hostIp for app : %v in namespace %v, err: %v", podNamePattern, namespace, err)
return "", fmt.Errorf("Cannot get hostIp for app : %v in namespace %v, err: %v", podNamePattern, namespace, err)
}
if len(podList.Items) < 1 {
logger.Errorf("Cannot get hostIp for app : %v in namespace %v, err: %v", podNamePattern, namespace, err)
return "", fmt.Errorf("Cannot get hostIp for app : %v in namespace %v, err: %v", podNamePattern, namespace, err)
}
return podList.Items[0].Status.HostIP, nil
}
// GetServiceNodePort returns the NodePort of a service
func (k8sh *K8sHelper) GetServiceNodePort(serviceName string, namespace string) (string, error) {
getOpts := metav1.GetOptions{}
svc, err := k8sh.Clientset.CoreV1().Services(namespace).Get(serviceName, getOpts)
if err != nil {
logger.Errorf("Cannot get service : %v in namespace %v, err: %v", serviceName, namespace, err)
return "", fmt.Errorf("Cannot get service : %v in namespace %v, err: %v", serviceName, namespace, err)
}
np := svc.Spec.Ports[0].NodePort
return strconv.FormatInt(int64(np), 10), nil
}
// IsStorageClassPresent returns nil if the given storageClass is present, otherwise an error
func (k8sh *K8sHelper) IsStorageClassPresent(name string) error {
args := []string{"get", "storageclass", "-o", "jsonpath='{.items[*].metadata.name}'"}
result, err := k8sh.Kubectl(args...)
if strings.Contains(result, name) {
return nil
}
return fmt.Errorf("Storageclass %s not found, err ->%v", name, err)
}
func (k8sh *K8sHelper) IsDefaultStorageClassPresent() (bool, error) {
scs, err := k8sh.Clientset.StorageV1().StorageClasses().List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list StorageClasses: %+v", err)
}
for _, sc := range scs.Items {
if storagev1util.IsDefaultAnnotation(sc.ObjectMeta) {
return true, nil
}
}
return false, nil
}
// CheckPvcCountAndStatus returns true if the expected number of PVCs for an app are found and they reach the expected status
func (k8sh *K8sHelper) CheckPvcCountAndStatus(podName string, namespace string, expectedPvcCount int, expectedStatus string) bool {
logger.Infof("wait until %d pvc for app=%s are present", expectedPvcCount, podName)
listOpts := metav1.ListOptions{LabelSelector: "app=" + podName}
pvcCountCheck := false
actualPvcCount := 0
inc := 0
for inc < RetryLoop {
pvcList, err := k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).List(listOpts)
if err != nil {
logger.Errorf("Cannot get pvc for app : %v in namespace %v, err: %v", podName, namespace, err)
return false
}
actualPvcCount = len(pvcList.Items)
if actualPvcCount == expectedPvcCount {
pvcCountCheck = true
break
}
inc++
time.Sleep(RetryInterval * time.Second)
}
if !pvcCountCheck {
logger.Errorf("Expecting %d number of PVCs for %s app, found %d ", expectedPvcCount, podName, actualPvcCount)
return false
}
inc = 0
for inc < RetryLoop {
checkAllPVCsStatus := true
pl, _ := k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).List(listOpts)
for _, pvc := range pl.Items {
if !(pvc.Status.Phase == v1.PersistentVolumeClaimPhase(expectedStatus)) {
checkAllPVCsStatus = false
logger.Infof("waiting for pvc %v to be in %s Phase, currently in %v Phase", pvc.Name, expectedStatus, pvc.Status.Phase)
}
}
if checkAllPVCsStatus {
return true
}
inc++
time.Sleep(RetryInterval * time.Second)
}
logger.Errorf("Giving up waiting for %d PVCs for %s app to be in %s phase", expectedPvcCount, podName, expectedStatus)
return false
}
// GetPVCStatus returns status of PVC
func (k8sh *K8sHelper) GetPVCStatus(namespace string, name string) (v1.PersistentVolumeClaimPhase, error) {
getOpts := metav1.GetOptions{}
pvc, err := k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).Get(name, getOpts)
if err != nil {
return v1.ClaimLost, fmt.Errorf("PVC %s not found,err->%v", name, err)
}
return pvc.Status.Phase, nil
}
// GetPVCVolumeName returns volume name of PVC
func (k8sh *K8sHelper) GetPVCVolumeName(namespace string, name string) (string, error) {
getOpts := metav1.GetOptions{}
pvc, err := k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).Get(name, getOpts)
if err != nil {
return "", fmt.Errorf("PVC %s not found,err->%v", name, err)
}
return pvc.Spec.VolumeName, nil
}
// GetPVCAccessModes returns AccessModes on PVC
func (k8sh *K8sHelper) GetPVCAccessModes(namespace string, name string) ([]v1.PersistentVolumeAccessMode, error) {
getOpts := metav1.GetOptions{}
pvc, err := k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).Get(name, getOpts)
if err != nil {
return []v1.PersistentVolumeAccessMode{}, fmt.Errorf("PVC %s not found,err->%v", name, err)
}
return pvc.Status.AccessModes, nil
}
// GetPV returns PV by name
func (k8sh *K8sHelper) GetPV(name string) (*v1.PersistentVolume, error) {
getOpts := metav1.GetOptions{}
pv, err := k8sh.Clientset.CoreV1().PersistentVolumes().Get(name, getOpts)
if err != nil {
return nil, fmt.Errorf("PV %s not found,err->%v", name, err)
}
return pv, nil
}
// IsPodInExpectedState waits up to 90s for a pod to be in an expected state
// If the pod is in expected state within 90s true is returned, if not false
func (k8sh *K8sHelper) IsPodInExpectedState(podNamePattern string, namespace string, state string) bool {
listOpts := metav1.ListOptions{LabelSelector: "app=" + podNamePattern}
inc := 0
for inc < RetryLoop {
podList, err := k8sh.Clientset.CoreV1().Pods(namespace).List(listOpts)
if err == nil {
if len(podList.Items) >= 1 {
if podList.Items[0].Status.Phase == v1.PodPhase(state) {
return true
}
}
}
inc++
time.Sleep(RetryInterval * time.Second)
}
return false
}
// CheckPodCountAndState returns true if expected number of pods with matching name are found and are in expected state
func (k8sh *K8sHelper) CheckPodCountAndState(podName string, namespace string, minExpected int, expectedPhase string) bool {
listOpts := metav1.ListOptions{LabelSelector: "app=" + podName}
podCountCheck := false
actualPodCount := 0
inc := 0
for inc < RetryLoop {
podList, err := k8sh.Clientset.CoreV1().Pods(namespace).List(listOpts)
if err != nil {
logger.Errorf("Cannot list pods for app=%s in namespace %s, err: %+v", podName, namespace, err)
return false
}
actualPodCount = len(podList.Items)
if actualPodCount >= minExpected {
logger.Infof("%d of %d pods with label app=%s were found", actualPodCount, minExpected, podName)
podCountCheck = true
break
}
inc++
logger.Infof("waiting for %d pods with label app=%s, found %d", minExpected, podName, actualPodCount)
time.Sleep(RetryInterval * time.Second)
}
if !podCountCheck {
logger.Errorf("Expecting %d number of pods for %s app, found %d ", minExpected, podName, actualPodCount)
return false
}
for i := 0; i < RetryLoop; i++ {
checkAllPodsStatus := true
pl, _ := k8sh.Clientset.CoreV1().Pods(namespace).List(listOpts)
for _, pod := range pl.Items {
if !(pod.Status.Phase == v1.PodPhase(expectedPhase)) {
checkAllPodsStatus = false
logger.Infof("waiting for pod %v to be in %s Phase, currently in %v Phase", pod.Name, expectedPhase, pod.Status.Phase)
}
}
if checkAllPodsStatus {
return true
}
time.Sleep(RetryInterval * time.Second)
}
logger.Errorf("All pods with app Name %v not in %v phase ", podName, expectedPhase)
k8sh.PrintPodDescribe(namespace, "-l", listOpts.LabelSelector)
return false
}
// WaitUntilPodInNamespaceIsDeleted waits for 90s for a pod in a namespace to be terminated
// If the pod disappears within 90s true is returned, if not false
func (k8sh *K8sHelper) WaitUntilPodInNamespaceIsDeleted(podNamePattern string, namespace string) bool {
inc := 0
for inc < RetryLoop {
out, _ := k8sh.GetResource("-n", namespace, "pods", "-l", "app="+podNamePattern)
if !strings.Contains(out, podNamePattern) {
return true
}
inc++
time.Sleep(RetryInterval * time.Second)
}
logger.Infof("Pod %s in namespace %s not deleted", podNamePattern, namespace)
return false
}
// WaitUntilPodIsDeleted waits for 90s for a pod to be terminated
// If the pod disappears within 90s true is returned, if not false
func (k8sh *K8sHelper) WaitUntilPodIsDeleted(name, namespace string) bool {
inc := 0
for inc < RetryLoop {
_, err := k8sh.Clientset.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return true
}
inc++
logger.Infof("pod %s in namespace %s is not deleted yet", name, namespace)
time.Sleep(RetryInterval * time.Second)
}
return false
}
// WaitUntilPVCIsBound waits up to 90 seconds for a PVC to reach the Bound state.
// If the PVC is Bound within 90s true is returned, otherwise false
func (k8sh *K8sHelper) WaitUntilPVCIsBound(namespace string, pvcname string) bool {
inc := 0
for inc < RetryLoop {
out, err := k8sh.GetPVCStatus(namespace, pvcname)
if err == nil {
if out == v1.PersistentVolumeClaimPhase(v1.ClaimBound) {
logger.Infof("PVC %s is bound", pvcname)
return true
}
}
logger.Infof("waiting for PVC %s to be bound. current=%s. err=%+v", pvcname, out, err)
inc++
time.Sleep(RetryInterval * time.Second)
}
return false
}
// WaitUntilPVCIsExpanded waits for a PVC to be resized for specified value
func (k8sh *K8sHelper) WaitUntilPVCIsExpanded(namespace, pvcname, size string) bool {
getOpts := metav1.GetOptions{}
inc := 0
for inc < RetryLoop {
// The PVC spec changes immediately, but the status changes only once the resize process completes successfully.
pvc, err := k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).Get(pvcname, getOpts)
if err == nil {
currentSize := pvc.Status.Capacity[v1.ResourceStorage]
if currentSize.String() == size {
logger.Infof("PVC %s is resized", pvcname)
return true
}
logger.Infof("waiting for PVC %s to be resized, current: %s, expected: %s", pvcname, currentSize.String(), size)
} else {
logger.Infof("error while getting PVC specs: %+v", err)
}
inc++
time.Sleep(RetryInterval * time.Second)
}
return false
}
func (k8sh *K8sHelper) WaitUntilPVCIsDeleted(namespace string, pvcname string) bool {
getOpts := metav1.GetOptions{}
inc := 0
for inc < RetryLoop {
_, err := k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).Get(pvcname, getOpts)
if err != nil {
return true
}
logger.Infof("waiting for PVC %s to be deleted.", pvcname)
inc++
time.Sleep(RetryInterval * time.Second)
}
return false
}
func (k8sh *K8sHelper) DeletePvcWithLabel(namespace string, podName string) bool {
delOpts := metav1.DeleteOptions{}
listOpts := metav1.ListOptions{LabelSelector: "app=" + podName}
err := k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).DeleteCollection(&delOpts, listOpts)
if err != nil {
logger.Errorf("cannot deleted PVCs for pods with label app=%s", podName)
return false
}
inc := 0
for inc < RetryLoop {
pvcs, err := k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).List(listOpts)
if err == nil {
if len(pvcs.Items) == 0 {
return true
}
}
logger.Infof("waiting for PVCs for pods with label=%s to be deleted.", podName)
inc++
time.Sleep(RetryInterval * time.Second)
}
return false
}
// WaitUntilNameSpaceIsDeleted waits up to 180s for the namespace to be deleted.
// If the namespace is deleted within that time true is returned, otherwise false.
func (k8sh *K8sHelper) WaitUntilNameSpaceIsDeleted(namespace string) bool {
getOpts := metav1.GetOptions{}
inc := 0
for inc < RetryLoop {
ns, err := k8sh.Clientset.CoreV1().Namespaces().Get(namespace, getOpts)
if err != nil {
return true
}
logger.Infof("Namespace %s %v", namespace, ns.Status.Phase)
inc++
time.Sleep(RetryInterval * time.Second)
}
return false
}
// CreateExternalRGWService creates a service for rgw access external to the cluster on a node port
func (k8sh *K8sHelper) CreateExternalRGWService(namespace, storeName string) error {
svcName := "rgw-external-" + storeName
externalSvc := `apiVersion: v1
kind: Service
metadata:
name: ` + svcName + `
namespace: ` + namespace + `
labels:
app: rook-ceph-rgw
rook_cluster: ` + namespace + `
spec:
ports:
- name: rook-ceph-rgw
port: 53390
protocol: TCP
selector:
app: rook-ceph-rgw
rook_cluster: ` + namespace + `
sessionAffinity: None
type: NodePort
`
_, err := k8sh.KubectlWithStdin(externalSvc, []string{"apply", "-f", "-"}...)
if err != nil && !errors.IsAlreadyExists(err) {
return fmt.Errorf("failed to create external service. %+v", err)
}
return nil
}
func (k8sh *K8sHelper) GetRGWServiceURL(storeName string, namespace string) (string, error) {
if k8sh.RunningInCluster {
return k8sh.GetInternalRGWServiceURL(storeName, namespace)
}
return k8sh.GetExternalRGWServiceURL(storeName, namespace)
}
// GetInternalRGWServiceURL returns the URL of the ceph RGW service inside the cluster
func (k8sh *K8sHelper) GetInternalRGWServiceURL(storeName string, namespace string) (string, error) {
name := "rook-ceph-rgw-" + storeName
svc, err := k8sh.GetService(name, namespace)
if err != nil {
return "", fmt.Errorf("RGW service not found/object. %+v", err)
}
endpoint := fmt.Sprintf("%s:%d", svc.Spec.ClusterIP, svc.Spec.Ports[0].Port)
logger.Infof("internal rgw endpoint: %s", endpoint)
return endpoint, nil
}
// GetExternalRGWServiceURL returns the external URL (node IP and NodePort) of the ceph RGW service
func (k8sh *K8sHelper) GetExternalRGWServiceURL(storeName string, namespace string) (string, error) {
hostip, err := k8sh.GetPodHostID("rook-ceph-rgw", namespace)
if err != nil {
return "", fmt.Errorf("RGW pods not found. %+v", err)
}
serviceName := "rgw-external-" + storeName
nodePort, err := k8sh.GetServiceNodePort(serviceName, namespace)
if err != nil {
return "", fmt.Errorf("RGW service not found. %+v", err)
}
endpoint := hostip + ":" + nodePort
logger.Infof("external rgw endpoint: %s", endpoint)
return endpoint, err
}
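// exampleExposeRGW is an editor's illustrative sketch, not part of the original
// helper: create the external NodePort service for a hypothetical object store,
// then resolve the endpoint tests should use (internal or external depending on
// where the suite runs). The store name is a made-up example.
func exampleExposeRGW(k8sh *K8sHelper, namespace string) (string, error) {
	if err := k8sh.CreateExternalRGWService(namespace, "demo-store"); err != nil {
		return "", err
	}
	return k8sh.GetRGWServiceURL("demo-store", namespace)
}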
// ChangeHostnames modifies the node hostname label to run tests in an environment where the node name is different from the hostname label
func (k8sh *K8sHelper) ChangeHostnames() error {
nodes, err := k8sh.Clientset.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return err
}
for _, node := range nodes.Items {
hostname := node.Labels[v1.LabelHostname]
if !strings.HasPrefix(hostname, hostnameTestPrefix) {
node.Labels[v1.LabelHostname] = hostnameTestPrefix + hostname
logger.Infof("changed hostname of node %s to %s", node.Name, node.Labels[v1.LabelHostname])
_, err := k8sh.Clientset.CoreV1().Nodes().Update(&node)
if err != nil {
return err
}
}
}
return nil
}
// RestoreHostnames removes the test suffix from the node hostname labels
func (k8sh *K8sHelper) RestoreHostnames() ([]string, error) {
nodes, err := k8sh.Clientset.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return nil, err
}
for _, node := range nodes.Items {
hostname := node.Labels[v1.LabelHostname]
if strings.HasPrefix(hostname, hostnameTestPrefix) {
node.Labels[v1.LabelHostname] = hostname[len(hostnameTestPrefix):]
logger.Infof("restoring hostname of node %s to %s", node.Name, node.Labels[v1.LabelHostname])
_, err := k8sh.Clientset.CoreV1().Nodes().Update(&node)
if err != nil {
return nil, err
}
}
}
return nil, nil
}
// IsRookInstalled returns true if the rook-ceph-mgr service is running (indicating rook is installed)
func (k8sh *K8sHelper) IsRookInstalled(namespace string) bool {
opts := metav1.GetOptions{}
_, err := k8sh.Clientset.CoreV1().Services(namespace).Get("rook-ceph-mgr", opts)
if err == nil {
return true
}
return false
}
// CollectPodLogsFromLabel collects logs for pods with the given label
func (k8sh *K8sHelper) CollectPodLogsFromLabel(podLabel, namespace, testName, platformName string) {
pods, err := k8sh.Clientset.CoreV1().Pods(namespace).List(metav1.ListOptions{LabelSelector: podLabel})
if err != nil {
logger.Errorf("failed to list pods in namespace %s. %+v", namespace, err)
return
}
k8sh.getPodsLogs(pods, namespace, testName, platformName)
}
// GetLogsFromNamespace collects logs for all containers in all pods in the namespace
func (k8sh *K8sHelper) GetLogsFromNamespace(namespace, testName, platformName string) {
logger.Infof("Gathering logs for all pods in namespace %s", namespace)
pods, err := k8sh.Clientset.CoreV1().Pods(namespace).List(metav1.ListOptions{})
if err != nil {
logger.Errorf("failed to list pods in namespace %s. %+v", namespace, err)
return
}
k8sh.getPodsLogs(pods, namespace, testName, platformName)
}
func (k8sh *K8sHelper) getPodsLogs(pods *v1.PodList, namespace, testName, platformName string) {
for _, p := range pods.Items {
k8sh.getPodLogs(p, platformName, namespace, testName, false)
if strings.Contains(p.Name, "operator") {
// get the previous logs for the operator
k8sh.getPodLogs(p, platformName, namespace, testName, true)
}
}
}
func (k8sh *K8sHelper) createTestLogFile(platformName, name, namespace, testName, suffix string) (*os.File, error) {
dir, _ := os.Getwd()
logDir := path.Join(dir, "_output/tests/")
if _, err := os.Stat(logDir); os.IsNotExist(err) {
err := os.MkdirAll(logDir, 0777)
if err != nil {
logger.Errorf("Cannot get logs files dir for app : %v in namespace %v, err: %v", name, namespace, err)
return nil, err
}
}
fileName := fmt.Sprintf("%s_%s_%s_%s%s_%d.log", testName, platformName, namespace, name, suffix, time.Now().Unix())
filePath := path.Join(logDir, fileName)
file, err := os.Create(filePath)
if err != nil {
logger.Errorf("Cannot create file %s. %v", filePath, err)
return nil, err
}
logger.Debugf("created log file: %s", filePath)
return file, nil
}
func (k8sh *K8sHelper) getPodLogs(pod v1.Pod, platformName, namespace, testName string, previousLog bool) {
suffix := ""
if previousLog {
suffix = "_previous"
}
file, err := k8sh.createTestLogFile(platformName, pod.Name, namespace, testName, suffix)
if err != nil {
return
}
defer file.Close()
for _, container := range pod.Spec.InitContainers {
k8sh.appendContainerLogs(file, pod, container.Name, previousLog, true)
}
for _, container := range pod.Spec.Containers {
k8sh.appendContainerLogs(file, pod, container.Name, previousLog, false)
}
}
func writeHeader(file *os.File, message string) {
file.WriteString("\n-----------------------------------------\n")
file.WriteString(message)
file.WriteString("\n-----------------------------------------\n")
}
func (k8sh *K8sHelper) appendContainerLogs(file *os.File, pod v1.Pod, containerName string, previousLog, initContainer bool) {
message := fmt.Sprintf("CONTAINER: %s", containerName)
if initContainer {
message = "INIT " + message
}
writeHeader(file, message)
logOpts := &v1.PodLogOptions{Previous: previousLog}
if containerName != "" {
logOpts.Container = containerName
}
res := k8sh.Clientset.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, logOpts).Do()
rawData, err := res.Raw()
if err != nil {
logger.Errorf("Cannot get logs for pod %s and container %s. %v", pod.Name, containerName, err)
return
}
if _, err := file.Write(rawData); err != nil {
logger.Errorf("Errors while writing logs for pod %s and container %s. %v", pod.Name, containerName, err)
}
}
// CreateAnonSystemClusterBinding Creates anon-user-access clusterrolebinding for cluster-admin role - used by kubeadm env.
func (k8sh *K8sHelper) CreateAnonSystemClusterBinding() {
args := []string{"create", "clusterrolebinding", "anon-user-access", "--clusterrole", "cluster-admin", "--user", "system:anonymous"}
_, err := k8sh.Kubectl(args...)
if err != nil {
logger.Warningf("anon-user-access not created")
return
}
logger.Infof("anon-user-access creation completed, waiting for it to exist in API")
inc := 0
for inc < RetryLoop {
var err error
if _, err = k8sh.Clientset.RbacV1beta1().ClusterRoleBindings().Get("anon-user-access", metav1.GetOptions{}); err == nil {
break
}
logger.Warningf("failed to get anon-user-access clusterrolebinding, will try again: %+v", err)
inc++
time.Sleep(RetryInterval * time.Second)
}
}
func (k8sh *K8sHelper) DeleteRoleAndBindings(name, namespace string) error {
err := k8sh.DeleteResource("role", name, "-n", namespace)
if err != nil {
return err
}
err = k8sh.DeleteResource("rolebinding", name, "-n", namespace)
if err != nil {
return err
}
return nil
}
func (k8sh *K8sHelper) DeleteRoleBinding(name, namespace string) error {
err := k8sh.DeleteResource("rolebinding", name, "-n", namespace)
return err
}
func (k8sh *K8sHelper) ScaleStatefulSet(statefulSetName, namespace string, replicationSize int) error {
args := []string{"-n", namespace, "scale", "statefulsets", statefulSetName, fmt.Sprintf("--replicas=%d", replicationSize)}
_, err := k8sh.Kubectl(args...)
return err
}
func IsKubectlErrorNotFound(output string, err error) bool {
return err != nil && strings.Contains(output, "Error from server (NotFound)")
}
// WaitForDeploymentCount waits until the desired number of deployments with the label exist. The
// deployments are not guaranteed to be running, only existing.
func (k8sh *K8sHelper) WaitForDeploymentCount(label, namespace string, count int) error {
return k8sh.WaitForDeploymentCountWithRetries(label, namespace, count, RetryLoop)
}
// WaitForDeploymentCountWithRetries waits until the desired number of deployments with the label
// exist, retrying the specified number of times. The deployments are not guaranteed to be running,
// only existing.
func (k8sh *K8sHelper) WaitForDeploymentCountWithRetries(label, namespace string, count, retries int) error {
options := metav1.ListOptions{LabelSelector: label}
for i := 0; i < retries; i++ {
deps, err := k8sh.Clientset.AppsV1().Deployments(namespace).List(options)
numDeps := 0
if err == nil {
numDeps = len(deps.Items)
}
if numDeps >= count {
logger.Infof("found %d of %d deployments with label %s in namespace %s", numDeps, count, label, namespace)
return nil
}
logger.Infof("waiting for %d deployments (found %d) with label %s in namespace %s", count, numDeps, label, namespace)
time.Sleep(RetryInterval * time.Second)
}
return fmt.Errorf("giving up waiting for %d deployments with label %s in namespace %s", count, label, namespace)
}
// WaitForLabeledDeploymentsToBeReady waits for all deployments matching the given label selector to
// be fully ready with a default timeout.
func (k8sh *K8sHelper) WaitForLabeledDeploymentsToBeReady(label, namespace string) error {
return k8sh.WaitForLabeledDeploymentsToBeReadyWithRetries(label, namespace, RetryLoop)
}
// WaitForLabeledDeploymentsToBeReadyWithRetries waits for all deployments matching the given label
// selector to be fully ready. Retry the number of times given.
func (k8sh *K8sHelper) WaitForLabeledDeploymentsToBeReadyWithRetries(label, namespace string, retries int) error {
listOpts := metav1.ListOptions{LabelSelector: label}
var lastDep apps.Deployment
for i := 0; i < retries; i++ {
deps, err := k8sh.Clientset.AppsV1().Deployments(namespace).List(listOpts)
ready := 0
if err == nil && len(deps.Items) > 0 {
for _, dep := range deps.Items {
if dep.Status.Replicas == dep.Status.ReadyReplicas {
ready++
} else {
lastDep = dep // make it the last non-ready dep
}
if ready == len(deps.Items) {
logger.Infof("all %d deployments with label %s are running", len(deps.Items), label)
return nil
}
}
}
logger.Infof("waiting for deployment(s) with label %s in namespace %s to be running. ready=%d/%d, err=%+v",
label, namespace, ready, len(deps.Items), err)
time.Sleep(RetryInterval * time.Second)
}
if len(lastDep.Name) == 0 {
logger.Infof("no deployment was found with label %s", label)
} else {
r, err := k8sh.Kubectl("-n", namespace, "get", "-o", "yaml", "deployments", "--selector", label)
if err == nil {
logger.Infof("deployments with label %s:\n%s", label, r)
}
}
return fmt.Errorf("giving up waiting for deployment(s) with label %s in namespace %s to be ready", label, namespace)
}
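// exampleWaitForOperator is an editor's illustrative sketch, not part of the
// original helper: first wait for at least one deployment with a hypothetical
// label to exist, then wait for every matching deployment to report ready.
func exampleWaitForOperator(k8sh *K8sHelper) error {
	if err := k8sh.WaitForDeploymentCount("app=demo-operator", "demo-ns", 1); err != nil {
		return err
	}
	return k8sh.WaitForLabeledDeploymentsToBeReady("app=demo-operator", "demo-ns")
}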
|
[
"\"DOCKERCMD\""
] |
[] |
[
"DOCKERCMD"
] |
[]
|
["DOCKERCMD"]
|
go
| 1 | 0 | |
tests/test_envchecks.py
|
import os
from botbot import envchecks
def test_generic_env_checker():
generic = envchecks._var_check_builder('DOOT',
None,
'PROB_DOOT')
os.environ['DOOT'] = 'thank mr skeltal'
assert generic(important=['thank', 'mr', 'skeltal']) is None
assert 'PROB_DOOT' in generic(important=['forgot', 'to', 'thank'])
del os.environ['DOOT']
def test_path_checker():
assert envchecks.path_sufficient(important=['/usr/bin']) is None
assert envchecks.path_sufficient(important=['/not/a/real/path']) is not None
def test_ld_checker():
try:
llp = os.environ['LD_LIBRARY_PATH']
assert envchecks.ld_lib_path_sufficient(important=['lib']) is None
except KeyError:
assert 'PROB_VAR_NOT_SET' in envchecks.ld_lib_path_sufficient()
|
[] |
[] |
[
"LD_LIBRARY_PATH",
"DOOT"
] |
[]
|
["LD_LIBRARY_PATH", "DOOT"]
|
python
| 2 | 0 | |
homeassistant/components/mqtt/vacuum.py
|
"""
Support for a generic MQTT vacuum.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/vacuum.mqtt/
"""
import logging
import voluptuous as vol
from homeassistant.components import mqtt
from homeassistant.components.mqtt import (
ATTR_DISCOVERY_HASH, CONF_UNIQUE_ID, MqttAttributes, MqttAvailability,
MqttDiscoveryUpdate, MqttEntityDeviceInfo, subscription)
from homeassistant.components.mqtt.discovery import (
MQTT_DISCOVERY_NEW, clear_discovery_hash)
from homeassistant.components.vacuum import (
DOMAIN, SUPPORT_BATTERY, SUPPORT_CLEAN_SPOT, SUPPORT_FAN_SPEED,
SUPPORT_LOCATE, SUPPORT_PAUSE, SUPPORT_RETURN_HOME, SUPPORT_SEND_COMMAND,
SUPPORT_STATUS, SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
VacuumDevice)
from homeassistant.const import ATTR_SUPPORTED_FEATURES, CONF_DEVICE, CONF_NAME
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.icon import icon_for_battery_level
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['mqtt']
SERVICE_TO_STRING = {
SUPPORT_TURN_ON: 'turn_on',
SUPPORT_TURN_OFF: 'turn_off',
SUPPORT_PAUSE: 'pause',
SUPPORT_STOP: 'stop',
SUPPORT_RETURN_HOME: 'return_home',
SUPPORT_FAN_SPEED: 'fan_speed',
SUPPORT_BATTERY: 'battery',
SUPPORT_STATUS: 'status',
SUPPORT_SEND_COMMAND: 'send_command',
SUPPORT_LOCATE: 'locate',
SUPPORT_CLEAN_SPOT: 'clean_spot',
}
STRING_TO_SERVICE = {v: k for k, v in SERVICE_TO_STRING.items()}
def services_to_strings(services):
"""Convert SUPPORT_* service bitmask to list of service strings."""
strings = []
for service in SERVICE_TO_STRING:
if service & services:
strings.append(SERVICE_TO_STRING[service])
return strings
def strings_to_services(strings):
"""Convert service strings to SUPPORT_* service bitmask."""
services = 0
for string in strings:
services |= STRING_TO_SERVICE[string]
return services
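# Editor's sketch (not part of the original module): the two helpers above are
# inverse mappings between the SUPPORT_* bitmask and the strings accepted in
# configuration, e.g.
#
#   services_to_strings(SUPPORT_TURN_ON | SUPPORT_BATTERY)   # -> ['turn_on', 'battery']
#   strings_to_services(['turn_on', 'battery'])              # -> SUPPORT_TURN_ON | SUPPORT_BATTERY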
DEFAULT_SERVICES = SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_STOP |\
SUPPORT_RETURN_HOME | SUPPORT_STATUS | SUPPORT_BATTERY |\
SUPPORT_CLEAN_SPOT
ALL_SERVICES = DEFAULT_SERVICES | SUPPORT_PAUSE | SUPPORT_LOCATE |\
SUPPORT_FAN_SPEED | SUPPORT_SEND_COMMAND
CONF_SUPPORTED_FEATURES = ATTR_SUPPORTED_FEATURES
CONF_PAYLOAD_TURN_ON = 'payload_turn_on'
CONF_PAYLOAD_TURN_OFF = 'payload_turn_off'
CONF_PAYLOAD_RETURN_TO_BASE = 'payload_return_to_base'
CONF_PAYLOAD_STOP = 'payload_stop'
CONF_PAYLOAD_CLEAN_SPOT = 'payload_clean_spot'
CONF_PAYLOAD_LOCATE = 'payload_locate'
CONF_PAYLOAD_START_PAUSE = 'payload_start_pause'
CONF_BATTERY_LEVEL_TOPIC = 'battery_level_topic'
CONF_BATTERY_LEVEL_TEMPLATE = 'battery_level_template'
CONF_CHARGING_TOPIC = 'charging_topic'
CONF_CHARGING_TEMPLATE = 'charging_template'
CONF_CLEANING_TOPIC = 'cleaning_topic'
CONF_CLEANING_TEMPLATE = 'cleaning_template'
CONF_DOCKED_TOPIC = 'docked_topic'
CONF_DOCKED_TEMPLATE = 'docked_template'
CONF_ERROR_TOPIC = 'error_topic'
CONF_ERROR_TEMPLATE = 'error_template'
CONF_STATE_TOPIC = 'state_topic'
CONF_STATE_TEMPLATE = 'state_template'
CONF_FAN_SPEED_TOPIC = 'fan_speed_topic'
CONF_FAN_SPEED_TEMPLATE = 'fan_speed_template'
CONF_SET_FAN_SPEED_TOPIC = 'set_fan_speed_topic'
CONF_FAN_SPEED_LIST = 'fan_speed_list'
CONF_SEND_COMMAND_TOPIC = 'send_command_topic'
DEFAULT_NAME = 'MQTT Vacuum'
DEFAULT_RETAIN = False
DEFAULT_SERVICE_STRINGS = services_to_strings(DEFAULT_SERVICES)
DEFAULT_PAYLOAD_TURN_ON = 'turn_on'
DEFAULT_PAYLOAD_TURN_OFF = 'turn_off'
DEFAULT_PAYLOAD_RETURN_TO_BASE = 'return_to_base'
DEFAULT_PAYLOAD_STOP = 'stop'
DEFAULT_PAYLOAD_CLEAN_SPOT = 'clean_spot'
DEFAULT_PAYLOAD_LOCATE = 'locate'
DEFAULT_PAYLOAD_START_PAUSE = 'start_pause'
PLATFORM_SCHEMA = mqtt.MQTT_BASE_PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_SUPPORTED_FEATURES, default=DEFAULT_SERVICE_STRINGS):
vol.All(cv.ensure_list, [vol.In(STRING_TO_SERVICE.keys())]),
vol.Optional(mqtt.CONF_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
vol.Optional(mqtt.CONF_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_PAYLOAD_TURN_ON,
default=DEFAULT_PAYLOAD_TURN_ON): cv.string,
vol.Optional(CONF_PAYLOAD_TURN_OFF,
default=DEFAULT_PAYLOAD_TURN_OFF): cv.string,
vol.Optional(CONF_PAYLOAD_RETURN_TO_BASE,
default=DEFAULT_PAYLOAD_RETURN_TO_BASE): cv.string,
vol.Optional(CONF_PAYLOAD_STOP,
default=DEFAULT_PAYLOAD_STOP): cv.string,
vol.Optional(CONF_PAYLOAD_CLEAN_SPOT,
default=DEFAULT_PAYLOAD_CLEAN_SPOT): cv.string,
vol.Optional(CONF_PAYLOAD_LOCATE,
default=DEFAULT_PAYLOAD_LOCATE): cv.string,
vol.Optional(CONF_PAYLOAD_START_PAUSE,
default=DEFAULT_PAYLOAD_START_PAUSE): cv.string,
vol.Optional(CONF_BATTERY_LEVEL_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_BATTERY_LEVEL_TEMPLATE): cv.template,
vol.Optional(CONF_CHARGING_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_CHARGING_TEMPLATE): cv.template,
vol.Optional(CONF_CLEANING_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_CLEANING_TEMPLATE): cv.template,
vol.Optional(CONF_DOCKED_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_DOCKED_TEMPLATE): cv.template,
vol.Optional(CONF_ERROR_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_ERROR_TEMPLATE): cv.template,
vol.Optional(CONF_STATE_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_STATE_TEMPLATE): cv.template,
vol.Optional(CONF_FAN_SPEED_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_FAN_SPEED_TEMPLATE): cv.template,
vol.Optional(CONF_SET_FAN_SPEED_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_FAN_SPEED_LIST, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_SEND_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_UNIQUE_ID): cv.string,
vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
}).extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema).extend(
mqtt.MQTT_JSON_ATTRS_SCHEMA.schema)
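# Editor's sketch, not from the original file: a hypothetical configuration.yaml
# entry that PLATFORM_SCHEMA above would accept. The entity name and topic names
# are made-up examples.
#
# vacuum:
#   - platform: mqtt
#     name: "Demo vacuum"
#     supported_features:
#       - turn_on
#       - turn_off
#       - battery
#       - fan_speed
#     command_topic: "vacuum/command"
#     battery_level_topic: "vacuum/state"
#     battery_level_template: "{{ value_json.battery_level }}"
#     fan_speed_topic: "vacuum/state"
#     fan_speed_template: "{{ value_json.fan_speed }}"
#     set_fan_speed_topic: "vacuum/set_fan_speed"
#     fan_speed_list:
#       - min
#       - max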
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up MQTT vacuum through configuration.yaml."""
await _async_setup_entity(config, async_add_entities,
discovery_info)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up MQTT vacuum dynamically through MQTT discovery."""
async def async_discover(discovery_payload):
"""Discover and add a MQTT vacuum."""
discovery_hash = None
try:
discovery_hash = discovery_payload.pop(ATTR_DISCOVERY_HASH)
config = PLATFORM_SCHEMA(discovery_payload)
await _async_setup_entity(config, async_add_entities, config_entry,
discovery_hash)
except Exception:
if discovery_hash:
clear_discovery_hash(hass, discovery_hash)
raise
async_dispatcher_connect(
hass, MQTT_DISCOVERY_NEW.format(DOMAIN, 'mqtt'), async_discover)
async def _async_setup_entity(config, async_add_entities, config_entry,
discovery_hash=None):
"""Set up the MQTT vacuum."""
async_add_entities([MqttVacuum(config, config_entry, discovery_hash)])
# pylint: disable=too-many-ancestors
class MqttVacuum(MqttAttributes, MqttAvailability, MqttDiscoveryUpdate,
MqttEntityDeviceInfo, VacuumDevice):
"""Representation of a MQTT-controlled vacuum."""
def __init__(self, config, config_entry, discovery_info):
"""Initialize the vacuum."""
self._cleaning = False
self._charging = False
self._docked = False
self._error = None
self._status = 'Unknown'
self._battery_level = 0
self._fan_speed = 'unknown'
self._fan_speed_list = []
self._sub_state = None
self._unique_id = config.get(CONF_UNIQUE_ID)
# Load config
self._setup_from_config(config)
device_config = config.get(CONF_DEVICE)
MqttAttributes.__init__(self, config)
MqttAvailability.__init__(self, config)
MqttDiscoveryUpdate.__init__(self, discovery_info,
self.discovery_update)
MqttEntityDeviceInfo.__init__(self, device_config, config_entry)
def _setup_from_config(self, config):
self._name = config.get(CONF_NAME)
supported_feature_strings = config.get(CONF_SUPPORTED_FEATURES)
self._supported_features = strings_to_services(
supported_feature_strings
)
self._fan_speed_list = config.get(CONF_FAN_SPEED_LIST)
self._qos = config.get(mqtt.CONF_QOS)
self._retain = config.get(mqtt.CONF_RETAIN)
self._command_topic = config.get(mqtt.CONF_COMMAND_TOPIC)
self._set_fan_speed_topic = config.get(CONF_SET_FAN_SPEED_TOPIC)
self._send_command_topic = config.get(CONF_SEND_COMMAND_TOPIC)
self._payloads = {
key: config.get(key) for key in (
CONF_PAYLOAD_TURN_ON,
CONF_PAYLOAD_TURN_OFF,
CONF_PAYLOAD_RETURN_TO_BASE,
CONF_PAYLOAD_STOP,
CONF_PAYLOAD_CLEAN_SPOT,
CONF_PAYLOAD_LOCATE,
CONF_PAYLOAD_START_PAUSE
)
}
self._state_topics = {
key: config.get(key) for key in (
CONF_BATTERY_LEVEL_TOPIC,
CONF_CHARGING_TOPIC,
CONF_CLEANING_TOPIC,
CONF_DOCKED_TOPIC,
CONF_ERROR_TOPIC,
CONF_FAN_SPEED_TOPIC
)
}
self._templates = {
key: config.get(key) for key in (
CONF_BATTERY_LEVEL_TEMPLATE,
CONF_CHARGING_TEMPLATE,
CONF_CLEANING_TEMPLATE,
CONF_DOCKED_TEMPLATE,
CONF_ERROR_TEMPLATE,
CONF_FAN_SPEED_TEMPLATE
)
}
async def discovery_update(self, discovery_payload):
"""Handle updated discovery message."""
config = PLATFORM_SCHEMA(discovery_payload)
self._setup_from_config(config)
await self.attributes_discovery_update(config)
await self.availability_discovery_update(config)
await self.device_info_discovery_update(config)
await self._subscribe_topics()
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Subscribe MQTT events."""
await super().async_added_to_hass()
await self._subscribe_topics()
async def async_will_remove_from_hass(self):
"""Unsubscribe when removed."""
await subscription.async_unsubscribe_topics(self.hass, self._sub_state)
await MqttAttributes.async_will_remove_from_hass(self)
await MqttAvailability.async_will_remove_from_hass(self)
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
for tpl in self._templates.values():
if tpl is not None:
tpl.hass = self.hass
@callback
def message_received(topic, payload, qos):
"""Handle new MQTT message."""
if topic == self._state_topics[CONF_BATTERY_LEVEL_TOPIC] and \
self._templates[CONF_BATTERY_LEVEL_TEMPLATE]:
battery_level = self._templates[CONF_BATTERY_LEVEL_TEMPLATE]\
.async_render_with_possible_json_value(
payload, error_value=None)
if battery_level is not None:
self._battery_level = int(battery_level)
if topic == self._state_topics[CONF_CHARGING_TOPIC] and \
self._templates[CONF_CHARGING_TEMPLATE]:
charging = self._templates[CONF_CHARGING_TEMPLATE]\
.async_render_with_possible_json_value(
payload, error_value=None)
if charging is not None:
self._charging = cv.boolean(charging)
if topic == self._state_topics[CONF_CLEANING_TOPIC] and \
self._templates[CONF_CLEANING_TEMPLATE]:
cleaning = self._templates[CONF_CLEANING_TEMPLATE]\
.async_render_with_possible_json_value(
payload, error_value=None)
if cleaning is not None:
self._cleaning = cv.boolean(cleaning)
if topic == self._state_topics[CONF_DOCKED_TOPIC] and \
self._templates[CONF_DOCKED_TEMPLATE]:
docked = self._templates[CONF_DOCKED_TEMPLATE]\
.async_render_with_possible_json_value(
payload, error_value=None)
if docked is not None:
self._docked = cv.boolean(docked)
if topic == self._state_topics[CONF_ERROR_TOPIC] and \
self._templates[CONF_ERROR_TEMPLATE]:
error = self._templates[CONF_ERROR_TEMPLATE]\
.async_render_with_possible_json_value(
payload, error_value=None)
if error is not None:
self._error = cv.string(error)
if self._docked:
if self._charging:
self._status = "Docked & Charging"
else:
self._status = "Docked"
elif self._cleaning:
self._status = "Cleaning"
elif self._error:
self._status = "Error: {}".format(self._error)
else:
self._status = "Stopped"
if topic == self._state_topics[CONF_FAN_SPEED_TOPIC] and \
self._templates[CONF_FAN_SPEED_TEMPLATE]:
fan_speed = self._templates[CONF_FAN_SPEED_TEMPLATE]\
.async_render_with_possible_json_value(
payload, error_value=None)
if fan_speed is not None:
self._fan_speed = fan_speed
self.async_write_ha_state()
topics_list = {topic for topic in self._state_topics.values() if topic}
self._sub_state = await subscription.async_subscribe_topics(
self.hass, self._sub_state,
{
"topic{}".format(i): {
"topic": topic,
"msg_callback": message_received,
"qos": self._qos
} for i, topic in enumerate(topics_list)
}
)
@property
def name(self):
"""Return the name of the vacuum."""
return self._name
@property
def should_poll(self):
"""No polling needed for an MQTT vacuum."""
return False
@property
def is_on(self):
"""Return true if vacuum is on."""
return self._cleaning
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def status(self):
"""Return a status string for the vacuum."""
if self.supported_features & SUPPORT_STATUS == 0:
return
return self._status
@property
def fan_speed(self):
"""Return the status of the vacuum."""
if self.supported_features & SUPPORT_FAN_SPEED == 0:
return
return self._fan_speed
@property
def fan_speed_list(self):
"""Return the status of the vacuum."""
if self.supported_features & SUPPORT_FAN_SPEED == 0:
return []
return self._fan_speed_list
@property
def battery_level(self):
"""Return the status of the vacuum."""
if self.supported_features & SUPPORT_BATTERY == 0:
return
return max(0, min(100, self._battery_level))
@property
def battery_icon(self):
"""Return the battery icon for the vacuum cleaner."""
if self.supported_features & SUPPORT_BATTERY == 0:
return
return icon_for_battery_level(
battery_level=self.battery_level, charging=self._charging)
@property
def supported_features(self):
"""Flag supported features."""
return self._supported_features
async def async_turn_on(self, **kwargs):
"""Turn the vacuum on."""
if self.supported_features & SUPPORT_TURN_ON == 0:
return
mqtt.async_publish(self.hass, self._command_topic,
self._payloads[CONF_PAYLOAD_TURN_ON],
self._qos, self._retain)
self._status = 'Cleaning'
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the vacuum off."""
if self.supported_features & SUPPORT_TURN_OFF == 0:
return
mqtt.async_publish(self.hass, self._command_topic,
self._payloads[CONF_PAYLOAD_TURN_OFF],
self._qos, self._retain)
self._status = 'Turning Off'
self.async_write_ha_state()
async def async_stop(self, **kwargs):
"""Stop the vacuum."""
if self.supported_features & SUPPORT_STOP == 0:
return
mqtt.async_publish(self.hass, self._command_topic,
self._payloads[CONF_PAYLOAD_STOP],
self._qos, self._retain)
self._status = 'Stopping the current task'
self.async_write_ha_state()
async def async_clean_spot(self, **kwargs):
"""Perform a spot clean-up."""
if self.supported_features & SUPPORT_CLEAN_SPOT == 0:
return
mqtt.async_publish(self.hass, self._command_topic,
self._payloads[CONF_PAYLOAD_CLEAN_SPOT],
self._qos, self._retain)
self._status = "Cleaning spot"
self.async_write_ha_state()
async def async_locate(self, **kwargs):
"""Locate the vacuum (usually by playing a song)."""
if self.supported_features & SUPPORT_LOCATE == 0:
return
mqtt.async_publish(self.hass, self._command_topic,
self._payloads[CONF_PAYLOAD_LOCATE],
self._qos, self._retain)
self._status = "Hi, I'm over here!"
self.async_write_ha_state()
async def async_start_pause(self, **kwargs):
"""Start, pause or resume the cleaning task."""
if self.supported_features & SUPPORT_PAUSE == 0:
return
mqtt.async_publish(self.hass, self._command_topic,
self._payloads[CONF_PAYLOAD_START_PAUSE],
self._qos, self._retain)
self._status = 'Pausing/Resuming cleaning...'
self.async_write_ha_state()
async def async_return_to_base(self, **kwargs):
"""Tell the vacuum to return to its dock."""
if self.supported_features & SUPPORT_RETURN_HOME == 0:
return
mqtt.async_publish(self.hass, self._command_topic,
self._payloads[CONF_PAYLOAD_RETURN_TO_BASE],
self._qos, self._retain)
self._status = 'Returning home...'
self.async_write_ha_state()
async def async_set_fan_speed(self, fan_speed, **kwargs):
"""Set fan speed."""
if self.supported_features & SUPPORT_FAN_SPEED == 0:
return
if not self._fan_speed_list or fan_speed not in self._fan_speed_list:
return
mqtt.async_publish(self.hass, self._set_fan_speed_topic,
fan_speed, self._qos, self._retain)
self._status = "Setting fan to {}...".format(fan_speed)
self.async_write_ha_state()
async def async_send_command(self, command, params=None, **kwargs):
"""Send a command to a vacuum cleaner."""
if self.supported_features & SUPPORT_SEND_COMMAND == 0:
return
mqtt.async_publish(self.hass, self._send_command_topic,
command, self._qos, self._retain)
self._status = "Sending command {}...".format(command)
self.async_write_ha_state()
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
test/extended/networking/util.go
|
package networking
import (
"context"
"fmt"
"net"
"os"
"os/exec"
"strconv"
"strings"
"time"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/rest"
configv1 "github.com/openshift/api/config/v1"
projectv1 "github.com/openshift/api/project/v1"
networkclient "github.com/openshift/client-go/network/clientset/versioned/typed/network/v1"
"github.com/openshift/library-go/pkg/network/networkutils"
"k8s.io/kubernetes/test/e2e/framework/pod"
frameworkpod "k8s.io/kubernetes/test/e2e/framework/pod"
utilnet "k8s.io/utils/net"
exutil "github.com/openshift/origin/test/extended/util"
corev1 "k8s.io/api/core/v1"
kapierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/storage/names"
"k8s.io/client-go/util/retry"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
configv1client "github.com/openshift/client-go/config/clientset/versioned"
k8sclient "k8s.io/client-go/kubernetes"
)
type NodeType int
type IPFamily string
const (
// Initial pod start can be delayed O(minutes) by slow docker pulls
// TODO: Make this 30 seconds once #4566 is resolved.
podStartTimeout = 5 * time.Minute
// How often to poll pods and nodes.
poll = 5 * time.Second
// How wide to print pod names, by default. Useful for aligning printing to
// quickly scan through output.
podPrintWidth = 55
// Indicator for same or different node
SAME_NODE NodeType = iota
DIFFERENT_NODE NodeType = iota
// TODO get these defined as constants in networkutils
openshiftSDNPluginName = "OpenShiftSDN"
OVNKubernetesPluginName = "OVNKubernetes"
// IP Address Families
IPv4 IPFamily = "ipv4"
IPv6 IPFamily = "ipv6"
DualStack IPFamily = "dual"
Unknown IPFamily = "unknown"
)
// isIpv6 returns true if a group of IPs is IPv6; the result reflects the last address in the slice.
func isIpv6(ip []string) bool {
ipv6 := false
for _, ip := range ip {
netIP := net.ParseIP(ip)
if netIP != nil && netIP.To4() == nil {
ipv6 = true
} else {
ipv6 = false
}
}
return ipv6
}
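// Editor's note (sketch, not in the original file): as written, isIpv6 reflects
// the address family of the last entry in the slice, e.g.
//
//	isIpv6([]string{"10.0.0.1"})            // false
//	isIpv6([]string{"fd00::1"})             // true
//	isIpv6([]string{"fd00::1", "10.0.0.1"}) // false: the IPv4 entry is evaluated last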
func expectNoError(err error, explain ...interface{}) {
ExpectWithOffset(1, err).NotTo(HaveOccurred(), explain...)
}
func expectError(err error, explain ...interface{}) {
ExpectWithOffset(1, err).To(HaveOccurred(), explain...)
}
func launchWebserverService(client k8sclient.Interface, namespace, serviceName string, nodeName string) (serviceAddr string) {
labelSelector := make(map[string]string)
labelSelector["name"] = "web"
createPodForService(client, namespace, serviceName, nodeName, labelSelector)
servicePort := 8080
service := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
Ports: []corev1.ServicePort{
{
Protocol: corev1.ProtocolTCP,
Port: int32(servicePort),
},
},
Selector: labelSelector,
},
}
serviceClient := client.CoreV1().Services(namespace)
_, err := serviceClient.Create(context.Background(), service, metav1.CreateOptions{})
expectNoError(err)
expectNoError(exutil.WaitForEndpoint(client, namespace, serviceName))
createdService, err := serviceClient.Get(context.Background(), serviceName, metav1.GetOptions{})
expectNoError(err)
serviceAddr = net.JoinHostPort(createdService.Spec.ClusterIP, strconv.Itoa(servicePort))
e2e.Logf("Target service IP/port is %s", serviceAddr)
return
}
func createPodForService(client k8sclient.Interface, namespace, serviceName string, nodeName string, labelMap map[string]string) {
exutil.LaunchWebserverPod(client, namespace, serviceName, nodeName)
// FIXME: make e2e.LaunchWebserverPod() set the label when creating the pod
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
podClient := client.CoreV1().Pods(namespace)
pod, err := podClient.Get(context.Background(), serviceName, metav1.GetOptions{})
if err != nil {
return err
}
if pod.ObjectMeta.Labels == nil {
pod.ObjectMeta.Labels = labelMap
} else {
for key, value := range labelMap {
pod.ObjectMeta.Labels[key] = value
}
}
_, err = podClient.Update(context.Background(), pod, metav1.UpdateOptions{})
return err
})
expectNoError(err)
}
func createWebserverLBService(client k8sclient.Interface, namespace, serviceName, nodeName string,
externalIPs []string, epSelector map[string]string) error {
servicePort := 8080
service := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeLoadBalancer,
Ports: []corev1.ServicePort{
{
Protocol: corev1.ProtocolTCP,
Port: int32(servicePort),
TargetPort: intstr.IntOrString{Type: intstr.Int,
IntVal: 8080},
},
},
ExternalIPs: externalIPs,
Selector: epSelector,
},
}
serviceClient := client.CoreV1().Services(namespace)
e2e.Logf("creating service %s/%s", namespace, serviceName)
_, err := serviceClient.Create(context.Background(), service, metav1.CreateOptions{})
if err != nil {
return err
}
e2e.Logf("service %s/%s is created", namespace, serviceName)
if len(epSelector) > 0 {
err = exutil.WaitForEndpoint(client, namespace, serviceName)
if err != nil {
return err
}
e2e.Logf("endpoints for service %s/%s is up", namespace, serviceName)
}
_, err = serviceClient.Get(context.Background(), serviceName, metav1.GetOptions{})
return err
}
func checkConnectivityToHost(f *e2e.Framework, nodeName string, podName string, host string, timeout time.Duration) error {
e2e.Logf("Creating an exec pod on node %v", nodeName)
execPod := pod.CreateExecPodOrFail(f.ClientSet, f.Namespace.Name, fmt.Sprintf("execpod-sourceip-%s", nodeName), func(pod *corev1.Pod) {
pod.Spec.NodeName = nodeName
})
defer func() {
e2e.Logf("Cleaning up the exec pod")
err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.Background(), execPod.Name, metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
}()
var stdout string
e2e.Logf("Waiting up to %v to wget %s", timeout, host)
cmd := fmt.Sprintf("wget -T 30 -qO- %s", host)
var err error
for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) {
stdout, err = e2e.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
if err != nil {
e2e.Logf("got err: %v, retry until timeout", err)
continue
}
// Need to check output because wget -q might omit the error.
if strings.TrimSpace(stdout) == "" {
e2e.Logf("got empty stdout, retry until timeout")
continue
}
break
}
return err
}
var cachedNetworkPluginName *string
func openshiftSDNMode() string {
if cachedNetworkPluginName == nil {
// We don't use exutil.NewCLI() here because it can't be called from BeforeEach()
out, err := exec.Command(
"oc", "--kubeconfig="+exutil.KubeConfigPath(),
"get", "clusternetwork", "default",
"--template={{.pluginName}}",
).CombinedOutput()
pluginName := string(out)
if err != nil {
e2e.Logf("Could not check network plugin name: %v. Assuming the OpenshiftSDN plugin is not being used", err)
pluginName = ""
}
cachedNetworkPluginName = &pluginName
}
return *cachedNetworkPluginName
}
func platformType(configClient configv1client.Interface) (configv1.PlatformType, error) {
infrastructure, err := configClient.ConfigV1().Infrastructures().Get(context.Background(), "cluster", metav1.GetOptions{})
if err != nil {
return "", err
}
return infrastructure.Status.PlatformStatus.Type, nil
}
func networkPluginName() string {
if cachedNetworkPluginName == nil {
// We don't use exutil.NewCLI() here because it can't be called from BeforeEach()
out, err := exec.Command(
"oc", "--kubeconfig="+exutil.KubeConfigPath(),
"get", "network", "cluster",
"--template={{.spec.networkType}}",
).CombinedOutput()
pluginName := string(out)
if err != nil {
e2e.Logf("Could not check network plugin name: %v. Assuming a non-OpenShift plugin", err)
pluginName = ""
}
cachedNetworkPluginName = &pluginName
}
return *cachedNetworkPluginName
}
func pluginIsolatesNamespaces() bool {
if os.Getenv("NETWORKING_E2E_ISOLATION") == "true" {
return true
}
// Assume that only the OpenShift SDN "multitenant" plugin isolates by default
return openshiftSDNMode() == networkutils.MultiTenantPluginName
}
func pluginImplementsNetworkPolicy() bool {
switch {
case os.Getenv("NETWORKING_E2E_NETWORKPOLICY") == "true":
return true
case networkPluginName() == openshiftSDNPluginName && openshiftSDNMode() == networkutils.NetworkPolicyPluginName:
return true
case networkPluginName() == OVNKubernetesPluginName:
return true
default:
// If we can't detect the plugin, we assume it doesn't support
// NetworkPolicy, so the tests will work under kubenet
return false
}
}
func makeNamespaceGlobal(oc *exutil.CLI, ns *corev1.Namespace) {
clientConfig := oc.AdminConfig()
networkClient := networkclient.NewForConfigOrDie(clientConfig)
netns, err := networkClient.NetNamespaces().Get(context.Background(), ns.Name, metav1.GetOptions{})
expectNoError(err)
netns.NetID = 0
_, err = networkClient.NetNamespaces().Update(context.Background(), netns, metav1.UpdateOptions{})
expectNoError(err)
}
func makeNamespaceScheduleToAllNodes(f *e2e.Framework) {
	// to avoid hassles dealing with node selector limits, set the project node selector
	// annotation to empty so pods in this namespace can be scheduled to all nodes
for {
ns, err := f.ClientSet.CoreV1().Namespaces().Get(context.Background(), f.Namespace.Name, metav1.GetOptions{})
expectNoError(err)
if ns.Annotations == nil {
ns.Annotations = make(map[string]string)
}
ns.Annotations[projectv1.ProjectNodeSelector] = ""
_, err = f.ClientSet.CoreV1().Namespaces().Update(context.Background(), ns, metav1.UpdateOptions{})
if err == nil {
return
}
if kapierrs.IsConflict(err) {
continue
}
expectNoError(err)
}
}
func modifyNetworkConfig(configClient configv1client.Interface, autoAssignCIDRs, allowedCIDRs, rejectedCIDRs []string) {
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
network, err := configClient.ConfigV1().Networks().Get(context.Background(), "cluster", metav1.GetOptions{})
expectNoError(err)
extIPConfig := &configv1.ExternalIPConfig{Policy: &configv1.ExternalIPPolicy{}}
if len(allowedCIDRs) != 0 || len(rejectedCIDRs) != 0 || len(autoAssignCIDRs) != 0 {
extIPConfig = &configv1.ExternalIPConfig{Policy: &configv1.ExternalIPPolicy{AllowedCIDRs: allowedCIDRs,
RejectedCIDRs: rejectedCIDRs}, AutoAssignCIDRs: autoAssignCIDRs}
}
network.Spec.ExternalIP = extIPConfig
_, err = configClient.ConfigV1().Networks().Update(context.Background(), network, metav1.UpdateOptions{})
return err
})
expectNoError(err)
}
// findAppropriateNodes tries to find a source and destination for a type of node connectivity
// test (same node, or different node).
func findAppropriateNodes(f *e2e.Framework, nodeType NodeType) (*corev1.Node, *corev1.Node, error) {
nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
if err != nil {
e2e.Logf("Unable to get schedulable nodes due to %v", err)
return nil, nil, err
}
candidates := nodes.Items
if len(candidates) == 0 {
e2e.Failf("Unable to find any candidate nodes for e2e networking tests in \n%#v", nodes.Items)
}
// in general, avoiding masters is a good thing, so see if we can find nodes that aren't masters
if len(candidates) > 1 {
var withoutMasters []corev1.Node
// look for anything that has the label value master or infra and try to skip it
isAllowed := func(node *corev1.Node) bool {
for _, value := range node.Labels {
if value == "master" || value == "infra" {
return false
}
}
return true
}
for _, node := range candidates {
if !isAllowed(&node) {
continue
}
withoutMasters = append(withoutMasters, node)
}
if len(withoutMasters) >= 2 {
candidates = withoutMasters
}
}
var candidateNames, nodeNames []string
for _, node := range candidates {
candidateNames = append(candidateNames, node.Name)
}
for _, node := range nodes.Items {
nodeNames = append(nodeNames, node.Name)
}
if nodeType == DIFFERENT_NODE {
if len(candidates) <= 1 {
e2eskipper.Skipf("Only one node is available in this environment (%v out of %v)", candidateNames, nodeNames)
}
e2e.Logf("Using %s and %s for test (%v out of %v)", candidates[0].Name, candidates[1].Name, candidateNames, nodeNames)
return &candidates[0], &candidates[1], nil
}
e2e.Logf("Using %s for test (%v out of %v)", candidates[0].Name, candidateNames, nodeNames)
return &candidates[0], &candidates[0], nil
}
func checkPodIsolation(f1, f2 *e2e.Framework, nodeType NodeType) error {
makeNamespaceScheduleToAllNodes(f1)
makeNamespaceScheduleToAllNodes(f2)
serverNode, clientNode, err := findAppropriateNodes(f1, nodeType)
if err != nil {
return err
}
podName := "isolation-webserver"
defer f1.ClientSet.CoreV1().Pods(f1.Namespace.Name).Delete(context.Background(), podName, metav1.DeleteOptions{})
ip := exutil.LaunchWebserverPod(f1.ClientSet, f1.Namespace.Name, podName, serverNode.Name)
return checkConnectivityToHost(f2, clientNode.Name, "isolation-wget", ip, 10*time.Second)
}
func checkServiceConnectivity(serverFramework, clientFramework *e2e.Framework, nodeType NodeType) error {
makeNamespaceScheduleToAllNodes(serverFramework)
makeNamespaceScheduleToAllNodes(clientFramework)
serverNode, clientNode, err := findAppropriateNodes(serverFramework, nodeType)
if err != nil {
return err
}
podName := names.SimpleNameGenerator.GenerateName("service-")
defer serverFramework.ClientSet.CoreV1().Pods(serverFramework.Namespace.Name).Delete(context.Background(), podName, metav1.DeleteOptions{})
defer serverFramework.ClientSet.CoreV1().Services(serverFramework.Namespace.Name).Delete(context.Background(), podName, metav1.DeleteOptions{})
ip := launchWebserverService(serverFramework.ClientSet, serverFramework.Namespace.Name, podName, serverNode.Name)
return checkConnectivityToHost(clientFramework, clientNode.Name, "service-wget", ip, 10*time.Second)
}
func InNonIsolatingContext(body func()) {
Context("when using OpenshiftSDN in a mode that does not isolate namespaces by default", func() {
BeforeEach(func() {
if pluginIsolatesNamespaces() {
e2eskipper.Skipf("This plugin isolates namespaces by default.")
}
})
body()
})
}
func InIsolatingContext(body func()) {
Context("when using OpenshiftSDN in a mode that isolates namespaces by default", func() {
BeforeEach(func() {
if !pluginIsolatesNamespaces() {
e2eskipper.Skipf("This plugin does not isolate namespaces by default.")
}
})
body()
})
}
func InNetworkPolicyContext(body func()) {
Context("when using a plugin that implements NetworkPolicy", func() {
BeforeEach(func() {
if !pluginImplementsNetworkPolicy() {
e2eskipper.Skipf("This plugin does not implement NetworkPolicy.")
}
})
body()
})
}
func InopenshiftSDNModeContext(plugins []string, body func()) {
Context(fmt.Sprintf("when using one of the OpenshiftSDN modes '%s'", strings.Join(plugins, ", ")),
func() {
BeforeEach(func() {
found := false
for _, plugin := range plugins {
if openshiftSDNMode() == plugin {
found = true
break
}
}
if !found {
e2eskipper.Skipf("Not using one of the specified OpenshiftSDN modes")
}
})
body()
},
)
}
func InOpenShiftSDNContext(body func()) {
Context("when using openshift-sdn",
func() {
BeforeEach(func() {
if networkPluginName() != openshiftSDNPluginName {
e2eskipper.Skipf("Not using openshift-sdn")
}
})
body()
},
)
}
func InBareMetalIPv4ClusterContext(oc *exutil.CLI, body func()) {
Context("when running openshift ipv4 cluster on bare metal",
func() {
BeforeEach(func() {
pType, err := platformType(oc.AdminConfigClient())
expectNoError(err)
if pType != configv1.BareMetalPlatformType || getIPFamilyForCluster(oc.KubeFramework()) != IPv4 {
e2eskipper.Skipf("Not running in bare metal ipv4 cluster")
}
})
body()
},
)
}
func InIPv4ClusterContext(oc *exutil.CLI, body func()) {
Context("when running openshift ipv4 cluster",
func() {
BeforeEach(func() {
if getIPFamilyForCluster(oc.KubeFramework()) != IPv4 {
e2eskipper.Skipf("Not running in ipv4 cluster")
}
})
body()
},
)
}
func InOVNKubernetesContext(body func()) {
Context("when using openshift ovn-kubernetes",
func() {
BeforeEach(func() {
if networkPluginName() != OVNKubernetesPluginName {
e2eskipper.Skipf("Not using ovn-kubernetes")
}
})
body()
},
)
}
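// Illustrative sketch, not part of the original helpers: how one of the context
// wrappers above might be used from a spec file. It assumes ginkgo's It() is
// dot-imported alongside the Context() and BeforeEach() already used here.
func exampleOVNOnlySpecs() {
	InOVNKubernetesContext(func() {
		It("only runs when ovn-kubernetes is the cluster network plugin", func() {
			// the real assertions would go here
		})
	})
}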
func createNetworkAttachmentDefinition(config *rest.Config, namespace string, name string, nadConfig string) error {
nadClient, err := networkAttachmentDefinitionClient(config)
if err != nil {
return err
}
networkAttachmentDefintion := &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": "k8s.cni.cncf.io/v1",
"kind": "NetworkAttachmentDefinition",
"metadata": map[string]interface{}{
"name": name,
"namespace": namespace,
},
"spec": map[string]interface{}{
"config": nadConfig,
},
},
}
	_, err = nadClient.Namespace(namespace).Create(context.TODO(), networkAttachmentDefinition, metav1.CreateOptions{})
return err
}
func networkAttachmentDefinitionClient(config *rest.Config) (dynamic.NamespaceableResourceInterface, error) {
dynClient, err := dynamic.NewForConfig(config)
if err != nil {
return nil, err
}
nadGVR := schema.GroupVersionResource{
Group: "k8s.cni.cncf.io",
Version: "v1",
Resource: "network-attachment-definitions",
}
nadClient := dynClient.Resource(nadGVR)
return nadClient, nil
}
func getIPFamilyForCluster(f *e2e.Framework) IPFamily {
podIPs, err := createPod(f.ClientSet, f.Namespace.Name, "test-ip-family-pod")
expectNoError(err)
switch len(podIPs) {
case 1:
		ip := net.ParseIP(podIPs[0].IP)
		if ip == nil {
			return Unknown
		}
		if ip.To4() != nil {
			return IPv4
		}
		return IPv6
case 2:
ip1 := net.ParseIP(podIPs[0].IP)
ip2 := net.ParseIP(podIPs[1].IP)
if ip1 == nil || ip2 == nil {
return Unknown
}
if (ip1.To4() == nil) == (ip2.To4() == nil) {
return Unknown
}
return DualStack
default:
return Unknown
}
}
func createPod(client k8sclient.Interface, ns, generateName string) ([]corev1.PodIP, error) {
pod := frameworkpod.NewAgnhostPod(ns, "", nil, nil, nil)
pod.ObjectMeta.GenerateName = generateName
execPod, err := client.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
expectNoError(err, "failed to create new pod in namespace: %s", ns)
var podIPs []corev1.PodIP
err = wait.PollImmediate(poll, 2*time.Minute, func() (bool, error) {
retrievedPod, err := client.CoreV1().Pods(execPod.Namespace).Get(context.TODO(), execPod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
podIPs = retrievedPod.Status.PodIPs
return retrievedPod.Status.Phase == corev1.PodRunning, nil
})
return podIPs, err
}
// SubnetIPs enumerates all IP addresses in an IP subnet (starting with the provided IP address and including the broadcast address).
func SubnetIPs(ipnet net.IPNet) ([]net.IP, error) {
var ipList []net.IP
ip := ipnet.IP
for ; ipnet.Contains(ip); ip = incIP(ip) {
ipList = append(ipList, ip)
}
return ipList, nil
}
// incIP increases the current IP address by one. This function works for both IPv4 and IPv6.
func incIP(ip net.IP) net.IP {
// allocate a new IP
newIp := make(net.IP, len(ip))
copy(newIp, ip)
byteIp := []byte(newIp)
l := len(byteIp)
var i int
for k := range byteIp {
		// start with the rightmost byte first
		// if the byte is below 0xff, incrementing it cannot overflow, so increment and stop
		// otherwise it wraps to 0 and the carry continues into the next byte to the left
i = l - 1 - k
if byteIp[i] < 0xff {
byteIp[i]++
break
} else {
byteIp[i] = 0
}
}
return net.IP(byteIp)
}
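// Illustrative sketch, not part of the original file: exercising SubnetIPs and incIP
// on a small subnet. 192.0.2.0/30 is a documentation range used purely as an assumed
// example value; it should enumerate 192.0.2.0 through 192.0.2.3.
func exampleSubnetIPs() {
	_, ipnet, err := net.ParseCIDR("192.0.2.0/30")
	if err != nil {
		panic(err)
	}
	ips, _ := SubnetIPs(*ipnet)
	for _, ip := range ips {
		fmt.Println(ip.String())
	}
}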
// GetIPAddressFamily returns if this cloud uses IPv4 and/or IPv6.
func GetIPAddressFamily(oc *exutil.CLI) (bool, bool, error) {
var hasIPv4 bool
var hasIPv6 bool
var err error
networkConfig, err := oc.AdminOperatorClient().OperatorV1().Networks().Get(context.Background(), "cluster", metav1.GetOptions{})
if err != nil {
return false, false, err
}
for _, cidr := range networkConfig.Spec.ServiceNetwork {
if utilnet.IsIPv6CIDRString(cidr) {
hasIPv6 = true
} else {
hasIPv4 = true
}
}
return hasIPv4, hasIPv6, nil
}
|
[
"\"NETWORKING_E2E_ISOLATION\"",
"\"NETWORKING_E2E_NETWORKPOLICY\""
] |
[] |
[
"NETWORKING_E2E_ISOLATION",
"NETWORKING_E2E_NETWORKPOLICY"
] |
[]
|
["NETWORKING_E2E_ISOLATION", "NETWORKING_E2E_NETWORKPOLICY"]
|
go
| 2 | 0 | |
cli/global.go
|
package cli
import (
"fmt"
"io/ioutil"
"log"
"os"
"github.com/99designs/aws-vault/v6/prompt"
"github.com/99designs/aws-vault/v6/vault"
"github.com/99designs/keyring"
"golang.org/x/crypto/ssh/terminal"
kingpin "gopkg.in/alecthomas/kingpin.v2"
)
var keyringConfigDefaults = keyring.Config{
ServiceName: "aws-vault",
FileDir: "~/.awsvault/keys/",
FilePasswordFunc: fileKeyringPassphrasePrompt,
LibSecretCollectionName: "awsvault",
KWalletAppID: "aws-vault",
KWalletFolder: "aws-vault",
KeychainTrustApplication: true,
WinCredPrefix: "aws-vault",
}
type AwsVault struct {
Debug bool
KeyringConfig keyring.Config
KeyringBackend string
PromptDriver string
keyringImpl keyring.Keyring
awsConfigFile *vault.ConfigFile
configLoader *vault.ConfigLoader
}
func (a *AwsVault) Keyring() (keyring.Keyring, error) {
if a.keyringImpl == nil {
if a.KeyringBackend != "" {
a.KeyringConfig.AllowedBackends = []keyring.BackendType{keyring.BackendType(a.KeyringBackend)}
}
var err error
a.keyringImpl, err = keyring.Open(a.KeyringConfig)
if err != nil {
return nil, err
}
}
return a.keyringImpl, nil
}
func (a *AwsVault) AwsConfigFile() (*vault.ConfigFile, error) {
if a.awsConfigFile == nil {
var err error
a.awsConfigFile, err = vault.LoadConfigFromEnv()
if err != nil {
return nil, err
}
}
return a.awsConfigFile, nil
}
func (a *AwsVault) MustGetProfileNames() []string {
config, err := a.AwsConfigFile()
if err != nil {
log.Fatalf("Error loading AWS config: %s", err.Error())
}
return config.ProfileNames()
}
func (a *AwsVault) ConfigLoader() (*vault.ConfigLoader, error) {
if a.configLoader == nil {
awsConfigFile, err := a.AwsConfigFile()
if err != nil {
return nil, err
}
a.configLoader = &vault.ConfigLoader{File: awsConfigFile}
}
return a.configLoader, nil
}
func ConfigureGlobals(app *kingpin.Application) *AwsVault {
a := &AwsVault{
KeyringConfig: keyringConfigDefaults,
}
backendsAvailable := []string{}
for _, backendType := range keyring.AvailableBackends() {
backendsAvailable = append(backendsAvailable, string(backendType))
}
promptsAvailable := prompt.Available()
app.Flag("debug", "Show debugging output").
BoolVar(&a.Debug)
app.Flag("backend", fmt.Sprintf("Secret backend to use %v", backendsAvailable)).
Envar("AWS_VAULT_BACKEND").
EnumVar(&a.KeyringBackend, backendsAvailable...)
app.Flag("prompt", fmt.Sprintf("Prompt driver to use %v", promptsAvailable)).
Default("terminal").
Envar("AWS_VAULT_PROMPT").
EnumVar(&a.PromptDriver, promptsAvailable...)
app.Flag("keychain", "Name of macOS keychain to use, if it doesn't exist it will be created").
Default("aws-vault").
Envar("AWS_VAULT_KEYCHAIN_NAME").
StringVar(&a.KeyringConfig.KeychainName)
app.Flag("pass-dir", "Pass password store directory").
Envar("AWS_VAULT_PASS_PASSWORD_STORE_DIR").
StringVar(&a.KeyringConfig.PassDir)
app.Flag("pass-cmd", "Name of the pass executable").
Envar("AWS_VAULT_PASS_CMD").
StringVar(&a.KeyringConfig.PassCmd)
app.Flag("pass-prefix", "Prefix to prepend to the item path stored in pass").
Envar("AWS_VAULT_PASS_PREFIX").
StringVar(&a.KeyringConfig.PassPrefix)
app.PreAction(func(c *kingpin.ParseContext) error {
if !a.Debug {
log.SetOutput(ioutil.Discard)
}
keyring.Debug = a.Debug
log.Printf("aws-vault %s", app.Model().Version)
return nil
})
return a
}
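// Illustrative sketch, not part of the original file: how ConfigureGlobals might be
// wired into a kingpin application from a main package. The application name and
// help text here are assumptions.
func exampleConfigureGlobals() {
	app := kingpin.New("aws-vault", "A vault for securely storing and accessing AWS credentials")
	awsVault := ConfigureGlobals(app)
	kingpin.MustParse(app.Parse(os.Args[1:]))
	_ = awsVault
}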
func fileKeyringPassphrasePrompt(prompt string) (string, error) {
if password := os.Getenv("AWS_VAULT_FILE_PASSPHRASE"); password != "" {
return password, nil
}
fmt.Fprintf(os.Stderr, "%s: ", prompt)
b, err := terminal.ReadPassword(int(os.Stdin.Fd()))
if err != nil {
return "", err
}
fmt.Println()
return string(b), nil
}
|
[
"\"AWS_VAULT_FILE_PASSPHRASE\""
] |
[] |
[
"AWS_VAULT_FILE_PASSPHRASE"
] |
[]
|
["AWS_VAULT_FILE_PASSPHRASE"]
|
go
| 1 | 0 | |
src/poe_character_exporter/handler.py
|
import json
import logging
import uuid
import os
from datetime import timezone, datetime
import boto3
from poe_character_exporter.character import get_character, format_item
logger = logging.getLogger(__name__)
if os.environ.get("LOG_LEVEL"):
logger.setLevel(os.environ["LOG_LEVEL"])
else:
logger.setLevel("INFO")
def handler(event, context):
ddb = boto3.resource("dynamodb")
if not event.get("CorrelationId"):
logger.warning(f"Missing correlation id in the envent! Generating one...")
correlation_id = uuid.uuid4()
else:
correlation_id = event["CorrelationId"]
logger.debug(f"Started handler {correlation_id}")
    # the PoE character API is rate limited, so process at most 44 characters per invocation
for i in range(0,44):
if not event["characters"]:
logger.info(f"All characters processed, adding -1 to the event")
event["characters"] = -1
if event["characters"] == -1:
break
c = event["characters"].pop(0)
character = get_character(c["account"], c["character"])
if character.get("error"):
error = character["error"]
if error["code"] == 1:
logger.info(f"Character could not be loaded, skipping...")
elif error["code"] == 2:
logger.warning(f"Early exit from the character loop, rate limit too high")
break
else:
poe_character_table = ddb.Table("poe_item_alerts_characters")
for item in character["items"]:
ddb_item = format_item(item)
ddb_item["character_name"] = c["character"]
ddb_item["character_class"] = character["character"]["class"]
ddb_item["character_level"] = character["character"]["level"]
ddb_item["account_name"] = c["account"]
current_epoch = int(datetime.now(tz=timezone.utc).timestamp())
ddb_item["created"] = current_epoch
ddb_item["ttl"] = current_epoch + 86400
ddb_item["dead"] = c["dead"]
poe_character_table.put_item(Item=ddb_item)
logger.info(f"Ingested {c['character']}")
return event
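# Illustrative sketch, not part of the original handler: the event shape the loop above
# assumes. Only "CorrelationId" and "characters" are read; the values below are made-up
# example data.
EXAMPLE_EVENT = {
    "CorrelationId": "00000000-0000-0000-0000-000000000000",
    "characters": [
        {"account": "someAccount", "character": "someCharacter", "dead": False},
    ],
}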
|
[] |
[] |
[
"LOG_LEVEL"
] |
[]
|
["LOG_LEVEL"]
|
python
| 1 | 0 | |
mslib/msui/constants.py
|
# -*- coding: utf-8 -*-
"""
mslib.msui.constants
~~~~~~~~~~~~~~~~~~~~
This module provides constants
This file is part of mss.
:copyright: Copyright 2008-2014 Deutsches Zentrum fuer Luft- und Raumfahrt e.V.
:copyright: Copyright 2011-2014 Marc Rautenhaus (mr), Tongxi Lou (tl)
:copyright: Copyright 2016-2017 Reimar Bauer
:copyright: Copyright 2016-2021 by the mss team, see AUTHORS.
:license: APACHE-2.0, see LICENSE for details.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import fs
import os
import logging
HOME = os.path.expanduser(f"~{os.path.sep}")
MSS_CONFIG_PATH = os.getenv("MSS_CONFIG_PATH", os.path.join(HOME, ".config", "mss"))
if '://' in MSS_CONFIG_PATH:
    try:
        _fs = fs.open_fs(MSS_CONFIG_PATH)
    except fs.errors.CreateFailed:
        # the target filesystem does not exist yet; reopen it with create=True rather
        # than calling makedirs on a handle that was never bound
        _fs = fs.open_fs(MSS_CONFIG_PATH, create=True)
    except fs.opener.errors.UnsupportedProtocol:
        logging.error(f'FS url "{MSS_CONFIG_PATH}" not supported')
else:
_dir = os.path.expanduser(MSS_CONFIG_PATH)
if not os.path.exists(_dir):
os.makedirs(_dir)
MSS_SETTINGS = os.getenv('MSS_SETTINGS', os.path.join(MSS_CONFIG_PATH, "mss_settings.json"))
WMS_LOGIN_CACHE = {}
MSC_LOGIN_CACHE = {}
CACHED_CONFIG_FILE = None
if os.path.exists(MSS_SETTINGS):
CACHED_CONFIG_FILE = MSS_SETTINGS
POSIX = {"application_destination": os.path.join(HOME, ".local/share/applications/mss{}.desktop"),
"icon_destination": os.path.join(HOME, ".local/share/icons/hicolor/{}/apps/mss-logo{}.png"),
"desktop": """[Desktop Entry]
Name=mss {}
Comment=A web service based tool to plan atmospheric research flights (mission support system).
Keywords=documentation;information;
Exec={}
Icon={}
Type=Application
Categories=Science;Education;
StartupNotify=true
X-GNOME-SingleWindow=false
X-Ubuntu-Gettext-Domain=mss
"""}
|
[] |
[] |
[
"MSS_SETTINGS",
"MSS_CONFIG_PATH"
] |
[]
|
["MSS_SETTINGS", "MSS_CONFIG_PATH"]
|
python
| 2 | 0 | |
pkg/processor/trigger/http/test/suite/suite.go
|
/*
Copyright 2017 The Nuclio Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package httpsuite
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"regexp"
"strings"
"time"
"github.com/nuclio/nuclio/pkg/platform"
"github.com/nuclio/nuclio/pkg/processor/test/suite"
"github.com/nuclio/nuclio/test/compare"
"github.com/nuclio/nuclio-sdk-go"
)
// EventFields for events
type EventFields struct {
ID nuclio.ID `json:"id,omitempty"`
TriggerClass string `json:"triggerClass,omitempty"`
TriggerKind string `json:"eventType,omitempty"`
ContentType string `json:"contentType,omitempty"`
Headers map[string]interface{} `json:"headers,omitempty"`
Timestamp time.Time `json:"timestamp,omitempty"`
Path string `json:"path,omitempty"`
URL string `json:"url,omitempty"`
Method string `json:"method,omitempty"`
ShardID int `json:"shardID,omitempty"`
TotalNumShards int `json:"totalNumShards,omitempty"`
Type string `json:"type,omitempty"`
TypeVersion string `json:"typeVersion,omitempty"`
Version string `json:"version,omitempty"`
Body string `json:"body,omitempty"`
}
// Request holds information about test HTTP request and response
type Request struct {
Name string
RequestBody string
RequestHeaders map[string]interface{}
RequestLogLevel *string
RequestMethod string
RequestPath string
RequestPort int
ExpectedLogMessages []string
ExpectedLogRecords []map[string]interface{}
ExpectedResponseBody interface{}
ExpectedResponseHeaders map[string]string
ExpectedResponseStatusCode *int
}
// TestSuite is an HTTP test suite
type TestSuite struct {
processorsuite.TestSuite
httpClient *http.Client
}
// SetupTest runs before every test
func (suite *TestSuite) SetupTest() {
suite.TestSuite.SetupTest()
suite.httpClient = &http.Client{
Timeout: 5 * time.Second,
}
}
// DeployFunctionAndExpectError runs a function, expecting an error
func (suite *TestSuite) DeployFunctionAndExpectError(createFunctionOptions *platform.CreateFunctionOptions, expectedMessage string) {
// add some more common CreateFunctionOptions
suite.PopulateDeployOptions(createFunctionOptions)
_, err := suite.Platform.CreateFunction(createFunctionOptions)
suite.Require().Error(err, expectedMessage)
}
// DeployFunctionAndRequest deploys a function and call it with request
func (suite *TestSuite) DeployFunctionAndRequest(createFunctionOptions *platform.CreateFunctionOptions,
request *Request) *platform.CreateFunctionResult {
defaultStatusCode := http.StatusOK
if request.ExpectedResponseStatusCode == nil {
request.ExpectedResponseStatusCode = &defaultStatusCode
}
// by default BuildAndRunFunction will map 8080
if request.RequestPort == 0 {
request.RequestPort = 8080
}
if request.RequestPath == "" {
request.RequestPath = "/"
}
if request.RequestMethod == "" {
request.RequestMethod = "POST"
}
return suite.DeployFunction(createFunctionOptions, func(deployResult *platform.CreateFunctionResult) bool {
		// modify request port to that of the deployed function
request.RequestPort = deployResult.Port
return suite.SendRequestVerifyResponse(request)
})
}
// SendRequestVerifyResponse sends a request and verifies we got expected response
func (suite *TestSuite) SendRequestVerifyResponse(request *Request) bool {
suite.Logger.DebugWith("Sending request",
"requestPort", request.RequestPort,
"requestPath", request.RequestPath,
"requestHeaders", request.RequestHeaders,
"requestBody", request.RequestBody,
"requestLogLevel", request.RequestLogLevel)
baseURL := "localhost"
	// if NUCLIO_TEST_HOST is set, target the docker host instead of localhost
if os.Getenv("NUCLIO_TEST_HOST") != "" {
baseURL = os.Getenv("NUCLIO_TEST_HOST")
}
// Send request to proper url
url := fmt.Sprintf("http://%s:%d%s", baseURL, request.RequestPort, request.RequestPath)
// create a request
httpRequest, err := http.NewRequest(request.RequestMethod, url, strings.NewReader(request.RequestBody))
suite.Require().NoError(err)
// if there are request headers, add them
if request.RequestHeaders != nil {
for requestHeaderName, requestHeaderValue := range request.RequestHeaders {
httpRequest.Header.Add(requestHeaderName, fmt.Sprintf("%v", requestHeaderValue))
}
} else {
httpRequest.Header.Add("Content-Type", "text/plain")
}
// if there is a log level, add the header
if request.RequestLogLevel != nil {
httpRequest.Header.Add("X-nuclio-log-level", *request.RequestLogLevel)
}
// invoke the function
httpResponse, err := suite.httpClient.Do(httpRequest)
// if we fail to connect, fail
if err != nil && strings.Contains(err.Error(), "EOF") {
time.Sleep(500 * time.Millisecond)
return false
}
suite.Require().NoError(err)
if request.ExpectedResponseStatusCode != nil {
suite.Require().Equal(*request.ExpectedResponseStatusCode,
httpResponse.StatusCode,
"Got unexpected status code with request body (%s)",
request.RequestBody)
}
body, err := ioutil.ReadAll(httpResponse.Body)
suite.Require().NoError(err)
// verify header correctness
if request.ExpectedResponseHeaders != nil {
// the httpResponse may contain more headers. just check that all the expected
// headers contain the proper values
for expectedHeaderName, expectedHeaderValue := range request.ExpectedResponseHeaders {
suite.Require().Equal(expectedHeaderValue, httpResponse.Header.Get(expectedHeaderName))
}
}
// verify body correctness
switch typedExpectedResponseBody := request.ExpectedResponseBody.(type) {
// if it's a simple string - just compare
case string:
suite.Require().Equal(typedExpectedResponseBody, string(body))
// if it's a map - assume JSON
case map[string]interface{}:
// verify content type is JSON
suite.Require().Equal("application/json", httpResponse.Header.Get("Content-Type"))
// unmarshall the body
unmarshalledBody := make(map[string]interface{})
err := json.Unmarshal(body, &unmarshalledBody)
suite.Require().NoError(err)
suite.Require().True(compare.CompareNoOrder(typedExpectedResponseBody, unmarshalledBody))
case *regexp.Regexp:
suite.Require().Regexp(typedExpectedResponseBody, string(body))
case func([]byte):
typedExpectedResponseBody(body)
}
// if there are logs expected, verify them
if request.ExpectedLogMessages != nil {
decodedLogRecords := []map[string]interface{}{}
// decode the logs in the header
encodedLogs := httpResponse.Header.Get("X-nuclio-logs")
err := json.Unmarshal([]byte(encodedLogs), &decodedLogRecords)
suite.Require().NoError(err)
receivedLogMessages := []string{}
// create a list of messages
for _, decodedLogRecord := range decodedLogRecords {
// add the message to the list
receivedLogMessages = append(receivedLogMessages, decodedLogRecord["message"].(string))
}
// now compare the expected and received logs
suite.Require().Equal(request.ExpectedLogMessages, receivedLogMessages)
}
if request.ExpectedLogRecords != nil {
decodedLogRecords := []map[string]interface{}{}
// decode the logs in the header
encodedLogs := httpResponse.Header.Get("X-nuclio-logs")
err := json.Unmarshal([]byte(encodedLogs), &decodedLogRecords)
suite.Require().NoError(err)
suite.Require().Equal(len(request.ExpectedLogRecords), len(decodedLogRecords))
for i, expected := range request.ExpectedLogRecords {
logRecord := decodedLogRecords[i]
subLogRecord := suite.subMap(logRecord, expected)
suite.Require().Equal(expected, subLogRecord)
}
}
return true
}
// subMap returns a subset of source with only the keys in keys
// e.g. subMap({"a": 1, "b": 2, "c": 3}, {"b": 7, "c": 20}) -> {"b": 2, "c": 3}
func (suite *TestSuite) subMap(source, keys map[string]interface{}) map[string]interface{} {
sub := make(map[string]interface{})
for key := range keys {
sub[key] = source[key]
}
return sub
}
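// Illustrative sketch, not part of the original suite: a minimal Request a test might
// pass to DeployFunctionAndRequest. All field values are assumed example data.
func exampleRequest() *Request {
	expectedStatus := http.StatusOK
	return &Request{
		RequestBody:                `{"hello": "world"}`,
		RequestHeaders:             map[string]interface{}{"Content-Type": "application/json"},
		RequestMethod:              "POST",
		RequestPath:                "/",
		ExpectedResponseBody:       map[string]interface{}{"hello": "world"},
		ExpectedResponseStatusCode: &expectedStatus,
	}
}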
|
[
"\"NUCLIO_TEST_HOST\"",
"\"NUCLIO_TEST_HOST\""
] |
[] |
[
"NUCLIO_TEST_HOST"
] |
[]
|
["NUCLIO_TEST_HOST"]
|
go
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Project102.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 |